/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef EVENT__HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"
#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif
/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};
/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

static void *event_self_cbarg_ptr_ = NULL;
static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);
#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};
static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned. */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief Debug-mode variable which is set by any function/structure that
 * needs to be shared across threads (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable will be evaluated;
 * if it is set to something other than zero, the evthread setup functions
 * were called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* record that ev is now setup (that is, ready for an add) */
static void event_debug_note_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (!dent) {
		dent = mm_malloc(sizeof(*dent));
		if (!dent)
			event_errx(EVENT_ERR_ABORT_,
			    "Out of memory in debugging code");
		dent->ptr = ev;
		dent->added = 0;
		HT_INSERT(event_debug_map, &global_debug_map, dent);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer setup */
static void event_debug_note_teardown_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
	if (dent)
		mm_free(dent);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is now added */
static void event_debug_note_add_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 1;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting an add on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer added */
static void event_debug_note_del_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting a del on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* assert that ev is setup (i.e., okay to add or inspect) */
static void event_debug_assert_is_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (!dent) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on a non-initialized event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
/* assert that ev is not added (i.e., okay to tear down or set up again) */
static void event_debug_assert_not_added_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent && dent->added) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on an already added event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT", "
		    "flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
{
	if (!event_debug_mode_on_)
		return;
	if (fd < 0)
		return;

#ifndef _WIN32
	{
		int flags;
		if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
			EVUTIL_ASSERT(flags & O_NONBLOCK);
		}
	}
#endif
}
#else
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
#endif
#define EVENT_BASE_ASSERT_LOCKED(base)		\
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5
/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv, NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
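
/* Illustrative sketch (not part of libevent): from inside a running
 * callback, event_base_gettimeofday_cached() avoids a syscall by reusing
 * the timestamp the loop cached before dispatching callbacks.
 *
 *	static void
 *	my_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event_base *base = arg;
 *		struct timeval now;
 *		event_base_gettimeofday_cached(base, &now);
 *		printf("callback ran near %ld\n", (long)now.tv_sec);
 *	}
 */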
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}
int
event_base_update_cache_time(struct event_base *base)
{
	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}
struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}
void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
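
/* Illustrative sketch (not part of libevent): debug mode must be switched
 * on before any event or event_base exists, so the usual place is the very
 * top of main().
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		event_enable_debug_mode();
 *		struct event_base *base = event_base_new();
 *		return base ? 0 : 1;
 *	}
 */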
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

	event_debug_mode_on_ = 0;
#endif
}
struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			if (precise_time) {
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
			}
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
				eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r < 0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}
static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i;
	size_t n_deleted = 0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	while (1) {
		/* A finalizer can register yet another finalizer from within
		 * itself; if that finalizer lands in active_later_queue it can
		 * be moved to activequeues, and then we would have events in
		 * activequeues after this function returns, which is not what
		 * we want (we even have an assertion for this).
		 *
		 * A simple case is a bufferevent with an underlying
		 * bufferevent (i.e. filters). */
		int i = event_base_free_queues_(base, run_finalizers);
		event_debug(("%s: %d events freed", __func__, i));
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
		    __func__, n_deleted));
	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}
/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};
/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement. */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random. */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process.  For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure.  If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa). */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			    "%s: could not reinitialize event mechanism",
			    __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
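
/* Illustrative sketch (not part of libevent): after fork(), a child must
 * call event_reinit() before using an inherited event_base, since backends
 * such as epoll and kqueue share kernel state with the parent.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		if (event_reinit(base) == -1)
 *			exit(1);
 *		event_base_dispatch(base);
 *	}
 */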
/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}
const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char**)methods);

	methods = tmp;

	return (methods);
}
struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}
static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}
int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
	if (!cfg)
		return (-1);
	cfg->require_features = features;
	return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
	if (!cfg)
		return (-1);
	cfg->n_cpus_hint = cpus;
	return (0);
}

int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
	if (max_interval)
		memcpy(&cfg->max_dispatch_interval, max_interval,
		    sizeof(struct timeval));
	else
		cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks =
	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
	if (min_priority < 0)
		min_priority = 0;
	cfg->limit_callbacks_after_prio = min_priority;
	return (0);
}
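
/* Illustrative sketch (not part of libevent): building a base from a
 * config that avoids the select backend and caps the time and callback
 * count spent per dispatch pass.
 *
 *	struct event_config *cfg = event_config_new();
 *	struct timeval max_time = { 0, 50000 };
 *	event_config_avoid_method(cfg, "select");
 *	event_config_set_max_dispatch_interval(cfg, &max_time, 64, 0);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */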
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}
int
event_base_get_npriorities(struct event_base *base)
{
	int n;
	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	n = base->nactivequeues;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (n);
}

int
event_base_get_num_events(struct event_base *base, unsigned int type)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE)
		r += base->event_count_active;

	if (type & EVENT_BASE_COUNT_VIRTUAL)
		r += base->virtual_event_count;

	if (type & EVENT_BASE_COUNT_ADDED)
		r += base->event_count;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}
int
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE) {
		r += base->event_count_active_max;
		if (clear)
			base->event_count_active_max = 0;
	}

	if (type & EVENT_BASE_COUNT_VIRTUAL) {
		r += base->virtual_event_count_max;
		if (clear)
			base->virtual_event_count_max = 0;
	}

	if (type & EVENT_BASE_COUNT_ADDED) {
		r += base->event_count_max;
		if (clear)
			base->event_count_max = 0;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}

/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	return (base->virtual_event_count > 0 || base->event_count > 0);
}
1353 /* "closure" function called when processing active signal events */
1355 event_signal_closure(struct event_base *base, struct event *ev)
1360 /* Allows deletes to work */
1361 ncalls = ev->ev_ncalls;
1363 ev->ev_pncalls = &ncalls;
1364 EVBASE_RELEASE_LOCK(base, th_base_lock);
1367 ev->ev_ncalls = ncalls;
1369 ev->ev_pncalls = NULL;
1370 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1372 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1373 should_break = base->event_break;
1374 EVBASE_RELEASE_LOCK(base, th_base_lock);
1378 ev->ev_pncalls = NULL;
/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousand timeout events all with the
 * same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" values to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
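
/* Illustrative decoding sketch (not part of libevent): with the masks
 * above, a common-timeout tv_usec of 0x50300078 carries magic 0x5 in the
 * top 4 bits, index 0x03 in the next 8 bits, and 120 real microseconds in
 * the low 20 bits.
 *
 *	ev_uint32_t usec = 0x50300078;
 *	EVUTIL_ASSERT((usec & COMMON_TIMEOUT_MASK) == COMMON_TIMEOUT_MAGIC);
 *	int idx = (usec & COMMON_TIMEOUT_IDX_MASK) >> COMMON_TIMEOUT_IDX_SHIFT;
 *	// idx == 3, and (usec & MICROSECONDS_MASK) == 120
 */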
/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
{
	int idx;
	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
		return 0;
	idx = COMMON_TIMEOUT_IDX(tv);
	return idx < base->n_common_timeouts;
}

/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 * one is a common timeout. */
static inline int
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
{
	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
	    (tv2->tv_usec & ~MICROSECONDS_MASK);
}

/** Requires that 'tv' is a common timeout.  Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}

static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
{
	const struct timeval *expect =
	    &get_common_timeout_list(base, tv)->duration;
	return tv->tv_sec == expect->tv_sec &&
	    tv->tv_usec == expect->tv_usec;
}
/* Add the timeout for the first event in given common timeout list to the
 * event_base's minheap. */
static void
common_timeout_schedule(struct common_timeout_list *ctl,
    const struct timeval *now, struct event *head)
{
	struct timeval timeout = head->ev_timeout;
	timeout.tv_usec &= MICROSECONDS_MASK;
	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
}
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	while (1) {
		ev = TAILQ_FIRST(&ctl->events);
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#define MAX_COMMON_TIMEOUTS 256

const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
{
	int i;
	struct timeval tv;
	const struct timeval *result=NULL;
	struct common_timeout_list *new_ctl;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (duration->tv_usec > 1000000) {
		memcpy(&tv, duration, sizeof(struct timeval));
		if (is_common_timeout(duration, base))
			tv.tv_usec &= MICROSECONDS_MASK;
		tv.tv_sec += tv.tv_usec / 1000000;
		tv.tv_usec %= 1000000;
		duration = &tv;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		const struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		if (duration->tv_sec == ctl->duration.tv_sec &&
		    duration->tv_usec ==
		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
			result = &ctl->duration;
			goto done;
		}
	}
	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
		event_warnx("%s: Too many common timeouts already in use; "
		    "we only support %d per event_base", __func__,
		    MAX_COMMON_TIMEOUTS);
		goto done;
	}
	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
		int n = base->n_common_timeouts < 16 ? 16 :
		    base->n_common_timeouts*2;
		struct common_timeout_list **newqueues =
		    mm_realloc(base->common_timeout_queues,
			n*sizeof(struct common_timeout_queue *));
		if (!newqueues) {
			event_warn("%s: realloc",__func__);
			goto done;
		}
		base->n_common_timeouts_allocated = n;
		base->common_timeout_queues = newqueues;
	}
	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
	if (!new_ctl) {
		event_warn("%s: calloc",__func__);
		goto done;
	}
	TAILQ_INIT(&new_ctl->events);
	new_ctl->duration.tv_sec = duration->tv_sec;
	new_ctl->duration.tv_usec =
	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
	evtimer_assign(&new_ctl->timeout_event, base,
	    common_timeout_callback, new_ctl);
	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&new_ctl->timeout_event, 0);
	new_ctl->base = base;
	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
	result = &new_ctl->duration;

done:
	if (result)
		EVUTIL_ASSERT(is_common_timeout(result, base));

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return result;
}
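
/* Illustrative sketch (not part of libevent): when many events share one
 * timeout value, registering it as a common timeout queues them together
 * instead of inserting each one into the minheap individually.
 *
 *	struct timeval tv = { 5, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &tv);
 *	event_add(ev1, common);
 *	event_add(ev2, common);	// same 5s timeout, queued, not heaped
 */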
/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something.  Reschedule relative to now. */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	// Release the lock
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
/*
  Helper for event_process_active to process all the events in a single queue,
  releasing the lock as we go.  This function requires that the lock be held
  when it's invoked.  Returns -1 if we get a signal or an event_break that
  means we should stop processing any active events now.  Otherwise returns
  the number of non-internal event_callbacks that we processed.
*/
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;

	EVUTIL_ASSERT(activeq != NULL);

	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev=NULL;
		if (evcb->evcb_flags & EVLIST_INIT) {
			ev = event_callback_to_event(evcb);

			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;

		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			short res;
			EVUTIL_ASSERT(ev != NULL);
			evcb_callback = *ev->ev_callback;
			res = ev->ev_res;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_evfinalize(ev, ev->ev_arg);
			event_debug_note_teardown_(ev);
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low-priority events can starve
 * high-priority events.
 */

static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;
	return c;
}
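
/* Illustrative sketch (not part of libevent): with two priority queues,
 * a priority-0 event always runs before a priority-1 event in the same
 * dispatch pass; 'urgent_cb' here is a placeholder callback.
 *
 *	event_base_priority_init(base, 2);
 *	struct event *urgent =
 *	    event_new(base, fd, EV_READ|EV_PERSIST, urgent_cb, NULL);
 *	event_priority_set(urgent, 0);	// runs before default-priority events
 *	event_add(urgent, NULL);
 */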
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(const struct event_base *base)
{
	EVUTIL_ASSERT(base);
	return (base->evsel->name);
}
/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_break = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}
int
event_base_loopcontinue(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_continue = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

int
event_base_got_break(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_break;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

int
event_base_got_exit(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_gotterm;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}
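
/* Illustrative sketch (not part of libevent): loopexit lets the current
 * dispatch pass finish its active callbacks, while loopbreak stops the loop
 * immediately; 'on_fatal' is a placeholder callback.
 *
 *	struct timeval ten_sec = { 10, 0 };
 *	event_base_loopexit(base, &ten_sec);	// leave the loop in ~10s
 *
 *	static void on_fatal(evutil_socket_t fd, short what, void *arg)
 *	{
 *		event_base_loopbreak(arg);	// abort dispatch right away
 *	}
 */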
/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock.  We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (base->running_loop) {
		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	clear_time_cache(base);

	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		event_queue_make_later_events_active(base);

		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		update_time_cache(base);

		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}
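
/* Illustrative sketch (not part of libevent): a typical lifetime is
 * create, add events, dispatch until there is nothing left (or until
 * loopexit/loopbreak fires), then free; 'sock' and 'on_read' are
 * placeholders.
 *
 *	struct event_base *base = event_base_new();
 *	struct event *ev =
 *	    event_new(base, sock, EV_READ|EV_PERSIST, on_read, NULL);
 *	event_add(ev, NULL);
 *	event_base_dispatch(base);	// use event_base_loop() for
 *					// EVLOOP_ONCE / EVLOOP_NONBLOCK
 *	event_free(ev);
 *	event_base_free(base);
 */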
/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
	LIST_REMOVE(eonce, next_once);
	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
	event_debug_unassign(&eonce->ev);
	mm_free(eonce);
}
/* not threadsafe, event scheduled once. */
int
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	int res = 0;
	int activate = 0;

	if (!base)
		return (-1);

	/* We cannot support signals that just fire once, or persistent
	 * events. */
	if (events & (EV_SIGNAL|EV_PERSIST))
		return (-1);

	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);

		if (tv == NULL || ! evutil_timerisset(tv)) {
			/* If the event is going to become active immediately,
			 * don't put it on the timeout queue.  This is one
			 * idiom for scheduling a callback, so let's make
			 * it fast (and order-preserving). */
			activate = 1;
		}
	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
		events &= EV_READ|EV_WRITE|EV_CLOSED;

		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		mm_free(eonce);
		return (-1);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (activate)
		event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
	else
		res = event_add_nolock_(&eonce->ev, tv, 0);

	if (res != 0) {
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		mm_free(eonce);
		return (res);
	}
	LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (0);
}
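
/* Illustrative sketch (not part of libevent): a fire-once timer with no
 * struct event to manage; libevent frees the internal storage itself after
 * the callback runs.
 *
 *	static void once_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		puts("one second elapsed");
 *	}
 *
 *	struct timeval one_sec = { 1, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &one_sec);
 */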
int
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	if (!base)
		base = current_base;
	if (arg == &event_self_cbarg_ptr_)
		arg = ev;

	if (!(events & EV_SIGNAL))
		event_debug_assert_socket_nonblocking_(fd);
	event_debug_assert_not_added_(ev);

	ev->ev_base = base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	if (events & EV_SIGNAL) {
		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
			event_warnx("%s: EV_SIGNAL is not compatible with "
			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
			return -1;
		}
		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
	} else {
		if (events & EV_PERSIST) {
			evutil_timerclear(&ev->ev_io_timeout);
			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
		} else {
			ev->ev_closure = EV_CLOSURE_EVENT;
		}
	}

	min_heap_elem_init_(ev);

	if (base != NULL) {
		/* by default, we put new events into the middle priority */
		ev->ev_pri = base->nactivequeues / 2;
	}

	event_debug_note_setup_(ev);

	return 0;
}
int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	event_debug_assert_is_setup_(ev);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

void
event_set(struct event *ev, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	int r;
	r = event_assign(ev, current_base, fd, events, callback, arg);
	EVUTIL_ASSERT(r == 0);
}
void *
event_self_cbarg(void)
{
	return &event_self_cbarg_ptr_;
}
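
/* Illustrative sketch (not part of libevent): event_self_cbarg() lets a
 * not-yet-created event be passed to its own callback, e.g. a persistent
 * timer that deletes itself after enough ticks ('ticks' is a placeholder).
 *
 *	static int ticks = 0;
 *	static void tick_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		if (++ticks == 10)
 *			event_del((struct event *)arg);	// arg is the event itself
 *	}
 *
 *	struct event *t =
 *	    event_new(base, -1, EV_PERSIST, tick_cb, event_self_cbarg());
 */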
struct event *
event_base_get_running_event(struct event_base *base)
{
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (EVBASE_IN_THREAD(base)) {
		struct event_callback *evcb = base->current_event;
		if (evcb->evcb_flags & EVLIST_INIT)
			ev = event_callback_to_event(evcb);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return ev;
}
struct event *
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
{
	struct event *ev;
	ev = mm_malloc(sizeof(struct event));
	if (ev == NULL)
		return (NULL);
	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
		mm_free(ev);
		return (NULL);
	}

	return (ev);
}

void
event_free(struct event *ev)
{
	/* This is disabled, so that events which have been finalized remain
	 * valid targets for event_free(). */
	// event_debug_assert_is_setup_(ev);

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);
}

void
event_debug_unassign(struct event *ev)
{
	event_debug_assert_not_added_(ev);
	event_debug_note_teardown_(ev);

	ev->ev_flags &= ~EVLIST_INIT;
}
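
/* Illustrative sketch (not part of libevent): heap-allocated events pair
 * event_new() with event_free(); it is safe to free an added event, since
 * event_free() deletes it from its base first.
 *
 *	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST, cb, NULL);
 *	event_add(ev, NULL);
 *	// ... later:
 *	event_free(ev);		// implicitly event_del()s
 */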
#define EVENT_FINALIZE_FREE_ 0x10000
static int
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	ev->ev_closure = closure;
	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
	event_active_nolock_(ev, EV_FINALIZE, 1);
	ev->ev_flags |= EVLIST_FINALIZING;
	return 0;
}

static int
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	int r;
	struct event_base *base = ev->ev_base;
	if (EVUTIL_FAILURE_CHECK(!base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_finalize_nolock_(base, flags, ev, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

int
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags, ev, cb);
}

int
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
}
void
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	struct event *ev = NULL;
	if (evcb->evcb_flags & EVLIST_INIT) {
		ev = event_callback_to_event(evcb);
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	} else {
		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
	}

	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
	evcb->evcb_cb_union.evcb_cbfinalize = cb;
	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
	evcb->evcb_flags |= EVLIST_FINALIZING;
}

void
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_callback_finalize_nolock_(base, flags, evcb, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * canceled. */
int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{
	int n_pending = 0, i;

	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: %d events finalizing", __func__, n_cbs));

	/* At most one can be currently executing; the rest we just
	 * cancel... But we always make sure that the finalize callback
	 * runs. */
	for (i = 0; i < n_cbs; ++i) {
		struct event_callback *evcb = evcbs[i];
		if (evcb == base->current_event) {
			event_callback_finalize_nolock_(base, 0, evcb, cb);
			++n_pending;
		} else {
			event_callback_cancel_nolock_(base, evcb, 0);
		}
	}

	if (n_pending == 0) {
		/* Just do the first one. */
		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
2349 * Set's the priority of an event - if an event is already scheduled
2350 * changing the priority is going to fail.
2354 event_priority_set(struct event *ev, int pri)
2356 event_debug_assert_is_setup_(ev);
2358 if (ev->ev_flags & EVLIST_ACTIVE)
2360 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
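/* Usage sketch (illustrative): priorities only exist once the base has been
 * given multiple active queues, so a caller would size them first.  "base",
 * "fd", and "cb" are assumed to come from elsewhere:
 *
 *    event_base_priority_init(base, 2);    // queue 0 (high), queue 1 (low)
 *    struct event *ev = event_new(base, fd, EV_READ, cb, NULL);
 *    if (event_priority_set(ev, 0) == -1)
 *        ...    // fails while 'ev' is active or if pri is out of range
 *    event_add(ev, NULL);
 */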
2369 * Checks if a specific event is pending or scheduled.
2373 event_pending(const struct event *ev, short event, struct timeval *tv)
2377 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2378 event_warnx("%s: event has no event_base set.", __func__);
2382 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2383 event_debug_assert_is_setup_(ev);
2385 if (ev->ev_flags & EVLIST_INSERTED)
2386 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2387 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2388 flags |= ev->ev_res;
2389 if (ev->ev_flags & EVLIST_TIMEOUT)
2390 flags |= EV_TIMEOUT;
2392 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2394 /* See if there is a timeout that we should report */
2395 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2396 struct timeval tmp = ev->ev_timeout;
2397 tmp.tv_usec &= MICROSECONDS_MASK;
2398 /* correctly remap to real time */
2399 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2402 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2404 return (flags & event);
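/* Usage sketch (illustrative): asking whether 'ev' still has a read or a
 * timeout pending, and when the timeout would fire:
 *
 *    struct timeval when;
 *    if (event_pending(ev, EV_READ|EV_TIMEOUT, &when))
 *        printf("pending; timeout at %ld s\n", (long)when.tv_sec);
 *
 * 'when' is only filled in if EV_TIMEOUT is both requested and pending,
 * and is remapped to real time as described above.
 */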
2408 event_initialized(const struct event *ev)
2410 if (!(ev->ev_flags & EVLIST_INIT))
2417 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2419 event_debug_assert_is_setup_(event);
2422 *base_out = event->ev_base;
2424 *fd_out = event->ev_fd;
2426 *events_out = event->ev_events;
2428 *callback_out = event->ev_callback;
2430 *arg_out = event->ev_arg;
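/* Usage sketch (illustrative): recovering an event's configuration, e.g. to
 * recreate it elsewhere.  Any _out pointer may be NULL to skip that field;
 * "other_base" is a hypothetical second base:
 *
 *    evutil_socket_t fd;
 *    short what;
 *    event_callback_fn fn;
 *    void *arg;
 *    event_get_assignment(ev, NULL, &fd, &what, &fn, &arg);
 *    struct event *copy = event_new(other_base, fd, what, fn, arg);
 */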
2434 event_get_struct_event_size(void)
2436 return sizeof(struct event);
2440 event_get_fd(const struct event *ev)
2442 event_debug_assert_is_setup_(ev);
2447 event_get_base(const struct event *ev)
2449 event_debug_assert_is_setup_(ev);
2454 event_get_events(const struct event *ev)
2456 event_debug_assert_is_setup_(ev);
2457 return ev->ev_events;
2461 event_get_callback(const struct event *ev)
2463 event_debug_assert_is_setup_(ev);
2464 return ev->ev_callback;
2468 event_get_callback_arg(const struct event *ev)
2470 event_debug_assert_is_setup_(ev);
2475 event_get_priority(const struct event *ev)
2477 event_debug_assert_is_setup_(ev);
2482 event_add(struct event *ev, const struct timeval *tv)
2486 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2487 event_warnx("%s: event has no event_base set.", __func__);
2491 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2493 res = event_add_nolock_(ev, tv, 0);
2495 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
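/* Usage sketch (illustrative): the common case of adding a persistent read
 * event with a relative five-second timeout.  "base", "fd", and "read_cb"
 * are assumed:
 *
 *    struct timeval five_sec = { 5, 0 };
 *    struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
 *        read_cb, NULL);
 *    if (!ev || event_add(ev, &five_sec) == -1)
 *        handle_error();    // hypothetical error path
 */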
2500 /* Helper callback: wake an event_base from another thread. This version
2501 * works by writing a byte to one end of a socketpair, so that the event_base
2502  * listening on the other end will wake up as the corresponding event triggers. */
2505 evthread_notify_base_default(struct event_base *base)
2511 r = send(base->th_notify_fd[1], buf, 1, 0);
2513 r = write(base->th_notify_fd[1], buf, 1);
2515 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2518 #ifdef EVENT__HAVE_EVENTFD
2519 /* Helper callback: wake an event_base from another thread. This version
2520 * assumes that you have a working eventfd() implementation. */
2522 evthread_notify_base_eventfd(struct event_base *base)
2524 ev_uint64_t msg = 1;
2527 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2528 } while (r < 0 && errno == EAGAIN);
2530 return (r < 0) ? -1 : 0;
2535 /** Tell the thread currently running the event_loop for base (if any) that it
2536 * needs to stop waiting in its dispatch function (if it is) and process all
2537 * active callbacks. */
2539 evthread_notify_base(struct event_base *base)
2541 EVENT_BASE_ASSERT_LOCKED(base);
2542 if (!base->th_notify_fn)
2544 if (base->is_notify_pending)
2546 base->is_notify_pending = 1;
2547 return base->th_notify_fn(base);
2550 /* Implementation function to remove a timeout on a currently pending event.
2553 event_remove_timer_nolock_(struct event *ev)
2555 struct event_base *base = ev->ev_base;
2557 EVENT_BASE_ASSERT_LOCKED(base);
2558 event_debug_assert_is_setup_(ev);
2560 event_debug(("event_remove_timer_nolock: event: %p", ev));
2562 /* If it's not pending on a timeout, we don't need to do anything. */
2563 if (ev->ev_flags & EVLIST_TIMEOUT) {
2564 event_queue_remove_timeout(base, ev);
2565 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2572 event_remove_timer(struct event *ev)
2576 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2577 event_warnx("%s: event has no event_base set.", __func__);
2581 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2583 res = event_remove_timer_nolock_(ev);
2585 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
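/* Usage sketch (illustrative): stripping the timeout from a pending event
 * while leaving its I/O registration intact:
 *
 *    struct timeval tv = { 30, 0 };
 *    event_add(ev, &tv);        // EV_READ with a 30-second timeout
 *    ...
 *    event_remove_timer(ev);    // now waits on EV_READ alone
 */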
2590 /* Implementation function to add an event. Works just like event_add,
2591 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2592  * we treat tv as an absolute time, not as an interval to add to the current time. */
2595 event_add_nolock_(struct event *ev, const struct timeval *tv,
2598 struct event_base *base = ev->ev_base;
2602 EVENT_BASE_ASSERT_LOCKED(base);
2603 event_debug_assert_is_setup_(ev);
2606 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2608 EV_SOCK_ARG(ev->ev_fd),
2609 ev->ev_events & EV_READ ? "EV_READ " : " ",
2610 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2611 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2612 tv ? "EV_TIMEOUT " : " ",
2615 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2617 if (ev->ev_flags & EVLIST_FINALIZING) {
2623  * prepare for timeout insertion further below; if we get a
2624  * failure on any step, we should not change any state. */
2626 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2627 if (min_heap_reserve_(&base->timeheap,
2628 1 + min_heap_size_(&base->timeheap)) == -1)
2629 return (-1); /* ENOMEM == errno */
2632 /* If the main thread is currently executing a signal event's
2633 * callback, and we are not the main thread, then we want to wait
2634 * until the callback is done before we mess with the event, or else
2635 * we can race on ev_ncalls and ev_pncalls below. */
2636 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2637 if (base->current_event == event_to_event_callback(ev) &&
2638 (ev->ev_events & EV_SIGNAL)
2639 && !EVBASE_IN_THREAD(base)) {
2640 ++base->current_event_waiters;
2641 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2645 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2646 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2647 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2648 res = evmap_io_add_(base, ev->ev_fd, ev);
2649 else if (ev->ev_events & EV_SIGNAL)
2650 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2652 event_queue_insert_inserted(base, ev);
2654 /* evmap says we need to notify the main thread. */
2661 * we should change the timeout state only if the previous event
2662 * addition succeeded.
2664 if (res != -1 && tv != NULL) {
2667 #ifdef USE_REINSERT_TIMEOUT
2669 int old_timeout_idx;
2673 * for persistent timeout events, we remember the
2674 * timeout value and re-add the event.
2676 * If tv_is_absolute, this was already set.
2678 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2679 ev->ev_io_timeout = *tv;
2681 #ifndef USE_REINSERT_TIMEOUT
2682 if (ev->ev_flags & EVLIST_TIMEOUT) {
2683 event_queue_remove_timeout(base, ev);
2687 /* Check if it is active due to a timeout. Rescheduling
2688 * this timeout before the callback can be executed
2689 * removes it from the active list. */
2690 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2691 (ev->ev_res & EV_TIMEOUT)) {
2692 if (ev->ev_events & EV_SIGNAL) {
2693 /* See if we are just active executing
2694  * this event in a loop */
2696 if (ev->ev_ncalls && ev->ev_pncalls) {
2698 *ev->ev_pncalls = 0;
2702 event_queue_remove_active(base, event_to_event_callback(ev));
2705 gettime(base, &now);
2707 common_timeout = is_common_timeout(tv, base);
2708 #ifdef USE_REINSERT_TIMEOUT
2709 was_common = is_common_timeout(&ev->ev_timeout, base);
2710 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2713 if (tv_is_absolute) {
2714 ev->ev_timeout = *tv;
2715 } else if (common_timeout) {
2716 struct timeval tmp = *tv;
2717 tmp.tv_usec &= MICROSECONDS_MASK;
2718 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2719 ev->ev_timeout.tv_usec |=
2720 (tv->tv_usec & ~MICROSECONDS_MASK);
2722 evutil_timeradd(&now, tv, &ev->ev_timeout);
2726 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2727 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2729 #ifdef USE_REINSERT_TIMEOUT
2730 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2732 event_queue_insert_timeout(base, ev);
2735 if (common_timeout) {
2736 struct common_timeout_list *ctl =
2737 get_common_timeout_list(base, &ev->ev_timeout);
2738 if (ev == TAILQ_FIRST(&ctl->events)) {
2739 common_timeout_schedule(ctl, &now, ev);
2742 struct event* top = NULL;
2743 /* See if the earliest timeout is now earlier than it
2744 * was before: if so, we will need to tell the main
2745 * thread to wake up earlier than it would otherwise.
2746 * We double check the timeout of the top element to
2747 * handle time distortions due to system suspension.
2749 if (min_heap_elt_is_top_(ev))
2751 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2752 evutil_timercmp(&top->ev_timeout, &now, <))
2757 /* if we are not in the right thread, we need to wake up the loop */
2758 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2759 evthread_notify_base(base);
2761 event_debug_note_add_(ev);
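/* Usage sketch (illustrative): the common-timeout branch above is reached
 * via event_base_init_common_timeout(), which hands back a magic timeval
 * that many events can share so their timeouts queue cheaply:
 *
 *    struct timeval one_min = { 60, 0 };
 *    const struct timeval *common =
 *        event_base_init_common_timeout(base, &one_min);
 *    event_add(ev1, common);
 *    event_add(ev2, common);    // both land on one common timeout list
 */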
2767 event_del_(struct event *ev, int blocking)
2770 struct event_base *base = ev->ev_base;
2772 if (EVUTIL_FAILURE_CHECK(!base)) {
2773 event_warnx("%s: event has no event_base set.", __func__);
2777 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2778 res = event_del_nolock_(ev, blocking);
2779 EVBASE_RELEASE_LOCK(base, th_base_lock);
2785 event_del(struct event *ev)
2787 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2791 event_del_block(struct event *ev)
2793 return event_del_(ev, EVENT_DEL_BLOCK);
2797 event_del_noblock(struct event *ev)
2799 return event_del_(ev, EVENT_DEL_NOBLOCK);
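/* Usage sketch (illustrative): the three deletion flavors differ only in
 * whether they wait for this event's callback running in another thread:
 *
 *    event_del(ev);            // autoblock: wait unless the event was
 *                              // created with EV_FINALIZE
 *    event_del_block(ev);      // always wait for a running callback
 *    event_del_noblock(ev);    // never wait; do not free the callback
 *                              // argument until the callback is done
 */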
2802 /** Helper for event_del: always called with th_base_lock held.
2804 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2805 * EVEN_IF_FINALIZING} values. See those for more information.
2808 event_del_nolock_(struct event *ev, int blocking)
2810 struct event_base *base;
2811 int res = 0, notify = 0;
2813 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2814 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2816 /* An event without a base has not been added */
2817 if (ev->ev_base == NULL)
2820 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2822 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2823 if (ev->ev_flags & EVLIST_FINALIZING) {
2831 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2833 /* See if we are just active executing this event in a loop */
2834 if (ev->ev_events & EV_SIGNAL) {
2835 if (ev->ev_ncalls && ev->ev_pncalls) {
2837 *ev->ev_pncalls = 0;
2841 if (ev->ev_flags & EVLIST_TIMEOUT) {
2842 /* NOTE: We never need to notify the main thread because of a
2843 * deleted timeout event: all that could happen if we don't is
2844 * that the dispatch loop might wake up too early. But the
2845 * point of notifying the main thread _is_ to wake up the
2846  * dispatch loop early anyway, so we wouldn't gain anything by doing it. */
2849 event_queue_remove_timeout(base, ev);
2852 if (ev->ev_flags & EVLIST_ACTIVE)
2853 event_queue_remove_active(base, event_to_event_callback(ev));
2854 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2855 event_queue_remove_active_later(base, event_to_event_callback(ev));
2857 if (ev->ev_flags & EVLIST_INSERTED) {
2858 event_queue_remove_inserted(base, ev);
2859 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2860 res = evmap_io_del_(base, ev->ev_fd, ev);
2862 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2864 /* evmap says we need to notify the main thread. */
2868 /* If we do not have events, let's notify event base so it can
2869 * exit without waiting */
2870 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2874 /* if we are not in the right thread, we need to wake up the loop */
2875 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2876 evthread_notify_base(base);
2878 event_debug_note_del_(ev);
2880 /* If the main thread is currently executing this event's callback,
2881 * and we are not the main thread, then we want to wait until the
2882 * callback is done before returning. That way, when this function
2883 * returns, it will be safe to free the user-supplied argument.
2885 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2886 if (blocking != EVENT_DEL_NOBLOCK &&
2887 base->current_event == event_to_event_callback(ev) &&
2888 !EVBASE_IN_THREAD(base) &&
2889 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2890 ++base->current_event_waiters;
2891 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2899 event_active(struct event *ev, int res, short ncalls)
2901 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2902 event_warnx("%s: event has no event_base set.", __func__);
2906 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2908 event_debug_assert_is_setup_(ev);
2910 event_active_nolock_(ev, res, ncalls);
2912 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
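/* Usage sketch (illustrative): manually firing an event, e.g. injecting
 * work from another thread (safe once locking is enabled, e.g. via
 * evthread_use_pthreads()):
 *
 *    event_active(ev, EV_READ, 1);    // callback runs with res = EV_READ
 *
 * The 'ncalls' argument only matters for signal events, where it sets how
 * many times the callback repeats.
 */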
2917 event_active_nolock_(struct event *ev, int res, short ncalls)
2919 struct event_base *base;
2921 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2922 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2925 EVENT_BASE_ASSERT_LOCKED(base);
2927 if (ev->ev_flags & EVLIST_FINALIZING) {
2932 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2934 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2938 /* We get different kinds of events, add them together */
2941 case EVLIST_ACTIVE_LATER:
2949 if (ev->ev_pri < base->event_running_priority)
2950 base->event_continue = 1;
2952 if (ev->ev_events & EV_SIGNAL) {
2953 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2954 if (base->current_event == event_to_event_callback(ev) &&
2955 !EVBASE_IN_THREAD(base)) {
2956 ++base->current_event_waiters;
2957 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2960 ev->ev_ncalls = ncalls;
2961 ev->ev_pncalls = NULL;
2964 event_callback_activate_nolock_(base, event_to_event_callback(ev));
2968 event_active_later_(struct event *ev, int res)
2970 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2971 event_active_later_nolock_(ev, res);
2972 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2976 event_active_later_nolock_(struct event *ev, int res)
2978 struct event_base *base = ev->ev_base;
2979 EVENT_BASE_ASSERT_LOCKED(base);
2981 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2982 /* We get different kinds of events, add them together */
2989 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2993 event_callback_activate_(struct event_base *base,
2994 struct event_callback *evcb)
2997 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2998 r = event_callback_activate_nolock_(base, evcb);
2999 EVBASE_RELEASE_LOCK(base, th_base_lock);
3004 event_callback_activate_nolock_(struct event_base *base,
3005 struct event_callback *evcb)
3009 if (evcb->evcb_flags & EVLIST_FINALIZING)
3012 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3016 case EVLIST_ACTIVE_LATER:
3017 event_queue_remove_active_later(base, evcb);
3026 event_queue_insert_active(base, evcb);
3028 if (EVBASE_NEED_NOTIFY(base))
3029 evthread_notify_base(base);
3035 event_callback_activate_later_nolock_(struct event_base *base,
3036 struct event_callback *evcb)
3038 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3041 event_queue_insert_active_later(base, evcb);
3042 if (EVBASE_NEED_NOTIFY(base))
3043 evthread_notify_base(base);
3048 event_callback_init_(struct event_base *base,
3049 struct event_callback *cb)
3051 memset(cb, 0, sizeof(*cb));
3052 cb->evcb_pri = base->nactivequeues - 1;
3056 event_callback_cancel_(struct event_base *base,
3057 struct event_callback *evcb)
3060 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3061 r = event_callback_cancel_nolock_(base, evcb, 0);
3062 EVBASE_RELEASE_LOCK(base, th_base_lock);
3067 event_callback_cancel_nolock_(struct event_base *base,
3068 struct event_callback *evcb, int even_if_finalizing)
3070 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3073 if (evcb->evcb_flags & EVLIST_INIT)
3074 return event_del_nolock_(event_callback_to_event(evcb),
3075 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3077 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3079 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3083 /* We get different kinds of events, add them together */
3084 event_queue_remove_active(base, evcb);
3086 case EVLIST_ACTIVE_LATER:
3087 event_queue_remove_active_later(base, evcb);
3097 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3099 memset(cb, 0, sizeof(*cb));
3100 cb->evcb_cb_union.evcb_selfcb = fn;
3102 cb->evcb_pri = priority;
3103 cb->evcb_closure = EV_CLOSURE_CB_SELF;
3107 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3109 cb->evcb_pri = priority;
3113 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3116 base = current_base;
3117 event_callback_cancel_(base, cb);
3120 #define MAX_DEFERREDS_QUEUED 32
3122 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3126 base = current_base;
3127 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3128 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3129 r = event_callback_activate_later_nolock_(base, cb);
3131 r = event_callback_activate_nolock_(base, cb);
3133 ++base->n_deferreds_queued;
3136 EVBASE_RELEASE_LOCK(base, th_base_lock);
3141 timeout_next(struct event_base *base, struct timeval **tv_p)
3143 /* Caller must hold th_base_lock */
3146 struct timeval *tv = *tv_p;
3149 ev = min_heap_top_(&base->timeheap);
3152 /* if no time-based events are active, wait for I/O */
3157 if (gettime(base, &now) == -1) {
3162 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3163 evutil_timerclear(tv);
3167 evutil_timersub(&ev->ev_timeout, &now, tv);
3169 EVUTIL_ASSERT(tv->tv_sec >= 0);
3170 EVUTIL_ASSERT(tv->tv_usec >= 0);
3171 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3177 /* Activate every event whose timeout has elapsed. */
3179 timeout_process(struct event_base *base)
3181 /* Caller must hold lock. */
3185 if (min_heap_empty_(&base->timeheap)) {
3189 gettime(base, &now);
3191 while ((ev = min_heap_top_(&base->timeheap))) {
3192 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3195 /* delete this event from the I/O queues */
3196 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3198 event_debug(("timeout_process: event: %p, call %p",
3199 ev, ev->ev_callback));
3200 event_active_nolock_(ev, EV_TIMEOUT, 1);
3205 #define MAX(a,b) (((a)>(b))?(a):(b))
3208 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3210 /* These are a fancy way to spell
3211 if (~flags & EVLIST_INTERNAL)
3212 base->event_count--/++;
3214 #define DECR_EVENT_COUNT(base,flags) \
3215 ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3216 #define INCR_EVENT_COUNT(base,flags) do { \
3217 ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \
3218 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3222 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3224 EVENT_BASE_ASSERT_LOCKED(base);
3225 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3226 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3227 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3230 DECR_EVENT_COUNT(base, ev->ev_flags);
3231 ev->ev_flags &= ~EVLIST_INSERTED;
3234 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3236 EVENT_BASE_ASSERT_LOCKED(base);
3237 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3238 event_errx(1, "%s: %p not on queue %x", __func__,
3239 evcb, EVLIST_ACTIVE);
3242 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3243 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3244 base->event_count_active--;
3246 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3247 evcb, evcb_active_next);
3250 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3252 EVENT_BASE_ASSERT_LOCKED(base);
3253 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3254 event_errx(1, "%s: %p not on queue %x", __func__,
3255 evcb, EVLIST_ACTIVE_LATER);
3258 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3259 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3260 base->event_count_active--;
3262 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3265 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3267 EVENT_BASE_ASSERT_LOCKED(base);
3268 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3269 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3270 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3273 DECR_EVENT_COUNT(base, ev->ev_flags);
3274 ev->ev_flags &= ~EVLIST_TIMEOUT;
3276 if (is_common_timeout(&ev->ev_timeout, base)) {
3277 struct common_timeout_list *ctl =
3278 get_common_timeout_list(base, &ev->ev_timeout);
3279 TAILQ_REMOVE(&ctl->events, ev,
3280 ev_timeout_pos.ev_next_with_common_timeout);
3282 min_heap_erase_(&base->timeheap, ev);
3286 #ifdef USE_REINSERT_TIMEOUT
3287 /* Remove and reinsert 'ev' into the timeout queue. */
3289 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3290 int was_common, int is_common, int old_timeout_idx)
3292 struct common_timeout_list *ctl;
3293 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3294 event_queue_insert_timeout(base, ev);
3298 switch ((was_common<<1) | is_common) {
3299 case 3: /* Changing from one common timeout to another */
3300 ctl = base->common_timeout_queues[old_timeout_idx];
3301 TAILQ_REMOVE(&ctl->events, ev,
3302 ev_timeout_pos.ev_next_with_common_timeout);
3303 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3304 insert_common_timeout_inorder(ctl, ev);
3306 case 2: /* Was common; is no longer common */
3307 ctl = base->common_timeout_queues[old_timeout_idx];
3308 TAILQ_REMOVE(&ctl->events, ev,
3309 ev_timeout_pos.ev_next_with_common_timeout);
3310 min_heap_push_(&base->timeheap, ev);
3312 case 1: /* Wasn't common; has become common. */
3313 min_heap_erase_(&base->timeheap, ev);
3314 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3315 insert_common_timeout_inorder(ctl, ev);
3317 case 0: /* was in heap; is still on heap. */
3318 min_heap_adjust_(&base->timeheap, ev);
3321 EVUTIL_ASSERT(0); /* unreachable */
3327 /* Add 'ev' to the common timeout list in 'ctl'. */
3329 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3333 /* By all logic, we should just be able to append 'ev' to the end of
3334 * ctl->events, since the timeout on each 'ev' is set to {the common
3335 * timeout} + {the time when we add the event}, and so the events
3336  * should arrive in order of their timeouts. But just in case
3337  * there's some wacky threading issue going on, we do a search from
3338  * the end of 'ctl->events' to find the right insertion point.
3340 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3341 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3342 /* This timercmp is a little sneaky, since both ev and e have
3343 * magic values in tv_usec. Fortunately, they ought to have
3344 * the _same_ magic values in tv_usec. Let's assert for that.
3347 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3348 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3349 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3350 ev_timeout_pos.ev_next_with_common_timeout);
3354 TAILQ_INSERT_HEAD(&ctl->events, ev,
3355 ev_timeout_pos.ev_next_with_common_timeout);
3359 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3361 EVENT_BASE_ASSERT_LOCKED(base);
3363 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3364 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3365 ev, EV_SOCK_ARG(ev->ev_fd));
3369 INCR_EVENT_COUNT(base, ev->ev_flags);
3371 ev->ev_flags |= EVLIST_INSERTED;
3375 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3377 EVENT_BASE_ASSERT_LOCKED(base);
3379 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3380 /* Double insertion is possible for active events */
3384 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3386 evcb->evcb_flags |= EVLIST_ACTIVE;
3388 base->event_count_active++;
3389 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3390 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3391 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3392 evcb, evcb_active_next);
3396 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3398 EVENT_BASE_ASSERT_LOCKED(base);
3399 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3400 /* Double insertion is possible */
3404 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3405 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3406 base->event_count_active++;
3407 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3408 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3409 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3413 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3415 EVENT_BASE_ASSERT_LOCKED(base);
3417 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3418 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3419 ev, EV_SOCK_ARG(ev->ev_fd));
3423 INCR_EVENT_COUNT(base, ev->ev_flags);
3425 ev->ev_flags |= EVLIST_TIMEOUT;
3427 if (is_common_timeout(&ev->ev_timeout, base)) {
3428 struct common_timeout_list *ctl =
3429 get_common_timeout_list(base, &ev->ev_timeout);
3430 insert_common_timeout_inorder(ctl, ev);
3432 min_heap_push_(&base->timeheap, ev);
3437 event_queue_make_later_events_active(struct event_base *base)
3439 struct event_callback *evcb;
3440 EVENT_BASE_ASSERT_LOCKED(base);
3442 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3443 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3444 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3445 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3446 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3447 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3451 /* Functions for debugging */
3454 event_get_version(void)
3456 return (EVENT__VERSION);
3460 event_get_version_number(void)
3462 return (EVENT__NUMERIC_VERSION);
3466  * No thread-safe interface needed - the information should be the same for all threads. */
3471 event_get_method(void)
3473 return (current_base->evsel->name);
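/* Usage sketch (illustrative): reporting what this build is running on.
 * event_base_get_method() is the per-base variant; event_get_method()
 * above consults the deprecated global current_base:
 *
 *    printf("libevent %s using %s\n",
 *        event_get_version(), event_base_get_method(base));
 */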
3476 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3477 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3478 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3479 static void (*mm_free_fn_)(void *p) = NULL;
3482 event_mm_malloc_(size_t sz)
3488 return mm_malloc_fn_(sz);
3494 event_mm_calloc_(size_t count, size_t size)
3496 if (count == 0 || size == 0)
3499 if (mm_malloc_fn_) {
3500 size_t sz = count * size;
3502 if (count > EV_SIZE_MAX / size)
3504 p = mm_malloc_fn_(sz);
3506 return memset(p, 0, sz);
3508 void *p = calloc(count, size);
3510 /* Windows calloc doesn't reliably set ENOMEM */
3523 event_mm_strdup_(const char *str)
3530 if (mm_malloc_fn_) {
3531 size_t ln = strlen(str);
3533 if (ln == EV_SIZE_MAX)
3535 p = mm_malloc_fn_(ln+1);
3537 return memcpy(p, str, ln+1);
3540 return _strdup(str);
3551 event_mm_realloc_(void *ptr, size_t sz)
3554 return mm_realloc_fn_(ptr, sz);
3556 return realloc(ptr, sz);
3560 event_mm_free_(void *ptr)
3569 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3570 void *(*realloc_fn)(void *ptr, size_t sz),
3571 void (*free_fn)(void *ptr))
3573 mm_malloc_fn_ = malloc_fn;
3574 mm_realloc_fn_ = realloc_fn;
3575 mm_free_fn_ = free_fn;
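/* Usage sketch (illustrative): installing custom allocators; the wrappers
 * below are hypothetical.  This must happen before libevent allocates
 * anything, or malloc/free pairs could be mismatched:
 *
 *    static void *my_malloc(size_t sz) { return malloc(sz); }
 *    static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *    static void my_free(void *p) { free(p); }
 *    ...
 *    event_set_mem_functions(my_malloc, my_realloc, my_free);
 */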
3579 #ifdef EVENT__HAVE_EVENTFD
3581 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3585 struct event_base *base = arg;
3587 r = read(fd, (void*) &msg, sizeof(msg));
3588 if (r<0 && errno != EAGAIN) {
3589 event_sock_warn(fd, "Error reading from eventfd");
3591 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3592 base->is_notify_pending = 0;
3593 EVBASE_RELEASE_LOCK(base, th_base_lock);
3598 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3600 unsigned char buf[1024];
3601 struct event_base *base = arg;
3603 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3606 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3610 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3611 base->is_notify_pending = 0;
3612 EVBASE_RELEASE_LOCK(base, th_base_lock);
3616 evthread_make_base_notifiable(struct event_base *base)
3622 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3623 r = evthread_make_base_notifiable_nolock_(base);
3624 EVBASE_RELEASE_LOCK(base, th_base_lock);
3629 evthread_make_base_notifiable_nolock_(struct event_base *base)
3631 void (*cb)(evutil_socket_t, short, void *);
3632 int (*notify)(struct event_base *);
3634 if (base->th_notify_fn != NULL) {
3635 /* The base is already notifiable: we're doing fine. */
3639 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3640 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3641 base->th_notify_fn = event_kq_notify_base_;
3642 /* No need to add an event here; the backend can wake
3643 * itself up just fine. */
3648 #ifdef EVENT__HAVE_EVENTFD
3649 base->th_notify_fd[0] = evutil_eventfd_(0,
3650 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3651 if (base->th_notify_fd[0] >= 0) {
3652 base->th_notify_fd[1] = -1;
3653 notify = evthread_notify_base_eventfd;
3654 cb = evthread_notify_drain_eventfd;
3657 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3658 notify = evthread_notify_base_default;
3659 cb = evthread_notify_drain_default;
3664 base->th_notify_fn = notify;
3666 /* prepare an event that we can use for wakeup */
3667 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3668 EV_READ|EV_PERSIST, cb, base);
3670 /* we need to mark this as an internal event */
3671 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3672 event_priority_set(&base->th_notify, 0);
3674 return event_add_nolock_(&base->th_notify, NULL, 0);
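/* Usage sketch (illustrative): multithreaded callers normally get a
 * notifiable base for free by enabling threading before creating it:
 *
 *    evthread_use_pthreads();    // once, at program startup
 *    struct event_base *base = event_base_new();
 *    // event_add()/event_active() from other threads now wake the loop
 */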
3678 event_base_foreach_event_nolock_(struct event_base *base,
3679 event_base_foreach_event_cb fn, void *arg)
3685 /* Start out with all the EVLIST_INSERTED events. */
3686 if ((r = evmap_foreach_event_(base, fn, arg)))
3689 /* Okay, now we deal with those events that have timeouts and are in the min-heap. */
3691 for (u = 0; u < base->timeheap.n; ++u) {
3692 ev = base->timeheap.p[u];
3693 if (ev->ev_flags & EVLIST_INSERTED) {
3694 /* we already processed this one */
3697 if ((r = fn(base, ev, arg)))
3701 /* Now for the events in one of the timeout queues. */
3703 for (i = 0; i < base->n_common_timeouts; ++i) {
3704 struct common_timeout_list *ctl =
3705 base->common_timeout_queues[i];
3706 TAILQ_FOREACH(ev, &ctl->events,
3707 ev_timeout_pos.ev_next_with_common_timeout) {
3708 if (ev->ev_flags & EVLIST_INSERTED) {
3709 /* we already processed this one */
3712 if ((r = fn(base, ev, arg)))
3717 /* Finally, we deal with all the active events that we haven't touched yet. */
3719 for (i = 0; i < base->nactivequeues; ++i) {
3720 struct event_callback *evcb;
3721 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3722 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3723 /* This isn't an event (evlist_init clear), or
3724  * we already processed it (inserted or timeout set). */
3728 ev = event_callback_to_event(evcb);
3729 if ((r = fn(base, ev, arg)))
3737 /* Helper for event_base_dump_events: called on each event in the event base;
3738 * dumps only the inserted events. */
3740 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3743 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3746 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3749 fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3750 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3751 (e->ev_events&EV_READ)?" Read":"",
3752 (e->ev_events&EV_WRITE)?" Write":"",
3753 (e->ev_events&EV_CLOSED)?" EOF":"",
3754 (e->ev_events&EV_SIGNAL)?" Signal":"",
3755 (e->ev_events&EV_PERSIST)?" Persist":"",
3756 (e->ev_events&EV_ET)?" ET":"",
3757 (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3758 if (e->ev_flags & EVLIST_TIMEOUT) {
3760 tv.tv_sec = e->ev_timeout.tv_sec;
3761 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3762 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3763 fprintf(output, " Timeout=%ld.%06d",
3764 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3766 fputc('\n', output);
3771 /* Helper for event_base_dump_events: called on each event in the event base;
3772 * dumps only the active events. */
3774 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3777 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3780 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3783 fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3784 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3785 (e->ev_res&EV_READ)?" Read":"",
3786 (e->ev_res&EV_WRITE)?" Write":"",
3787 (e->ev_res&EV_CLOSED)?" EOF":"",
3788 (e->ev_res&EV_SIGNAL)?" Signal":"",
3789 (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3790 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3791 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3797 event_base_foreach_event(struct event_base *base,
3798 event_base_foreach_event_cb fn, void *arg)
3801 if ((!fn) || (!base)) {
3804 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3805 r = event_base_foreach_event_nolock_(base, fn, arg);
3806 EVBASE_RELEASE_LOCK(base, th_base_lock);
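/* Usage sketch (illustrative): counting events with a foreach callback.
 * Returning nonzero from the callback stops the iteration early:
 *
 *    static int count_cb(const struct event_base *b,
 *        const struct event *e, void *arg)
 *    {
 *        (void)b; (void)e;
 *        ++*(int *)arg;
 *        return 0;    // keep going
 *    }
 *    ...
 *    int n = 0;
 *    event_base_foreach_event(base, count_cb, &n);
 */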
3812 event_base_dump_events(struct event_base *base, FILE *output)
3814 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3815 fprintf(output, "Inserted events:\n");
3816 event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3818 fprintf(output, "Active events:\n");
3819 event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3820 EVBASE_RELEASE_LOCK(base, th_base_lock);
3824 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3826 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3828 /* Activate any non-timer events */
3829 if (!(events & EV_TIMEOUT)) {
3830 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3832 /* If we want to activate timer events, loop and activate each event with
3833 * the same fd in both the timeheap and common timeouts list */
3838 for (u = 0; u < base->timeheap.n; ++u) {
3839 ev = base->timeheap.p[u];
3840 if (ev->ev_fd == fd) {
3841 event_active_nolock_(ev, EV_TIMEOUT, 1);
3845 for (i = 0; i < base->n_common_timeouts; ++i) {
3846 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3847 TAILQ_FOREACH(ev, &ctl->events,
3848 ev_timeout_pos.ev_next_with_common_timeout) {
3849 if (ev->ev_fd == fd) {
3850 event_active_nolock_(ev, EV_TIMEOUT, 1);
3856 EVBASE_RELEASE_LOCK(base, th_base_lock);
3860 event_base_active_by_signal(struct event_base *base, int sig)
3862 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3863 evmap_signal_active_(base, sig, 1);
3864 EVBASE_RELEASE_LOCK(base, th_base_lock);
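/* Usage sketch (illustrative): forcibly firing everything listening on a
 * descriptor or a signal, e.g. to unstick consumers at shutdown:
 *
 *    event_base_active_by_fd(base, fd, EV_READ|EV_WRITE);
 *    event_base_active_by_signal(base, SIGTERM);
 */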
3869 event_base_add_virtual_(struct event_base *base)
3871 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3872 base->virtual_event_count++;
3873 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3874 EVBASE_RELEASE_LOCK(base, th_base_lock);
3878 event_base_del_virtual_(struct event_base *base)
3880 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3881 EVUTIL_ASSERT(base->virtual_event_count > 0);
3882 base->virtual_event_count--;
3883 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3884 evthread_notify_base(base);
3885 EVBASE_RELEASE_LOCK(base, th_base_lock);
3889 event_free_debug_globals_locks(void)
3891 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3892 #ifndef EVENT__DISABLE_DEBUG_MODE
3893 if (event_debug_map_lock_ != NULL) {
3894 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3895 event_debug_map_lock_ = NULL;
3896 evthreadimpl_disable_lock_debugging_();
3898 #endif /* EVENT__DISABLE_DEBUG_MODE */
3899 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3904 event_free_debug_globals(void)
3906 event_free_debug_globals_locks();
3910 event_free_evsig_globals(void)
3912 evsig_free_globals_();
3916 event_free_evutil_globals(void)
3918 evutil_free_globals_();
3922 event_free_globals(void)
3924 event_free_debug_globals();
3925 event_free_evsig_globals();
3926 event_free_evutil_globals();
3930 libevent_global_shutdown(void)
3932 event_disable_debug_mode();
3933 event_free_globals();
3936 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3938 event_global_setup_locks_(const int enable_locks)
3940 #ifndef EVENT__DISABLE_DEBUG_MODE
3941 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3943 if (evsig_global_setup_locks_(enable_locks) < 0)
3945 if (evutil_global_setup_locks_(enable_locks) < 0)
3947 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3954 event_base_assert_ok_(struct event_base *base)
3956 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3957 event_base_assert_ok_nolock_(base);
3958 EVBASE_RELEASE_LOCK(base, th_base_lock);
3962 event_base_assert_ok_nolock_(struct event_base *base)
3968 /* First do checks on the per-fd and per-signal lists */
3969 evmap_check_integrity_(base);
3971 /* Check the heap property */
3972 for (u = 1; u < base->timeheap.n; ++u) {
3973 size_t parent = (u - 1) / 2;
3974 struct event *ev, *p_ev;
3975 ev = base->timeheap.p[u];
3976 p_ev = base->timeheap.p[parent];
3977 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3978 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3979 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
3982 /* Check that the common timeouts are fine */
3983 for (i = 0; i < base->n_common_timeouts; ++i) {
3984 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3985 struct event *last=NULL, *ev;
3987 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3989 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3991 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3992 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3993 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3994 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3999 /* Check the active queues. */
4001 for (i = 0; i < base->nactivequeues; ++i) {
4002 struct event_callback *evcb;
4003 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4004 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4005 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4006 EVUTIL_ASSERT(evcb->evcb_pri == i);
4012 struct event_callback *evcb;
4013 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4014 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4018 EVUTIL_ASSERT(count == base->event_count_active);