2 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #include "event2/event-config.h"
27 #include "evconfig-private.h"
31 #define WIN32_LEAN_AND_MEAN
33 #undef WIN32_LEAN_AND_MEAN
35 #include <sys/types.h>
36 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
39 #include <sys/queue.h>
51 #include "event-internal.h"
52 #include "evmap-internal.h"
53 #include "mm-internal.h"
54 #include "changelist-internal.h"
56 /** An entry for an evmap_io list: notes all the events that want to read or
57 write on a given fd, and the number of each.
60 struct event_dlist events;
66 /* An entry for an evmap_signal list: notes all the events that want to know
67 when a signal triggers. */
69 struct event_dlist events;
/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
	    0.5, mm_malloc, mm_realloc, mm_free)

/* Set 'x' to the evmap_io for 'slot' in 'map', or to NULL if there is none. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		ent_ = HT_FIND(event_io_map, map, &key_);		\
		(x) = ent_ ? &ent_->ent.type : NULL;			\
	} while (0);

/* As GET_IO_SLOT, but allocate and construct (with 'ctor') the entry for
 * 'slot' if it is missing.  Makes the enclosing function return -1 on
 * allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
				});					\
		(x) = &ent_->ent.type;					\
	} while (0)

void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not
   present, by allocating enough memory for a 'struct type', and initializing
   the new value by calling the function 'ctor' on it.  Makes the enclosing
   function return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)

#ifndef EVMAP_USE_HT
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap_(struct event_io_map *ctx)
{
	evmap_signal_initmap_(ctx);
}
void
evmap_io_clear_(struct event_io_map *ctx)
{
	evmap_signal_clear_(ctx);
}
#endif
201 /** Expand 'map' with new entries of width 'msize' until it is big enough
202 to store a value in 'slot'.
205 evmap_make_space(struct event_signal_map *map, int slot, int msize)
207 if (map->nentries <= slot) {
208 int nentries = map->nentries ? map->nentries : 32;
211 if (slot > INT_MAX / 2)
214 while (nentries <= slot)
217 if (nentries > INT_MAX / msize)
220 tmp = (void **)mm_realloc(map->entries, nentries * msize);
224 memset(&tmp[map->nentries], 0,
225 (nentries - map->nentries) * msize);
227 map->nentries = nentries;
235 evmap_signal_initmap_(struct event_signal_map *ctx)
242 evmap_signal_clear_(struct event_signal_map *ctx)
244 if (ctx->entries != NULL) {
246 for (i = 0; i < ctx->nentries; ++i) {
247 if (ctx->entries[i] != NULL)
248 mm_free(ctx->entries[i]);
250 mm_free(ctx->entries);
257 /* code specific to file descriptors */
259 /** Constructor for struct evmap_io */
261 evmap_io_init(struct evmap_io *entry)
263 LIST_INIT(&entry->events);
270 /* return -1 on error, 0 on success if nothing changed in the event backend,
271 * and 1 on success if something did. */
273 evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
275 const struct eventop *evsel = base->evsel;
276 struct event_io_map *io = &base->io;
277 struct evmap_io *ctx = NULL;
278 int nread, nwrite, nclose, retval = 0;
279 short res = 0, old = 0;
280 struct event *old_ev;
282 EVUTIL_ASSERT(fd == ev->ev_fd);
288 if (fd >= io->nentries) {
289 if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
293 GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
297 nwrite = ctx->nwrite;
298 nclose = ctx->nclose;
307 if (ev->ev_events & EV_READ) {
311 if (ev->ev_events & EV_WRITE) {
315 if (ev->ev_events & EV_CLOSED) {
319 if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
320 event_warnx("Too many events reading or writing on fd %d",
324 if (EVENT_DEBUG_MODE_IS_ON() &&
325 (old_ev = LIST_FIRST(&ctx->events)) &&
326 (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
327 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
328 " events on fd %d", (int)fd);
333 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
334 /* XXX(niels): we cannot mix edge-triggered and
335 * level-triggered, we should probably assert on
337 if (evsel->add(base, ev->ev_fd,
338 old, (ev->ev_events & EV_ET) | res, extra) == -1)
343 ctx->nread = (ev_uint16_t) nread;
344 ctx->nwrite = (ev_uint16_t) nwrite;
345 ctx->nclose = (ev_uint16_t) nclose;
346 LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);
351 /* return -1 on error, 0 on success if nothing changed in the event backend,
352 * and 1 on success if something did. */
354 evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
356 const struct eventop *evsel = base->evsel;
357 struct event_io_map *io = &base->io;
358 struct evmap_io *ctx;
359 int nread, nwrite, nclose, retval = 0;
360 short res = 0, old = 0;
365 EVUTIL_ASSERT(fd == ev->ev_fd);
368 if (fd >= io->nentries)
372 GET_IO_SLOT(ctx, io, fd, evmap_io);
375 nwrite = ctx->nwrite;
376 nclose = ctx->nclose;
385 if (ev->ev_events & EV_READ) {
388 EVUTIL_ASSERT(nread >= 0);
390 if (ev->ev_events & EV_WRITE) {
393 EVUTIL_ASSERT(nwrite >= 0);
395 if (ev->ev_events & EV_CLOSED) {
398 EVUTIL_ASSERT(nclose >= 0);
402 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
403 if (evsel->del(base, ev->ev_fd,
404 old, (ev->ev_events & EV_ET) | res, extra) == -1) {
412 ctx->nwrite = nwrite;
413 ctx->nclose = nclose;
414 LIST_REMOVE(ev, ev_io_next);
420 evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
422 struct event_io_map *io = &base->io;
423 struct evmap_io *ctx;
427 if (fd < 0 || fd >= io->nentries)
430 GET_IO_SLOT(ctx, io, fd, evmap_io);
434 LIST_FOREACH(ev, &ctx->events, ev_io_next) {
435 if (ev->ev_events & events)
436 event_active_nolock_(ev, ev->ev_events & events, 1);
440 /* code specific to signals */
443 evmap_signal_init(struct evmap_signal *entry)
445 LIST_INIT(&entry->events);
450 evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
452 const struct eventop *evsel = base->evsigsel;
453 struct event_signal_map *map = &base->sigmap;
454 struct evmap_signal *ctx = NULL;
456 if (sig < 0 || sig >= NSIG)
459 if (sig >= map->nentries) {
460 if (evmap_make_space(
461 map, sig, sizeof(struct evmap_signal *)) == -1)
464 GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
465 base->evsigsel->fdinfo_len);
467 if (LIST_EMPTY(&ctx->events)) {
468 if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
473 LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);
479 evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
481 const struct eventop *evsel = base->evsigsel;
482 struct event_signal_map *map = &base->sigmap;
483 struct evmap_signal *ctx;
485 if (sig < 0 || sig >= map->nentries)
488 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
490 LIST_REMOVE(ev, ev_signal_next);
492 if (LIST_FIRST(&ctx->events) == NULL) {
493 if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
501 evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
503 struct event_signal_map *map = &base->sigmap;
504 struct evmap_signal *ctx;
507 if (sig < 0 || sig >= map->nentries)
509 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
513 LIST_FOREACH(ev, &ctx->events, ev_signal_next)
514 event_active_nolock_(ev, EV_SIGNAL, ncalls);
518 evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
520 struct evmap_io *ctx;
521 GET_IO_SLOT(ctx, map, fd, evmap_io);
523 return ((char*)ctx) + sizeof(struct evmap_io);
528 /* Callback type for evmap_io_foreach_fd */
529 typedef int (*evmap_io_foreach_fd_cb)(
530 struct event_base *, evutil_socket_t, struct evmap_io *, void *);
532 /* Multipurpose helper function: Iterate over every file descriptor event_base
533 * for which we could have EV_READ or EV_WRITE events. For each such fd, call
534 * fn(base, signum, evmap_io, arg), where fn is the user-provided
535 * function, base is the event_base, signum is the signal number, evmap_io
536 * is an evmap_io structure containing a list of events pending on the
537 * file descriptor, and arg is the user-supplied argument.
539 * If fn returns 0, continue on to the next signal. Otherwise, return the same
540 * value that fn returned.
542 * Note that there is no guarantee that the file descriptors will be processed
543 * in any particular order.
546 evmap_io_foreach_fd(struct event_base *base,
547 evmap_io_foreach_fd_cb fn,
551 struct event_io_map *iomap = &base->io;
554 struct event_map_entry **mapent;
555 HT_FOREACH(mapent, event_io_map, iomap) {
556 struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
559 for (fd = 0; fd < iomap->nentries; ++fd) {
560 struct evmap_io *ctx = iomap->entries[fd];
564 if ((r = fn(base, fd, ctx, arg)))
570 /* Callback type for evmap_signal_foreach_signal */
571 typedef int (*evmap_signal_foreach_signal_cb)(
572 struct event_base *, int, struct evmap_signal *, void *);
574 /* Multipurpose helper function: Iterate over every signal number in the
575 * event_base for which we could have signal events. For each such signal,
576 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
577 * function, base is the event_base, signum is the signal number, evmap_signal
578 * is an evmap_signal structure containing a list of events pending on the
579 * signal, and arg is the user-supplied argument.
581 * If fn returns 0, continue on to the next signal. Otherwise, return the same
582 * value that fn returned.
585 evmap_signal_foreach_signal(struct event_base *base,
586 evmap_signal_foreach_signal_cb fn,
589 struct event_signal_map *sigmap = &base->sigmap;
593 for (signum = 0; signum < sigmap->nentries; ++signum) {
594 struct evmap_signal *ctx = sigmap->entries[signum];
597 if ((r = fn(base, signum, ctx, arg)))
603 /* Helper for evmap_reinit_: tell the backend to add every fd for which we have
604 * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
607 evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
608 struct evmap_io *ctx, void *arg)
610 const struct eventop *evsel = base->evsel;
617 extra = ((char*)ctx) + sizeof(struct evmap_io);
624 if (evsel->fdinfo_len)
625 memset(extra, 0, evsel->fdinfo_len);
627 (ev = LIST_FIRST(&ctx->events)) &&
628 (ev->ev_events & EV_ET))
630 if (evsel->add(base, fd, 0, events, extra) == -1)
636 /* Helper for evmap_reinit_: tell the backend to add every signal for which we
637 * have pending events. */
639 evmap_signal_reinit_iter_fn(struct event_base *base,
640 int signum, struct evmap_signal *ctx, void *arg)
642 const struct eventop *evsel = base->evsigsel;
645 if (!LIST_EMPTY(&ctx->events)) {
646 if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
653 evmap_reinit_(struct event_base *base)
657 evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
660 evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
666 /* Helper for evmap_delete_all_: delete every event in an event_dlist. */
668 delete_all_in_dlist(struct event_dlist *dlist)
671 while ((ev = LIST_FIRST(dlist)))
676 /* Helper for evmap_delete_all_: delete every event pending on an fd. */
678 evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
679 struct evmap_io *io_info, void *arg)
681 return delete_all_in_dlist(&io_info->events);
684 /* Helper for evmap_delete_all_: delete every event pending on a signal. */
686 evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
687 struct evmap_signal *sig_info, void *arg)
689 return delete_all_in_dlist(&sig_info->events);
693 evmap_delete_all_(struct event_base *base)
695 evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
696 evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
699 /** Per-fd structure for use with changelists. It keeps track, for each fd or
700 * signal using the changelist, of where its entry in the changelist is.
702 struct event_changelist_fdinfo {
703 int idxplus1; /* this is the index +1, so that memset(0) will make it
704 * a no-such-element */
708 event_changelist_init_(struct event_changelist *changelist)
710 changelist->changes = NULL;
711 changelist->changes_size = 0;
712 changelist->n_changes = 0;
715 /** Helper: return the changelist_fdinfo corresponding to a given change. */
716 static inline struct event_changelist_fdinfo *
717 event_change_get_fdinfo(struct event_base *base,
718 const struct event_change *change)
721 if (change->read_change & EV_CHANGE_SIGNAL) {
722 struct evmap_signal *ctx;
723 GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
724 ptr = ((char*)ctx) + sizeof(struct evmap_signal);
726 struct evmap_io *ctx;
727 GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
728 ptr = ((char*)ctx) + sizeof(struct evmap_io);
733 /** Callback helper for event_changelist_assert_ok */
735 event_changelist_assert_ok_foreach_iter_fn(
736 struct event_base *base,
737 evutil_socket_t fd, struct evmap_io *io, void *arg)
739 struct event_changelist *changelist = &base->changelist;
740 struct event_changelist_fdinfo *f;
742 ( ((char*)io) + sizeof(struct evmap_io) );
744 struct event_change *c = &changelist->changes[f->idxplus1 - 1];
745 EVUTIL_ASSERT(c->fd == fd);
750 /** Make sure that the changelist is consistent with the evmap structures. */
752 event_changelist_assert_ok(struct event_base *base)
755 struct event_changelist *changelist = &base->changelist;
757 EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
758 for (i = 0; i < changelist->n_changes; ++i) {
759 struct event_change *c = &changelist->changes[i];
760 struct event_changelist_fdinfo *f;
761 EVUTIL_ASSERT(c->fd >= 0);
762 f = event_change_get_fdinfo(base, c);
764 EVUTIL_ASSERT(f->idxplus1 == i + 1);
767 evmap_io_foreach_fd(base,
768 event_changelist_assert_ok_foreach_iter_fn,
772 #ifdef DEBUG_CHANGELIST
773 #define event_changelist_check(base) event_changelist_assert_ok((base))
775 #define event_changelist_check(base) ((void)0)
779 event_changelist_remove_all_(struct event_changelist *changelist,
780 struct event_base *base)
784 event_changelist_check(base);
786 for (i = 0; i < changelist->n_changes; ++i) {
787 struct event_change *ch = &changelist->changes[i];
788 struct event_changelist_fdinfo *fdinfo =
789 event_change_get_fdinfo(base, ch);
790 EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
791 fdinfo->idxplus1 = 0;
794 changelist->n_changes = 0;
796 event_changelist_check(base);
800 event_changelist_freemem_(struct event_changelist *changelist)
802 if (changelist->changes)
803 mm_free(changelist->changes);
804 event_changelist_init_(changelist); /* zero it all out. */
807 /** Increase the size of 'changelist' to hold more changes. */
809 event_changelist_grow(struct event_changelist *changelist)
812 struct event_change *new_changes;
813 if (changelist->changes_size < 64)
816 new_size = changelist->changes_size * 2;
818 new_changes = mm_realloc(changelist->changes,
819 new_size * sizeof(struct event_change));
821 if (EVUTIL_UNLIKELY(new_changes == NULL))
824 changelist->changes = new_changes;
825 changelist->changes_size = new_size;
830 /** Return a pointer to the changelist entry for the file descriptor or signal
831 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
832 * old_events field to old_events.
834 static struct event_change *
835 event_changelist_get_or_construct(struct event_changelist *changelist,
838 struct event_changelist_fdinfo *fdinfo)
840 struct event_change *change;
842 if (fdinfo->idxplus1 == 0) {
844 EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
846 if (changelist->n_changes == changelist->changes_size) {
847 if (event_changelist_grow(changelist) < 0)
851 idx = changelist->n_changes++;
852 change = &changelist->changes[idx];
853 fdinfo->idxplus1 = idx + 1;
855 memset(change, 0, sizeof(struct event_change));
857 change->old_events = old_events;
859 change = &changelist->changes[fdinfo->idxplus1 - 1];
860 EVUTIL_ASSERT(change->fd == fd);
866 event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
869 struct event_changelist *changelist = &base->changelist;
870 struct event_changelist_fdinfo *fdinfo = p;
871 struct event_change *change;
872 ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
874 event_changelist_check(base);
876 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
880 /* An add replaces any previous delete, but doesn't result in a no-op,
881 * since the delete might fail (because the fd had been closed since
882 * the last add, for instance. */
884 if (events & (EV_READ|EV_SIGNAL))
885 change->read_change = evchange;
886 if (events & EV_WRITE)
887 change->write_change = evchange;
888 if (events & EV_CLOSED)
889 change->close_change = evchange;
891 event_changelist_check(base);
896 event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
899 struct event_changelist *changelist = &base->changelist;
900 struct event_changelist_fdinfo *fdinfo = p;
901 struct event_change *change;
902 ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET);
904 event_changelist_check(base);
905 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
906 event_changelist_check(base);
910 /* A delete on an event set that doesn't contain the event to be
911 deleted produces a no-op. This effectively emoves any previous
912 uncommitted add, rather than replacing it: on those platforms where
913 "add, delete, dispatch" is not the same as "no-op, dispatch", we
914 want the no-op behavior.
916 If we have a no-op item, we could remove it it from the list
917 entirely, but really there's not much point: skipping the no-op
918 change when we do the dispatch later is far cheaper than rejuggling
921 As this stands, it also lets through deletions of events that are
925 if (events & (EV_READ|EV_SIGNAL)) {
926 if (!(change->old_events & (EV_READ | EV_SIGNAL)))
927 change->read_change = 0;
929 change->read_change = del;
931 if (events & EV_WRITE) {
932 if (!(change->old_events & EV_WRITE))
933 change->write_change = 0;
935 change->write_change = del;
937 if (events & EV_CLOSED) {
938 if (!(change->old_events & EV_CLOSED))
939 change->close_change = 0;
941 change->close_change = del;
944 event_changelist_check(base);
948 /* Helper for evmap_check_integrity_: verify that all of the events pending on
949 * given fd are set up correctly, and that the nread and nwrite counts on that
952 evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
953 struct evmap_io *io_info, void *arg)
956 int n_read = 0, n_write = 0, n_close = 0;
958 /* First, make sure the list itself isn't corrupt. Otherwise,
959 * running LIST_FOREACH could be an exciting adventure. */
960 EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);
962 LIST_FOREACH(ev, &io_info->events, ev_io_next) {
963 EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
964 EVUTIL_ASSERT(ev->ev_fd == fd);
965 EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
966 EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
967 if (ev->ev_events & EV_READ)
969 if (ev->ev_events & EV_WRITE)
971 if (ev->ev_events & EV_CLOSED)
975 EVUTIL_ASSERT(n_read == io_info->nread);
976 EVUTIL_ASSERT(n_write == io_info->nwrite);
977 EVUTIL_ASSERT(n_close == io_info->nclose);
982 /* Helper for evmap_check_integrity_: verify that all of the events pending
983 * on given signal are set up correctly. */
985 evmap_signal_check_integrity_fn(struct event_base *base,
986 int signum, struct evmap_signal *sig_info, void *arg)
989 /* First, make sure the list itself isn't corrupt. */
990 EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);
992 LIST_FOREACH(ev, &sig_info->events, ev_io_next) {
993 EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
994 EVUTIL_ASSERT(ev->ev_fd == signum);
995 EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
996 EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
1002 evmap_check_integrity_(struct event_base *base)
1004 evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
1005 evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);
1007 if (base->evsel->add == event_changelist_add_)
1008 event_changelist_assert_ok(base);
1011 /* Helper type for evmap_foreach_event_: Bundles a function to call on every
1012 * event, and the user-provided void* to use as its third argument. */
1013 struct evmap_foreach_event_helper {
1014 event_base_foreach_event_cb fn;
1018 /* Helper for evmap_foreach_event_: calls a provided function on every event
1019 * pending on a given fd. */
1021 evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
1022 struct evmap_io *io_info, void *arg)
1024 struct evmap_foreach_event_helper *h = arg;
1027 LIST_FOREACH(ev, &io_info->events, ev_io_next) {
1028 if ((r = h->fn(base, ev, h->arg)))
1034 /* Helper for evmap_foreach_event_: calls a provided function on every event
1035 * pending on a given signal. */
1037 evmap_signal_foreach_event_fn(struct event_base *base, int signum,
1038 struct evmap_signal *sig_info, void *arg)
1041 struct evmap_foreach_event_helper *h = arg;
1043 LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
1044 if ((r = h->fn(base, ev, h->arg)))
1051 evmap_foreach_event_(struct event_base *base,
1052 event_base_foreach_event_cb fn, void *arg)
1054 struct evmap_foreach_event_helper h;
1058 if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
1060 return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);