2 * Copyright (c) 2009 Mark Heily <mark@heily.com>
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 #include <sys/queue.h>
24 #include <sys/socket.h>
25 #include <sys/types.h>
30 #include "sys/event.h"
33 /* A request to sleep for a certain time */
/* NOTE(review): the `struct sleepreq {` opening line is not visible in this
   chunk. The fields below are copied by value into each sleeper thread
   (see the memcpy at the top of sleeper_thread). */
35 int pfd; /* fd to poll for ACKs */
36 int wfd; /* fd to wake up when sleep is over */
37 uintptr_t ident; /* from kevent */
38 intptr_t interval; /* sleep time, in milliseconds */
39 struct sleepstat *stat; /* NOTE(review): struct sleepstat declaration not visible in this chunk */
42 /* Information about a successful sleep operation */
/* NOTE(review): the `struct sleepinfo {` opening line is not visible here.
   Presumably this is the record written over the socketpair by the sleeper
   and read back by evfilt_timer_copyout (`si` uses these field names) --
   confirm against the full source. */
44 uintptr_t ident; /* from kevent */
45 uintptr_t counter; /* number of times the timer expired */
49 sleeper_thread(void *arg)
/*
 * Worker thread backing one EVFILT_TIMER knote: sleeps for the requested
 * interval, then signals the kevent side over the socketpair each time the
 * interval elapses.
 *
 * NOTE(review): this chunk is missing several lines of the function (the
 * return type, local declarations, the surrounding loop construct, some
 * error-handling branches, and closing braces). Comments below describe
 * only the visible statements.
 */
53 struct timespec req, rem;
56 bool cts = true; /* Clear To Send */
/* Allow immediate cancellation: the thread spends nearly all of its life
   blocked inside nanosleep(2), and _timer_delete stops it via
   pthread_cancel. */
59 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
61 /* Copyin the request */
62 memcpy(&sr, arg, sizeof(sr));
65 /* Initialize the response */
69 /* Convert milliseconds into seconds+nanoseconds */
70 req.tv_sec = sr.interval / 1000;
71 req.tv_nsec = (sr.interval % 1000) * 1000000;
73 /* Block all signals */
75 (void) pthread_sigmask(SIG_BLOCK, &mask, NULL);
80 if (nanosleep(&req, &rem) < 0) {
81 //TODO: handle eintr, spurious wakeups
82 dbg_perror("nanosleep(2)");
85 dbg_printf(" -------- sleep over (CTS=%d)----------", cts);
87 /* Test if the previous wakeup has been acknowledged */
/* NOTE(review): this reads from sr.wfd, although sr.pfd is the field
   described as "fd to poll for ACKs" -- confirm against the full source
   whether the descriptors are intentionally swapped here. */
89 cnt = read(sr.wfd, &buf, 1);
91 if (errno == EAGAIN || errno == EWOULDBLOCK) {
/* Non-blocking read with no ACK pending yet -- not a real error. */
94 dbg_perror("read(2)");
97 } else if (cnt == 0) {
/* cnt == 0 is EOF (peer closed), not an errno condition; dbg_perror here
   will print whatever stale errno happens to be set -- NOTE(review). */
98 dbg_perror("short read(2)");
105 /* Wake up kevent waiters if they are ready */
107 cnt = write(sr.wfd, &si, sizeof(si));
109 /* FIXME: handle EAGAIN and EINTR */
110 dbg_perror("write(2)");
111 } else if (cnt < sizeof(si)) {
112 dbg_puts("FIXME: handle short write");
123 _timer_create(struct filter *filt, struct knote *kn)
/*
 * Create a detached sleeper thread implementing one EVFILT_TIMER knote.
 * The thread id is stored in kn->data.tid so _timer_delete can cancel it.
 *
 * NOTE(review): the return type, local `attr` declaration, the malloc
 * NULL-check `if` line, and the success/failure return statements are not
 * visible in this chunk.
 */
126 struct sleepreq *req;
/* kqueue timers are inherently edge-triggered; force EV_CLEAR. */
127 kn->kev.flags |= EV_CLEAR;
129 req = malloc(sizeof(*req));
131 dbg_perror("malloc");
/* Hand the sleeper its own copy of the descriptors and timer parameters.
   NOTE(review): ownership of `req` after pthread_create is not visible in
   this chunk; on pthread_create failure below `req` appears to leak --
   confirm whether a free() exists on the hidden error path. */
134 req->pfd = filt->kf_pfd;
135 req->wfd = filt->kf_wfd;
136 req->ident = kn->kev.ident;
137 req->interval = kn->kev.data;
/* Detached: nobody joins the sleeper; it is stopped via pthread_cancel. */
139 pthread_attr_init(&attr);
140 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
141 if (pthread_create(&kn->data.tid, &attr, sleeper_thread, req) != 0) {
142 dbg_perror("pthread_create");
143 pthread_attr_destroy(&attr);
147 pthread_attr_destroy(&attr);
153 _timer_delete(struct knote *kn)
/* Stop the detached sleeper thread backing this knote by cancelling it.
   NOTE(review): return statements and closing brace are not visible in
   this chunk. */
155 if (pthread_cancel(kn->data.tid) != 0) {
156 /* Race condition: sleeper_thread exits before it is cancelled */
/* An already-exited thread makes pthread_cancel fail (ESRCH); this is
   benign, hence only a debug message. */
159 dbg_perror("pthread_cancel(3)");
166 evfilt_timer_init(struct filter *filt)
/*
 * Filter-wide setup: create the non-blocking socketpair that carries
 * sleepinfo records from sleeper threads (kf_wfd side) to
 * evfilt_timer_copyout (kf_pfd side).
 *
 * NOTE(review): error-path return statements and the closing brace are not
 * visible in this chunk; on fcntl failure the freshly created descriptors
 * do not appear to be closed -- confirm against the full source.
 */
170 if (socketpair(AF_UNIX, SOCK_STREAM, 0, fd) < 0) {
171 dbg_perror("socketpair(3)");
/* NOTE(review): F_SETFL with only O_NONBLOCK discards any other status
   flags; harmless for a fresh socketpair, but an F_GETFL|O_NONBLOCK
   round-trip would be the defensive form. */
174 if (fcntl(fd[0], F_SETFL, O_NONBLOCK) < 0
175 || fcntl(fd[1], F_SETFL, O_NONBLOCK) < 0) {
176 dbg_perror("fcntl(2)");
182 filt->kf_wfd = fd[0];
183 filt->kf_pfd = fd[1];
189 evfilt_timer_destroy(struct filter *filt)
/* Tear down the filter: close both ends of the notification socketpair.
   close(2) errors are deliberately ignored (void cast) -- there is no
   meaningful recovery during teardown. */
191 (void) close(filt->kf_wfd);
192 (void) close(filt->kf_pfd);
196 evfilt_timer_copyout(struct filter *filt,
/*
 * Drain one sleepinfo record from the socketpair, acknowledge it so the
 * sleeper's CTS check succeeds, and translate it into a kevent for the
 * caller.
 *
 * NOTE(review): the remaining parameters, local declarations (`si`, `cnt`,
 * `kn`, `dst`), several `if` condition lines, and the return statements
 * are not visible in this chunk.
 */
205 cnt = read(filt->kf_pfd, &si, sizeof(si));
207 /* FIXME: handle EAGAIN and EINTR */
208 dbg_printf("read(2): %s", strerror(errno));
210 } else if (cnt < sizeof(si)) {
211 dbg_puts("error: short read");
215 /* Acknowledge receipt: the one-byte token is what sleeper_thread's
      read() looks for before sending the next wakeup */
216 cnt = write(filt->kf_pfd, ".", 1);
218 /* FIXME: handle EAGAIN and EINTR */
219 dbg_printf("write(2): %s", strerror(errno));
221 } else if (cnt < 1) {
222 dbg_puts("error: short write");
226 kn = knote_lookup(filt, si.ident);
228 /* Race condition: timer events remain queued even after
229 the knote is deleted. Ignore these events */
233 dbg_printf("knote=%p", kn);
234 memcpy(dst, &kn->kev, sizeof(*dst));
/* data reports how many times the timer expired (from the sleeper). */
236 dst->data = si.counter;
238 if (kn->kev.flags & EV_DISPATCH) {
241 } else if (kn->kev.flags & EV_ONESHOT) {
/* One-shot timers are destroyed after their first delivery. */
243 knote_free(filt, kn);
250 evfilt_timer_knote_create(struct filter *filt, struct knote *kn)
/* Per-knote constructor hook: delegate to _timer_create, which spawns the
   sleeper thread for this timer. */
252 return _timer_create(filt, kn);
256 evfilt_timer_knote_modify(struct filter *filt, struct knote *kn,
257 const struct kevent *kev)
/* Modifying an existing timer is not implemented on this backend;
   callers always receive failure. */
259 return (-1); /* STUB */
263 evfilt_timer_knote_delete(struct knote *kn)
273 evfilt_timer_knote_enable(struct filter *filt, struct knote *kn)
/* Enabling re-arms the timer by spawning a fresh sleeper thread
   (mirror image of knote_disable below). */
275 return evfilt_timer_knote_create(filt, kn);
279 evfilt_timer_knote_disable(struct filter *filt, struct knote *kn)
/* Disabling cancels the sleeper thread; knote_enable re-creates it. */
281 return evfilt_timer_knote_delete(filt, kn);
284 const struct filter evfilt_timer = {
/* Filter vtable for EVFILT_TIMER on the POSIX-threads backend.
   NOTE(review): the leading entries of this initializer (filter id and the
   init hook, presumably evfilt_timer_init) and the closing `};` are not
   visible in this chunk. */
287 evfilt_timer_destroy,
288 evfilt_timer_copyout,
289 evfilt_timer_knote_create,
290 evfilt_timer_knote_modify,
291 evfilt_timer_knote_delete,
292 evfilt_timer_knote_enable,
293 evfilt_timer_knote_disable,