/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/sockets.h" // struct in_addr needed for libslirp.h
#include "slirp/libslirp.h"
#include "qemu/main-loop.h"
#include "block/aio.h"

#include "sysemu/hax.h"

#ifndef _WIN32

#include "qemu/compatfd.h"
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them. We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);
        if (len == -1 && errno == EAGAIN) {
            break;
        }
        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }
        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}
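
/*
 * Illustration only (kept out of the build): the bare signalfd pattern the
 * handler above consumes, assuming Linux's <sys/signalfd.h>. QEMU's
 * qemu_signalfd() wraps this call and falls back to a pipe-plus-thread
 * emulation on hosts without the syscall. Names here are hypothetical.
 */
#if 0
#include <sys/signalfd.h>

static int signalfd_sketch(void)
{
    sigset_t mask;

    sigemptyset(&mask);
    sigaddset(&mask, SIGALRM);
    /* Block the signals so they are delivered only through the fd. */
    pthread_sigmask(SIG_BLOCK, &mask, NULL);
    /* Each subsequent read() yields one struct signalfd_siginfo. */
    return signalfd(-1, &mask, SFD_NONBLOCK);
}
#endif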
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigdelset(&set, SIG_IPI);
    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}
#else /* _WIN32 */

static int qemu_signal_init(void)
{
    return 0;
}
#endif
static AioContext *qemu_aio_context;

AioContext *qemu_get_aio_context(void)
{
    return qemu_aio_context;
}
static void qemu_notify_hax_event(void)
{
    CPUArchState *env = NULL;

    if (hax_enabled()) {
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            hax_raise_event(env);
        }
    }
}
void qemu_notify_event(void)
{
    if (!qemu_aio_context) {
        return;
    }
    qemu_notify_hax_event();
    aio_notify(qemu_aio_context);
}
static GArray *gpollfds;

int qemu_init_main_loop(void)
{
    int ret;
    GSource *src;

    init_clocks();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    qemu_aio_context = aio_context_new();
    src = aio_get_g_source(qemu_aio_context);
    g_source_attach(src, NULL);
    g_source_unref(src);
    return 0;
}
static int max_priority;

#ifndef _WIN32
static int glib_pollfds_idx;
static int glib_n_poll_fds;
static void glib_pollfds_fill(int64_t *cur_timeout)
{
    GMainContext *context = g_main_context_default();
    int timeout = 0;
    int64_t timeout_ns;
    int n;

    g_main_context_prepare(context, &max_priority);

    glib_pollfds_idx = gpollfds->len;
    n = glib_n_poll_fds;
    do {
        GPollFD *pfds;
        glib_n_poll_fds = n;
        g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds);
        pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
        n = g_main_context_query(context, max_priority, &timeout, pfds,
                                 glib_n_poll_fds);
    } while (n != glib_n_poll_fds);

    if (timeout < 0) {
        timeout_ns = -1;
    } else {
        timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
    }

    *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
}
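
/*
 * Note: qemu_soonest_timeout() picks the earlier of two deadlines while
 * treating -1 as "wait forever", so the GLib timeout above can only
 * tighten the caller's timeout, never extend it.
 */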
static void glib_pollfds_poll(void)
{
    GMainContext *context = g_main_context_default();
    GPollFD *pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);

    if (g_main_context_check(context, max_priority, pfds, glib_n_poll_fds)) {
        g_main_context_dispatch(context);
    }
}
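
/*
 * Illustration only (kept out of the build): the canonical GLib manual
 * iteration that glib_pollfds_fill()/glib_pollfds_poll() split in two so
 * GLib's descriptors can be merged into QEMU's own poll set. The function
 * name and array size are arbitrary; a real loop must grow the array when
 * g_main_context_query() reports more fds, as glib_pollfds_fill() does.
 */
#if 0
static void glib_iterate_once_sketch(GMainContext *ctx)
{
    GPollFD fds[64];
    gint prio, timeout, n;

    g_main_context_prepare(ctx, &prio);
    n = g_main_context_query(ctx, prio, &timeout, fds, G_N_ELEMENTS(fds));
    g_assert(n <= G_N_ELEMENTS(fds));
    g_poll(fds, n, timeout);    /* QEMU uses qemu_poll_ns() instead */
    if (g_main_context_check(ctx, prio, fds, n)) {
        g_main_context_dispatch(ctx);
    }
}
#endif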
#define MAX_MAIN_LOOP_SPIN (1000)

static int os_host_main_loop_wait(int64_t timeout)
{
    int ret;
    static int spin_counter;

    glib_pollfds_fill(&timeout);

    /* If the I/O thread is very busy or we are incorrectly busy waiting in
     * the I/O thread, this can lead to starvation of the BQL such that the
     * VCPU threads never run. To make sure we can detect the latter case,
     * print a message to the screen. If we run into this condition, create
     * a fake timeout in order to give the VCPU threads a chance to run.
     */
    if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
        static bool notified;

        if (!notified) {
            fprintf(stderr,
                    "main-loop: WARNING: I/O thread spun for %d iterations\n",
                    MAX_MAIN_LOOP_SPIN);
            notified = true;
        }

        timeout = SCALE_MS;
    }

    if (timeout) {
        spin_counter = 0;
        qemu_mutex_unlock_iothread();
    } else {
        spin_counter++;
    }

    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    if (timeout) {
        qemu_mutex_lock_iothread();
    }

    glib_pollfds_poll();
    return ret;
}
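
/*
 * Illustration only (kept out of the build): the locking pattern above in
 * isolation. Sleeping in poll while holding the iothread mutex (the BQL)
 * would stall the VCPU threads, so the lock is dropped only when the loop
 * may actually block (timeout != 0). The helper name is hypothetical.
 */
#if 0
static int poll_without_bql_sketch(GPollFD *fds, unsigned nfds, int64_t ns)
{
    int ret;

    qemu_mutex_unlock_iothread();
    ret = qemu_poll_ns(fds, nfds, ns);
    qemu_mutex_lock_iothread();
    return ret;
}
#endif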
#else
/***********************************************************/
/* Polling handling */

typedef struct PollingEntry {
    PollingFunc *func;
    void *opaque;
    struct PollingEntry *next;
} PollingEntry;

static PollingEntry *first_polling_entry;
int qemu_add_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    pe = g_malloc0(sizeof(PollingEntry));
    pe->func = func;
    pe->opaque = opaque;
    /* append at the tail of the singly linked list */
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
    *ppe = pe;
    return 0;
}
void qemu_del_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
        pe = *ppe;
        if (pe->func == func && pe->opaque == opaque) {
            /* unlink and free the matching entry */
            *ppe = pe->next;
            g_free(pe);
            break;
        }
    }
}
/***********************************************************/
/* Wait objects support */
typedef struct WaitObjects {
    int num;
    int revents[MAXIMUM_WAIT_OBJECTS + 1];
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
} WaitObjects;

static WaitObjects wait_objects = {0};
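
/*
 * Note: the arrays above are sized MAXIMUM_WAIT_OBJECTS + 1 so that the
 * shift-down loop in qemu_del_wait_object() may read index i + 1 even
 * while removing the last live entry.
 */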
int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    WaitObjects *w = &wait_objects;
    if (w->num >= MAXIMUM_WAIT_OBJECTS) {
        return -1;
    }
    w->events[w->num] = handle;
    w->func[w->num] = func;
    w->opaque[w->num] = opaque;
    w->revents[w->num] = 0;
    w->num++;
    return 0;
}
void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    int i, found;
    WaitObjects *w = &wait_objects;

    found = 0;
    for (i = 0; i < w->num; i++) {
        if (w->events[i] == handle) {
            found = 1;
        }
        if (found) {
            /* shift the remaining entries down over the removed slot */
            w->events[i] = w->events[i + 1];
            w->func[i] = w->func[i + 1];
            w->opaque[i] = w->opaque[i + 1];
            w->revents[i] = w->revents[i + 1];
        }
    }
    if (found) {
        w->num--;
    }
}
void qemu_fd_register(int fd)
{
    WSAEventSelect(fd, event_notifier_get_handle(&qemu_aio_context->notifier),
                   FD_READ | FD_ACCEPT | FD_CLOSE |
                   FD_CONNECT | FD_WRITE | FD_OOB);
}
static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds,
                        fd_set *xfds)
{
    int nfds = -1;
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int events = pfd->events;
        if (events & G_IO_IN) {
            FD_SET(fd, rfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_OUT) {
            FD_SET(fd, wfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_PRI) {
            FD_SET(fd, xfds);
            nfds = MAX(nfds, fd);
        }
    }
    return nfds;
}
static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
                         fd_set *wfds, fd_set *xfds)
{
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int revents = 0;

        if (FD_ISSET(fd, rfds)) {
            revents |= G_IO_IN;
        }
        if (FD_ISSET(fd, wfds)) {
            revents |= G_IO_OUT;
        }
        if (FD_ISSET(fd, xfds)) {
            revents |= G_IO_PRI;
        }
        pfd->revents = revents & pfd->events;
    }
}
static int os_host_main_loop_wait(int64_t timeout)
{
    GMainContext *context = g_main_context_default();
    GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
    int select_ret = 0;
    int g_poll_ret, ret, i, n_poll_fds;
    PollingEntry *pe;
    WaitObjects *w = &wait_objects;
    gint poll_timeout;
    int64_t poll_timeout_ns;
    static struct timeval tv0;
    fd_set rfds, wfds, xfds;
    int nfds;

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret != 0) {
        return ret;
    }

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
    if (nfds >= 0) {
        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (select_ret != 0) {
            timeout = 0;
        }
        if (select_ret > 0) {
            pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
        }
    }
    g_main_context_prepare(context, &max_priority);
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));

    for (i = 0; i < w->num; i++) {
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
        poll_fds[n_poll_fds + i].events = G_IO_IN;
    }

    if (poll_timeout < 0) {
        poll_timeout_ns = -1;
    } else {
        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
    }

    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);

    qemu_mutex_unlock_iothread();
    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);

    qemu_mutex_lock_iothread();
    if (g_poll_ret > 0) {
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
        }
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
            }
        }
    }

    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

    return select_ret || g_poll_ret;
}
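
/*
 * Illustration only (kept out of the build): on win32, GLib's poll is
 * built on MsgWaitForMultipleObjectsEx(), so a kernel HANDLE can be polled
 * by stuffing it into GPollFD.fd, which is what the wait_objects loop
 * above relies on. The helper name is hypothetical.
 */
#if 0
static gboolean handle_ready_sketch(HANDLE h, gint timeout_ms)
{
    GPollFD pfd = { .fd = (gintptr)h, .events = G_IO_IN };

    return g_poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & G_IO_IN);
}
#endif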
#endif

int main_loop_wait(int nonblocking)
{
    int ret;
    uint32_t timeout = UINT32_MAX;
    int64_t timeout_ns;

    if (nonblocking) {
        timeout = 0;
    }

    /* poll any events */
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
    /* XXX: separate device handlers from system ones */
#ifdef CONFIG_SLIRP
    slirp_pollfds_fill(gpollfds, &timeout);
#endif
    qemu_iohandler_fill(gpollfds);

    if (timeout == UINT32_MAX) {
        timeout_ns = -1;
    } else {
        timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
    }
    timeout_ns = qemu_soonest_timeout(timeout_ns,
                                      timerlistgroup_deadline_ns(
                                          &main_loop_tlg));

    ret = os_host_main_loop_wait(timeout_ns);
    qemu_iohandler_poll(gpollfds, ret);
#ifdef CONFIG_SLIRP
    slirp_pollfds_poll(gpollfds, (ret < 0));
#endif

    qemu_clock_run_all_timers();

    return ret;
}
/* Functions to operate on the main QEMU AioContext. */

QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
{
    return aio_bh_new(qemu_aio_context, cb, opaque);
}
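
/*
 * Illustration only (kept out of the build): typical bottom-half usage
 * against the main AioContext; the function names below are hypothetical.
 * qemu_bh_schedule() may be called from any thread and wakes the main
 * loop via aio_notify().
 */
#if 0
static void example_bh_cb(void *opaque)
{
    /* runs in the main loop thread on a subsequent iteration */
}

static void example_defer_work(void)
{
    QEMUBH *bh = qemu_bh_new(example_bh_cb, NULL);

    qemu_bh_schedule(bh);
}
#endif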
bool qemu_aio_wait(void)
{
    return aio_poll(qemu_aio_context, true);
}

void qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             void *opaque)
{
    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque);
}

void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read)
{
    aio_set_event_notifier(qemu_aio_context, notifier, io_read);
}