     bh->deleted = 1;
 }
 
-void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
+static gboolean
+aio_ctx_prepare(GSource *source, gint *timeout)
 {
+    AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
+    bool scheduled = false;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
+            scheduled = true;
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
-                *timeout = MIN(10, *timeout);
+                *timeout = 10;
             } else {
                 /* non-idle bottom halves will be executed
                  * immediately */
                 *timeout = 0;
                 break;
             }
         }
     }
-}
-
-static gboolean
-aio_ctx_prepare(GSource *source, gint *timeout)
-{
-    AioContext *ctx = (AioContext *) source;
-    uint32_t wait = -1;
-    aio_bh_update_timeout(ctx, &wait);
-
-    if (wait != -1) {
-        *timeout = MIN(*timeout, wait);
-        return wait == 0;
-    }
-    return false;
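+    /* Returning true tells GLib that this source is ready, so the main
+     * loop will dispatch it without blocking in poll(). */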
+    return scheduled;
 }
 
 static gboolean
 #ifdef CONFIG_LINUX
 #include <sys/syscall.h>
 #endif
-#ifdef CONFIG_EVENTFD
-#include <sys/eventfd.h>
-#endif
 int qemu_get_thread_id(void)
 {
     return ret;
 }
-/*
- * Creates an eventfd that looks like a pipe and has EFD_CLOEXEC set.
- */
-int qemu_eventfd(int fds[2])
-{
-#ifdef CONFIG_EVENTFD
-    int ret;
-
-    ret = eventfd(0, 0);
-    if (ret >= 0) {
-        fds[0] = ret;
-        fds[1] = dup(ret);
-        if (fds[1] == -1) {
-            close(ret);
-            return -1;
-        }
-        qemu_set_cloexec(ret);
-        qemu_set_cloexec(fds[1]);
-        return 0;
-    }
-    if (errno != ENOSYS) {
-        return -1;
-    }
-#endif
-
-    return qemu_pipe(fds);
-}
-
 int qemu_utimens(const char *path, const struct timespec *times)
 {
     struct timeval tv[2], tv_now;
  * These are internal functions used by the QEMU main loop.
  */
 int aio_bh_poll(AioContext *ctx);
-void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout);
 
 /**
  * qemu_bh_schedule: Schedule a bottom half.