X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=async.c;h=b2de360c2303f7ccab8278a55ce63b4da512f723;hb=HEAD;hp=9a98a74acb99bbf74c004ab2de0d09ca78eac84c;hpb=05e514b1d4d5bd4209e2c8bbc76ff05c85a235f3;p=sdk%2Femulator%2Fqemu.git

diff --git a/async.c b/async.c
index 9a98a74..b2de360 100644
--- a/async.c
+++ b/async.c
@@ -22,11 +22,14 @@
  * THE SOFTWARE.
  */
 
+#include "qemu/osdep.h"
+#include "qapi/error.h"
 #include "qemu-common.h"
 #include "block/aio.h"
 #include "block/thread-pool.h"
 #include "qemu/main-loop.h"
 #include "qemu/atomic.h"
+#include "block/raw-aio.h"
 
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
@@ -41,6 +44,26 @@ struct QEMUBH {
     bool deleted;
 };
 
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
+{
+    QEMUBH *bh;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
+    qemu_mutex_lock(&ctx->bh_lock);
+    bh->next = ctx->first_bh;
+    bh->scheduled = 1;
+    bh->deleted = 1;
+    /* Make sure that the members are ready before putting bh into list */
+    smp_wmb();
+    ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
+    aio_notify(ctx);
+}
+
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
@@ -59,6 +82,11 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
     return bh;
 }
 
+void aio_bh_call(QEMUBH *bh)
+{
+    bh->cb(bh->opaque);
+}
+
 /* Multiple occurrences of aio_bh_poll cannot be called concurrently */
 int aio_bh_poll(AioContext *ctx)
 {
@@ -78,11 +106,13 @@ int aio_bh_poll(AioContext *ctx)
          * thread sees the zero before bh->cb has run, and thus will call
          * aio_notify again if necessary.
          */
-        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
-            if (!bh->idle)
+        if (atomic_xchg(&bh->scheduled, 0)) {
+            /* Idle BHs don't count as progress */
+            if (!bh->idle) {
                 ret = 1;
+            }
             bh->idle = 0;
-            bh->cb(bh->opaque);
+            aio_bh_call(bh);
         }
     }
 
@@ -94,7 +124,7 @@ int aio_bh_poll(AioContext *ctx)
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
-            if (bh->deleted) {
+            if (bh->deleted && !bh->scheduled) {
                 *bhp = bh->next;
                 g_free(bh);
             } else {
@@ -158,7 +188,7 @@ aio_compute_timeout(AioContext *ctx)
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
@@ -206,9 +236,9 @@ aio_ctx_check(GSource *source)
     aio_notify_accept(ctx);
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             return true;
-        }
+	}
     }
     return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
 }
@@ -231,9 +261,30 @@ aio_ctx_finalize(GSource *source)
     AioContext *ctx = (AioContext *) source;
 
     thread_pool_free(ctx->thread_pool);
-    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
+
+#ifdef CONFIG_LINUX_AIO
+    if (ctx->linux_aio) {
+        laio_detach_aio_context(ctx->linux_aio, ctx);
+        laio_cleanup(ctx->linux_aio);
+        ctx->linux_aio = NULL;
+    }
+#endif
+
+    qemu_mutex_lock(&ctx->bh_lock);
+    while (ctx->first_bh) {
+        QEMUBH *next = ctx->first_bh->next;
+
+        /* qemu_bh_delete() must have been called on BHs in this AioContext */
+        assert(ctx->first_bh->deleted);
+
+        g_free(ctx->first_bh);
+        ctx->first_bh = next;
+    }
+    qemu_mutex_unlock(&ctx->bh_lock);
+
+    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
     event_notifier_cleanup(&ctx->notifier);
-    rfifolock_destroy(&ctx->lock);
+    qemu_rec_mutex_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
     timerlistgroup_deinit(&ctx->tlg);
 }
 
@@ -259,6 +310,17 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
+#ifdef CONFIG_LINUX_AIO
+LinuxAioState *aio_get_linux_aio(AioContext *ctx)
+{
+    if (!ctx->linux_aio) {
+        ctx->linux_aio = laio_init();
+        laio_attach_aio_context(ctx->linux_aio, ctx);
+    }
+    return ctx->linux_aio;
+}
+#endif
+
 void aio_notify(AioContext *ctx)
 {
     /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs
@@ -283,12 +345,6 @@ static void aio_timerlist_notify(void *opaque)
     aio_notify(opaque);
 }
 
-static void aio_rfifolock_cb(void *opaque)
-{
-    /* Kick owner thread in case they are blocked in aio_poll() */
-    aio_notify(opaque);
-}
-
 static void event_notifier_dummy_cb(EventNotifier *e)
 {
 }
@@ -297,23 +353,32 @@ AioContext *aio_context_new(Error **errp)
 {
     int ret;
     AioContext *ctx;
+
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+    aio_context_setup(ctx);
+
     ret = event_notifier_init(&ctx->notifier, false);
     if (ret < 0) {
-        g_source_destroy(&ctx->source);
         error_setg_errno(errp, -ret, "Failed to initialize event notifier");
-        return NULL;
+        goto fail;
     }
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
+                           false,
                            (EventNotifierHandler *)
                            event_notifier_dummy_cb);
+#ifdef CONFIG_LINUX_AIO
+    ctx->linux_aio = NULL;
+#endif
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
-    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
+    qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
     return ctx;
+fail:
+    g_source_destroy(&ctx->source);
+    return NULL;
 }
 
 void aio_context_ref(AioContext *ctx)
@@ -328,10 +393,10 @@ void aio_context_unref(AioContext *ctx)
 
 void aio_context_acquire(AioContext *ctx)
 {
-    rfifolock_lock(&ctx->lock);
+    qemu_rec_mutex_lock(&ctx->lock);
 }
 
 void aio_context_release(AioContext *ctx)
 {
-    rfifolock_unlock(&ctx->lock);
+    qemu_rec_mutex_unlock(&ctx->lock);
 }
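For context, the diff above introduces aio_bh_schedule_oneshot(): a bottom half that is created already scheduled and already marked deleted, so aio_bh_poll() runs it once and then frees it in its cleanup pass; the caller never needs a QEMUBH handle or a qemu_bh_delete() call. Below is a minimal usage sketch, not part of the patch: the callback, caller, and counter variable are hypothetical, while aio_bh_schedule_oneshot() comes from this change and qemu_get_aio_context() is QEMU's existing accessor for the main loop's AioContext.

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "qemu/main-loop.h"

    /* Hypothetical callback: runs exactly once from the AioContext's event
     * loop; the bottom half then frees itself, because it was created with
     * scheduled = 1 and deleted = 1 and aio_bh_poll() releases deleted,
     * no-longer-scheduled BHs after they run.
     */
    static void oneshot_cb(void *opaque)
    {
        int *counter = opaque;
        (*counter)++;
    }

    /* Hypothetical caller: defer work to the main loop's AioContext. */
    static void schedule_example(void)
    {
        static int counter;

        aio_bh_schedule_oneshot(qemu_get_aio_context(), oneshot_cb, &counter);
    }

Compared with the aio_bh_new()/qemu_bh_schedule()/qemu_bh_delete() pattern, the one-shot form trades reusability for a fire-and-forget lifecycle managed entirely by the event loop.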