int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
int ret;
+ BlockDriverState *bs = blk_bs(blk);
- trace_blk_co_preadv(blk, blk_bs(blk), offset, bytes, flags);
+ trace_blk_co_preadv(blk, bs, offset, bytes, flags);
ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
+ bdrv_inc_in_flight(bs);
+
/* throttling disk I/O */
if (blk->public.throttle_state) {
throttle_group_co_io_limits_intercept(blk, bytes, false);
}
- return bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
+ ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
+ bdrv_dec_in_flight(bs);
+ return ret;
}
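
The shape of this change repeats throughout the patch: increment the in-flight counter before issuing I/O, decrement it on every exit path. Below is a minimal standalone sketch of that discipline, with C11 atomics standing in for QEMU's atomic_inc/atomic_dec; Device and the dev_* names are illustrative, not QEMU API. Later sketches in this section reuse these definitions.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Device {
        atomic_uint in_flight;   /* requests issued but not yet completed */
        bool flush_active;       /* used by the flush sketch further down */
    } Device;

    static void dev_inc_in_flight(Device *d)
    {
        atomic_fetch_add(&d->in_flight, 1);
    }

    static void dev_dec_in_flight(Device *d)
    {
        atomic_fetch_sub(&d->in_flight, 1);
    }

    /* Bracket the I/O exactly as blk_co_preadv does above. */
    static int dev_read(Device *d)
    {
        int ret = 0;             /* the real I/O would happen here */

        dev_inc_in_flight(d);
        /* ... submit the request and wait for it ... */
        dev_dec_in_flight(d);
        return ret;
    }

    int main(void)
    {
        Device d = { 0 };

        dev_read(&d);
        printf("in flight: %u\n", atomic_load(&d.in_flight));  /* prints 0 */
        return 0;
    }
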
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
int ret;
+ BlockDriverState *bs = blk_bs(blk);
- trace_blk_co_pwritev(blk, blk_bs(blk), offset, bytes, flags);
+ trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
+ bdrv_inc_in_flight(bs);
+
/* throttling disk I/O */
if (blk->public.throttle_state) {
    throttle_group_co_io_limits_intercept(blk, bytes, true);
}
if (!blk->enable_write_cache) {
    flags |= BDRV_REQ_FUA;
}
- return bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
+ ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
+ bdrv_dec_in_flight(bs);
+ return ret;
}
typedef struct BlkRwCo {
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
+
+ bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->ret);
qemu_aio_unref(acb);
}
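
For asynchronous requests the two halves are split: the submission path (blk_abort_aio_request, next hunk) takes the reference when it allocates the ACB, and the completion bottom half above releases it just before calling back into the user. Continuing the sketch from above; Request, request_submit and request_complete are hypothetical names:

    #include <stdlib.h>

    typedef void CompletionFunc(void *opaque, int ret);

    typedef struct Request {
        Device *dev;
        CompletionFunc *cb;
        void *opaque;
        int ret;
    } Request;

    /* Completion side, mirroring error_callback_bh: release the reference
     * taken at submission time, then hand the result to the caller. */
    static void request_complete(Request *req)
    {
        dev_dec_in_flight(req->dev);
        req->cb(req->opaque, req->ret);
        free(req);
    }

    /* Submission side: the request stays counted from here until
     * request_complete() runs, even though we return immediately. */
    static Request *request_submit(Device *dev, CompletionFunc *cb, void *opaque)
    {
        Request *req = malloc(sizeof(*req));

        dev_inc_in_flight(dev);
        req->dev = dev;
        req->cb = cb;
        req->opaque = opaque;
        req->ret = 0;
        return req;
    }
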
static BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                         BlockCompletionFunc *cb,
                                         void *opaque, int ret)
{
struct BlockBackendAIOCB *acb;
+ bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
acb->blk = blk;
acb->ret = ret;
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
if (acb->has_returned) {
+ bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->rwco.ret);
qemu_aio_unref(acb);
}
BlkAioEmAIOCB *acb;
Coroutine *co;
+ bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
bool bdrv_requests_pending(BlockDriverState *bs)
{
BdrvChild *child;
- if (!QLIST_EMPTY(&bs->tracked_requests)) {
+ if (atomic_read(&bs->in_flight)) {
return true;
}
static void bdrv_drain_poll(BlockDriverState *bs)
{
- bool busy = true;
-
- while (busy) {
+ while (bdrv_requests_pending(bs)) {
/* Keep iterating */
- busy = bdrv_requests_pending(bs);
- busy |= aio_poll(bdrv_get_aio_context(bs), busy);
+ aio_poll(bdrv_get_aio_context(bs), true);
}
}
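
Once every request is counted, draining no longer needs the old busy bookkeeping: it can simply block in aio_poll until the counter drops to zero. The same loop against the Device sketch, with a stub event loop standing in for aio_poll(ctx, true):

    /* Stub event loop: a real one would dispatch I/O completions; this one
     * simulates a single completion per call so the drain terminates. */
    static void dev_poll_events(Device *d)
    {
        if (atomic_load(&d->in_flight) > 0) {
            dev_dec_in_flight(d);
        }
    }

    /* Same shape as the new bdrv_drain_poll: poll until quiescent. */
    static void dev_drain(Device *d)
    {
        while (atomic_load(&d->in_flight) > 0) {
            dev_poll_events(d);
        }
    }
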
static void bdrv_co_drain_bh_cb(void *opaque)
{
BdrvCoDrainData *data = opaque;
Coroutine *co = data->co;
+ BlockDriverState *bs = data->bs;
- bdrv_drain_poll(data->bs);
+ bdrv_dec_in_flight(bs);
+ bdrv_drain_poll(bs);
data->done = true;
qemu_coroutine_enter(co);
}
.bs = bs,
.done = false,
};
+ bdrv_inc_in_flight(bs);
aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
bdrv_co_drain_bh_cb, &data);
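
Note the ordering here: the coroutine takes a reference before scheduling the bottom half, so the pending BH itself counts as activity, and bdrv_co_drain_bh_cb drops that reference before it starts polling, otherwise the drain would wait on itself. A sketch of the same handoff; schedule_bh is a hypothetical stand-in for aio_bh_schedule_oneshot:

    /* Trivial stand-in for a one-shot bottom-half scheduler: runs inline. */
    static void schedule_bh(void (*fn)(void *), void *opaque)
    {
        fn(opaque);
    }

    static void drain_bh(void *opaque)
    {
        Device *d = opaque;

        dev_dec_in_flight(d);   /* drop the reference taken at schedule time */
        dev_drain(d);           /* safe: we no longer count ourselves */
    }

    static void schedule_drain(Device *d)
    {
        dev_inc_in_flight(d);   /* keep the device busy until the BH runs */
        schedule_bh(drain_bh, d);
    }
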
void bdrv_drain_all(void)
{
/* Always run first iteration so any pending completion BHs run */
- bool busy = true;
+ bool waited = true;
BlockDriverState *bs;
BdrvNextIterator it;
BlockJob *job = NULL;
* request completion. Therefore we must keep looping until there was no
* more activity rather than simply draining each device independently.
*/
- while (busy) {
- busy = false;
+ while (waited) {
+ waited = false;
for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
AioContext *aio_context = ctx->data;
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
if (aio_context == bdrv_get_aio_context(bs)) {
if (bdrv_requests_pending(bs)) {
- busy = true;
- aio_poll(aio_context, busy);
+ aio_poll(aio_context, true);
+ waited = true;
}
}
}
- busy |= aio_poll(aio_context, false);
aio_context_release(aio_context);
}
}
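
The rename from busy to waited follows the new meaning of the flag: it no longer answers "is anything still busy?" but "did this pass actually wait on some context?". The unconditional non-blocking aio_poll can go because blocking polls on devices with work in flight are now the only progress the loop needs to observe. Reduced to the Device sketch:

    #include <stddef.h>

    /* Same shape as the new bdrv_drain_all loop: repeat full passes over
     * all devices until one pass completes without waiting on any of them. */
    static void drain_all(Device **devs, size_t n)
    {
        bool waited = true;     /* always run the first pass */

        while (waited) {
            waited = false;
            for (size_t i = 0; i < n; i++) {
                if (atomic_load(&devs[i]->in_flight) > 0) {
                    dev_poll_events(devs[i]);
                    waited = true;
                }
            }
        }
    }
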
return true;
}
+void bdrv_inc_in_flight(BlockDriverState *bs)
+{
+ atomic_inc(&bs->in_flight);
+}
+
+void bdrv_dec_in_flight(BlockDriverState *bs)
+{
+ atomic_dec(&bs->in_flight);
+}
+
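
These two helpers are the counter's entire interface. Not visible in this excerpt, the patch also has to declare the field and export the prototypes; presumably something along these lines (the exact header placement is my assumption, not shown in the hunks here):

    /* include/block/block_int.h, inside struct BlockDriverState */
    unsigned int in_flight;

    /* include/block/block.h */
    void bdrv_inc_in_flight(BlockDriverState *bs);
    void bdrv_dec_in_flight(BlockDriverState *bs);
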
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
BlockDriverState *bs = self->bs;
return ret;
}
+ bdrv_inc_in_flight(bs);
+
/* Don't do copy-on-read if we read data before write operation */
if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
flags |= BDRV_REQ_COPY_ON_READ;
use_local_qiov ? &local_qiov : qiov,
flags);
tracked_request_end(&req);
+ bdrv_dec_in_flight(bs);
if (use_local_qiov) {
qemu_iovec_destroy(&local_qiov);
return ret;
}
+ bdrv_inc_in_flight(bs);
/*
* Align write if necessary by performing a read-modify-write cycle.
* Pad qiov with the read parts and be sure to have a tracked request not
qemu_vfree(tail_buf);
out:
tracked_request_end(&req);
+ bdrv_dec_in_flight(bs);
return ret;
}
}
*file = NULL;
+ bdrv_inc_in_flight(bs);
ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
file);
if (ret < 0) {
*pnum = 0;
- return ret;
+ goto out;
}
if (ret & BDRV_BLOCK_RAW) {
assert(ret & BDRV_BLOCK_OFFSET_VALID);
- return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
- *pnum, pnum, file);
+ ret = bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
+ *pnum, pnum, file);
+ goto out;
}
if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
}
}
+out:
+ bdrv_dec_in_flight(bs);
return ret;
}
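
Converting bdrv_co_get_block_status means every early return has to become a goto, so the decrement on the shared exit path always runs; missing one would leave the counter permanently elevated and wedge any later drain. The shape in miniature, on the Device sketch:

    static int dev_query(Device *d, int simulate_error)
    {
        int ret;

        dev_inc_in_flight(d);
        ret = simulate_error ? -1 : 0;  /* stand-in for the driver callback */
        if (ret < 0) {
            goto out;   /* a bare "return ret;" here would leak the count */
        }
        /* ... further processing on success ... */
    out:
        dev_dec_in_flight(d);
        return ret;
    }
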
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
if (!acb->need_bh) {
+ bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->req.error);
qemu_aio_unref(acb);
}
Coroutine *co;
BlockAIOCBCoroutine *acb;
+ /* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
+ bdrv_inc_in_flight(child->bs);
+
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
acb->child = child;
acb->need_bh = true;
Coroutine *co;
BlockAIOCBCoroutine *acb;
+ /* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
+ bdrv_inc_in_flight(bs);
+
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
acb->need_bh = true;
acb->req.error = -EINPROGRESS;
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
int ret;
- BdrvTrackedRequest req;
if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
bdrv_is_sg(bs)) {
return 0;
}
- tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
+ bdrv_inc_in_flight(bs);
int current_gen = bs->write_gen;
/* Wait until any previous flushes are completed */
- while (bs->active_flush_req != NULL) {
+ while (bs->active_flush_req) {
qemu_co_queue_wait(&bs->flush_queue);
}
- bs->active_flush_req = &req;
+ bs->active_flush_req = true;
/* Write back all layers by calling one driver function */
if (bs->drv->bdrv_co_flush) {
out:
/* Notify any pending flushes that we have completed */
bs->flushed_gen = current_gen;
- bs->active_flush_req = NULL;
+ bs->active_flush_req = false;
/* Return value is ignored - it's ok if wait queue is empty */
qemu_co_queue_next(&bs->flush_queue);
- tracked_request_end(&req);
+ bdrv_dec_in_flight(bs);
return ret;
}
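
With flushes accounted for by in_flight rather than by a tracked request, active_flush_req no longer needs to point at anything; a bool saying "a flush is in progress" is enough to make later flushes queue behind the current one. A single-threaded sketch of the same control flow; the coroutine queue is elided:

    static int dev_flush(Device *d)
    {
        int ret = 0;

        dev_inc_in_flight(d);
        while (d->flush_active) {
            /* in QEMU: qemu_co_queue_wait(&bs->flush_queue); in this
             * single-threaded sketch the loop body never runs */
        }
        d->flush_active = true;
        /* ... write back all layers here ... */
        d->flush_active = false;
        dev_dec_in_flight(d);
        return ret;
    }
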
return 0;
}
+ bdrv_inc_in_flight(bs);
tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);
ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
req.bytes >> BDRV_SECTOR_BITS);
tracked_request_end(&req);
+ bdrv_dec_in_flight(bs);
return ret;
}
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
BlockDriver *drv = bs->drv;
- BdrvTrackedRequest tracked_req;
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
BlockAIOCB *acb;
- tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
+ bdrv_inc_in_flight(bs);
if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
co.ret = -ENOTSUP;
goto out;
qemu_coroutine_yield();
}
out:
- tracked_request_end(&tracked_req);
+ bdrv_dec_in_flight(bs);
return co.ret;
}
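
After the conversion, bdrv_requests_pending (first block/io.c hunk above) reduces to a counter read instead of a walk over tracked-request lists, which is the point of the whole patch. In sketch form:

    static bool dev_requests_pending(Device *d)
    {
        return atomic_load(&d->in_flight) > 0;
    }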