"'wr_highest_offset': %" PRId64 ","
"'flush_operations': %" PRId64
"} }",
- bs->rd_bytes,
- bs->wr_bytes,
- bs->rd_ops,
- bs->wr_ops,
+ bs->nr_bytes[BDRV_ACCT_READ],
+ bs->nr_bytes[BDRV_ACCT_WRITE],
+ bs->nr_ops[BDRV_ACCT_READ],
+ bs->nr_ops[BDRV_ACCT_WRITE],
bs->wr_highest_sector *
(uint64_t)BDRV_SECTOR_SIZE,
- bs->flush_ops);
+ bs->nr_ops[BDRV_ACCT_FLUSH]);
dict = qobject_to_qdict(res);
if (*bs->device_name) {
return buf;
}
-
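
The hunk above is the heart of the patch: five separately named counters collapse into two arrays indexed by an accounting type, so the monitor formatting becomes a straight list of array reads. A standalone sketch of the idea, using stand-in names rather than the QEMU structures:

    #include <inttypes.h>
    #include <stdio.h>

    /* Stand-ins for BlockDriverState's new counters. */
    enum AcctType { ACCT_READ, ACCT_WRITE, ACCT_FLUSH, ACCT_MAX };

    static const char *const name[ACCT_MAX] = { "rd", "wr", "flush" };

    int main(void)
    {
        uint64_t nr_bytes[ACCT_MAX] = { 4096, 8192, 0 };
        uint64_t nr_ops[ACCT_MAX] = { 1, 2, 3 };

        /* One loop instead of five hand-maintained printf arguments. */
        for (int i = 0; i < ACCT_MAX; i++) {
            printf("%s_bytes=%" PRIu64 " %s_ops=%" PRIu64 "\n",
                   name[i], nr_bytes[i], name[i], nr_ops[i]);
        }
        return 0;
    }
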
/**************************************************************/
/* async I/Os */
BlockDriverCompletionFunc *cb, void *opaque)
{
BlockDriver *drv = bs->drv;
- BlockDriverAIOCB *ret;
trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
if (bdrv_check_request(bs, sector_num, nb_sectors))
return NULL;
- ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
- cb, opaque);
-
- if (ret) {
- /* Update stats even though technically transfer has not happened. */
- bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
- bs->rd_ops++;
- }
-
- return ret;
+ return drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
+ cb, opaque);
}
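
With the byte counts gone from bdrv_aio_readv() itself, requests are no longer counted at submission time (the deleted comment admitted as much: the transfer "has not happened" yet). Device models now bracket the real lifetime of a request. A minimal sketch of the pattern the AHCI, SCSI, virtio and Xen hunks below all follow; the MyRequest type and both functions are illustrative, not from the patch:

    typedef struct MyRequest {
        BlockDriverState *bs;
        QEMUIOVector qiov;
        BlockDriverAIOCB *aiocb;
        BlockAcctCookie acct;       /* lives as long as the request */
    } MyRequest;

    static void my_read_complete(void *opaque, int ret)
    {
        MyRequest *req = opaque;

        bdrv_acct_done(req->bs, &req->acct);   /* transfer actually finished */
        /* ... report ret to the guest, reclaim req ... */
    }

    static void my_submit_read(MyRequest *req, int64_t sector, int nb_sectors)
    {
        bdrv_acct_start(req->bs, &req->acct,
                        nb_sectors * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
        req->aiocb = bdrv_aio_readv(req->bs, sector, &req->qiov, nb_sectors,
                                    my_read_complete, req);
    }
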
typedef struct BlockCompleteData {
cb, opaque);
if (ret) {
- /* Update stats even though technically transfer has not happened. */
- bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
- bs->wr_ops ++;
if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
bs->wr_highest_sector = sector_num + nb_sectors - 1;
}
trace_bdrv_aio_flush(bs, opaque);
- bs->flush_ops++;
-
if (bs->open_flags & BDRV_O_NO_FLUSH) {
return bdrv_aio_noop_em(bs, cb, opaque);
}
return bs->in_use;
}
+/* Begin accounting a request: stash its byte count and type in the
+ * cookie until the matching bdrv_acct_done() call.  @bs is unused for
+ * now; it keeps the signature symmetrical with bdrv_acct_done(). */
+void
+bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
+ enum BlockAcctType type)
+{
+ assert(type < BDRV_MAX_IOTYPE);
+
+ cookie->bytes = bytes;
+ cookie->type = type;
+}
+
+/* Finish accounting a request: credit its bytes and one operation to
+ * the per-type counters of @bs. */
+void
+bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
+{
+ assert(cookie->type < BDRV_MAX_IOTYPE);
+
+ bs->nr_bytes[cookie->type] += cookie->bytes;
+ bs->nr_ops[cookie->type]++;
+}
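
bdrv_acct_start() only records intent and bdrv_acct_done() only folds the cookie into the counters, so cookies from different in-flight requests may overlap freely. A simplified standalone re-creation with a sanity check (not the QEMU code):

    #include <assert.h>
    #include <stdint.h>

    enum AcctType { ACCT_READ, ACCT_WRITE, ACCT_FLUSH, ACCT_MAX };

    typedef struct { int64_t bytes; enum AcctType type; } Cookie;
    typedef struct { uint64_t nr_bytes[ACCT_MAX]; uint64_t nr_ops[ACCT_MAX]; } Dev;

    static void acct_start(Cookie *c, int64_t bytes, enum AcctType t)
    {
        c->bytes = bytes;
        c->type = t;
    }

    static void acct_done(Dev *d, const Cookie *c)
    {
        d->nr_bytes[c->type] += c->bytes;
        d->nr_ops[c->type]++;
    }

    int main(void)
    {
        Dev d = { { 0 }, { 0 } };
        Cookie a, b;

        acct_start(&a, 512, ACCT_READ);
        acct_start(&b, 4096, ACCT_WRITE);   /* overlapping requests are fine */
        acct_done(&d, &b);
        acct_done(&d, &a);

        assert(d.nr_bytes[ACCT_READ] == 512 && d.nr_ops[ACCT_WRITE] == 1);
        return 0;
    }
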
+
int bdrv_img_create(const char *filename, const char *fmt,
const char *base_filename, const char *base_fmt,
char *options, uint64_t img_size, int flags)
void bdrv_set_in_use(BlockDriverState *bs, int in_use);
int bdrv_in_use(BlockDriverState *bs);
+enum BlockAcctType {
+ BDRV_ACCT_READ,
+ BDRV_ACCT_WRITE,
+ BDRV_ACCT_FLUSH,
+ BDRV_MAX_IOTYPE,
+};
+
+typedef struct BlockAcctCookie {
+ int64_t bytes;
+ enum BlockAcctType type;
+} BlockAcctCookie;
+
+void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
+ int64_t bytes, enum BlockAcctType type);
+void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie);
+
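
For synchronous requests the cookie can simply live on the stack around the call, which is the pattern the IDE PIO and SCSI flush hunks below use. An illustrative wrapper (not part of the patch) showing the shape:

    /* Illustrative synchronous wrapper; error handling left to the caller. */
    static int read_sectors_accounted(BlockDriverState *bs, int64_t sector_num,
                                      uint8_t *buf, int nb_sectors)
    {
        BlockAcctCookie acct;
        int ret;

        bdrv_acct_start(bs, &acct, nb_sectors * BDRV_SECTOR_SIZE,
                        BDRV_ACCT_READ);
        ret = bdrv_read(bs, sector_num, buf, nb_sectors);
        bdrv_acct_done(bs, &acct);
        return ret;
    }

Note that the patch credits the counters even when the synchronous call fails; the wrapper mirrors that.
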
typedef enum {
BLKDBG_L1_UPDATE,
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);
#endif
+
void *sync_aiocb;
/* I/O stats (display with "info blockstats"). */
- uint64_t rd_bytes;
- uint64_t wr_bytes;
- uint64_t rd_ops;
- uint64_t wr_ops;
- uint64_t flush_ops;
+ uint64_t nr_bytes[BDRV_MAX_IOTYPE];
+ uint64_t nr_ops[BDRV_MAX_IOTYPE];
uint64_t wr_highest_sector;
/* Whether the disk can expand beyond total_sectors */
DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
ncq_tfs->tag);
+ bdrv_acct_done(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct);
qemu_sglist_destroy(&ncq_tfs->sglist);
ncq_tfs->used = 0;
}
ncq_tfs->is_read = 1;
DPRINTF(port, "tag %d aio read %ld\n", ncq_tfs->tag, ncq_tfs->lba);
+
+ bdrv_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
+ (ncq_tfs->sector_count - 1) * BDRV_SECTOR_SIZE,
+ BDRV_ACCT_READ);
ncq_tfs->aiocb = dma_bdrv_read(ncq_tfs->drive->port.ifs[0].bs,
&ncq_tfs->sglist, ncq_tfs->lba,
ncq_cb, ncq_tfs);
ncq_tfs->is_read = 0;
DPRINTF(port, "tag %d aio write %ld\n", ncq_tfs->tag, ncq_tfs->lba);
+
+ bdrv_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
+ (ncq_tfs->sector_count - 1) * BDRV_SECTOR_SIZE,
+ BDRV_ACCT_WRITE);
ncq_tfs->aiocb = dma_bdrv_write(ncq_tfs->drive->port.ifs[0].bs,
&ncq_tfs->sglist, ncq_tfs->lba,
ncq_cb, ncq_tfs);
AHCIDevice *drive;
BlockDriverAIOCB *aiocb;
QEMUSGList sglist;
+ BlockAcctCookie acct;
int is_read;
uint16_t sector_count;
uint64_t lba;
memset(buf, 0, 288);
}
-static int cd_read_sector(BlockDriverState *bs, int lba, uint8_t *buf,
- int sector_size)
+static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size)
{
int ret;
switch(sector_size) {
case 2048:
- ret = bdrv_read(bs, (int64_t)lba << 2, buf, 4);
+ bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
+ ret = bdrv_read(s->bs, (int64_t)lba << 2, buf, 4);
+ bdrv_acct_done(s->bs, &s->acct);
break;
case 2352:
- ret = bdrv_read(bs, (int64_t)lba << 2, buf + 16, 4);
+ bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
+ ret = bdrv_read(s->bs, (int64_t)lba << 2, buf + 16, 4);
+ bdrv_acct_done(s->bs, &s->acct);
if (ret < 0)
return ret;
cd_data_to_raw(buf, lba);
} else {
/* see if a new sector must be read */
if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) {
- ret = cd_read_sector(s->bs, s->lba, s->io_buffer, s->cd_sector_size);
+ ret = cd_read_sector(s, s->lba, s->io_buffer, s->cd_sector_size);
if (ret < 0) {
ide_transfer_stop(s);
ide_atapi_io_error(s, ret);
s->io_buffer_index = 0;
if (s->atapi_dma) {
+ bdrv_acct_start(s->bs, &s->acct, size, BDRV_ACCT_READ);
s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
s->bus->dma->ops->start_dma(s->bus->dma, s,
ide_atapi_cmd_read_dma_cb);
s->status = READY_STAT | SEEK_STAT;
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
ide_set_irq(s->bus);
- eot:
- s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT);
- ide_set_inactive(s);
- return;
+ goto eot;
}
s->io_buffer_index = 0;
#ifdef DEBUG_AIO
printf("aio_read_cd: lba=%u n=%d\n", s->lba, n);
#endif
+
s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset);
s->bus->dma->iov.iov_len = n * 4 * 512;
qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
+
s->bus->dma->aiocb = bdrv_aio_readv(s->bs, (int64_t)s->lba << 2,
&s->bus->dma->qiov, n * 4,
ide_atapi_cmd_read_dma_cb, s);
ASC_MEDIUM_NOT_PRESENT);
goto eot;
}
+
+ return;
+eot:
+ bdrv_acct_done(s->bs, &s->acct);
+ s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT);
+ ide_set_inactive(s);
}
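
Replacing the early returns with goto eot means bdrv_acct_done() and the DMA teardown now run exactly once on every terminating path, while a relaunched transfer returns early without touching the counters. The control shape, reduced to a runnable toy (names are illustrative):

    #include <stdio.h>

    static int transfer_finished;

    static void end_accounting(void) { puts("acct_done"); }
    static void deactivate_dma(void) { puts("dma inactive"); }

    static void read_dma_cb(int ret)
    {
        if (ret < 0) {
            puts("report error");
            goto eot;
        }
        if (!transfer_finished) {
            puts("launch next chunk");  /* request still in flight */
            return;                     /* no accounting yet */
        }
        puts("report success");

    eot:
        end_accounting();               /* exactly once per request */
        deactivate_dma();
    }

    int main(void)
    {
        read_dma_cb(0);                 /* mid-transfer: relaunch only */
        transfer_finished = 1;
        read_dma_cb(0);                 /* completion: accounted and idle */
        return 0;
    }
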
/* start a CD-CDROM read command with DMA */
s->io_buffer_size = 0;
s->cd_sector_size = sector_size;
+ bdrv_acct_start(s->bs, &s->acct, s->packet_transfer_size, BDRV_ACCT_READ);
+
/* XXX: check if BUSY_STAT should be set */
s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
s->bus->dma->ops->start_dma(s->bus->dma, s,
#endif
if (n > s->req_nb_sectors)
n = s->req_nb_sectors;
+
+ bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
ret = bdrv_read(s->bs, sector_num, s->io_buffer, n);
+ bdrv_acct_done(s->bs, &s->acct);
if (ret != 0) {
if (ide_handle_rw_error(s, -ret,
BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ))
return;
eot:
- ide_set_inactive(s);
+ if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
+ bdrv_acct_done(s->bs, &s->acct);
+ }
+ ide_set_inactive(s);
}
static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
s->io_buffer_index = 0;
s->io_buffer_size = 0;
s->dma_cmd = dma_cmd;
+
+ switch (dma_cmd) {
+ case IDE_DMA_READ:
+ bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
+ BDRV_ACCT_READ);
+ break;
+ case IDE_DMA_WRITE:
+ bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
+ BDRV_ACCT_WRITE);
+ break;
+ default:
+ break;
+ }
+
s->bus->dma->ops->start_dma(s->bus->dma, s, ide_dma_cb);
}
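
Only READ and WRITE DMA start a cookie here; the default: branch leaves other commands (e.g. TRIM) unaccounted, and the eot: path earlier applies the same s->dma_cmd test before calling bdrv_acct_done(), so start and done stay strictly paired. One way to state the invariant as code, via a hypothetical shared predicate the two sites could use:

    #include <stdbool.h>

    /* Stand-in for QEMU's enum ide_dma_cmd. */
    enum ide_dma_cmd { IDE_DMA_READ, IDE_DMA_WRITE, IDE_DMA_TRIM };

    /* Hypothetical helper: guarding bdrv_acct_start() and bdrv_acct_done()
     * with one predicate keeps the two call sites from drifting apart. */
    static bool ide_cmd_is_accounted(enum ide_dma_cmd cmd)
    {
        return cmd == IDE_DMA_READ || cmd == IDE_DMA_WRITE;
    }

The patch itself open-codes the test at both sites.
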
n = s->nsector;
if (n > s->req_nb_sectors)
n = s->req_nb_sectors;
+
+ bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE);
ret = bdrv_write(s->bs, sector_num, s->io_buffer, n);
+ bdrv_acct_done(s->bs, &s->acct);
if (ret != 0) {
if (ide_handle_rw_error(s, -ret, BM_STATUS_PIO_RETRY))
}
}
+ bdrv_acct_done(s->bs, &s->acct);
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
}
return;
}
+ bdrv_acct_start(s->bs, &s->acct, 0, BDRV_ACCT_FLUSH);
acb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
if (acb == NULL) {
ide_flush_cb(s, -EIO);
int lba;
int cd_sector_size;
int atapi_dma; /* true if dma is requested for the packet cmd */
+ BlockAcctCookie acct;
/* ATA DMA state */
int io_buffer_size;
QEMUSGList sg;
m->aiocb = NULL;
qemu_sglist_destroy(&s->sg);
ide_atapi_io_error(s, ret);
- io->dma_end(opaque);
- return;
+ goto done;
}
if (s->io_buffer_size > 0) {
ide_atapi_cmd_ok(s);
if (io->len == 0) {
- io->dma_end(opaque);
- return;
+ goto done;
}
/* launch next transfer */
/* Note: media not present is the most likely case */
ide_atapi_cmd_error(s, SENSE_NOT_READY,
ASC_MEDIUM_NOT_PRESENT);
- io->dma_end(opaque);
- return;
+ goto done;
}
+ return;
+
+done:
+ bdrv_acct_done(s->bs, &s->acct);
+ io->dma_end(opaque);
+ return;
}
static void pmac_ide_transfer_cb(void *opaque, int ret)
m->aiocb = NULL;
qemu_sglist_destroy(&s->sg);
ide_dma_error(s);
- io->dma_end(io);
- return;
+ goto done;
}
sector_num = ide_get_sector(s);
}
/* end of DMA ? */
-
if (io->len == 0) {
- io->dma_end(io);
- return;
+ goto done;
}
/* launch next transfer */
if (!m->aiocb)
pmac_ide_transfer_cb(io, -1);
+ return;
+done:
+ if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
+ bdrv_acct_done(s->bs, &s->acct);
+ }
+ io->dma_end(io);
}
static void pmac_ide_transfer(DBDMA_io *io)
s->io_buffer_size = 0;
if (s->drive_kind == IDE_CD) {
+ bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
pmac_ide_atapi_transfer_cb(io, 0);
return;
}
+ switch (s->dma_cmd) {
+ case IDE_DMA_READ:
+ bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
+ break;
+ case IDE_DMA_WRITE:
+ bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_WRITE);
+ break;
+ default:
+ break;
+ }
+
pmac_ide_transfer_cb(io, 0);
}
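
In the macio path a single cookie spans the whole DBDMA transfer: bdrv_acct_start() runs once with io->len up front, the callback re-enters itself per chunk, and bdrv_acct_done() fires only at the done: label once io->len reaches zero. A runnable toy of that control flow (names illustrative):

    #include <stdio.h>

    static int chunks_left = 3;

    static void acct_done(void) { puts("acct_done"); }

    static void transfer_cb(void)
    {
        if (chunks_left > 0) {
            printf("launch chunk, %d left\n", --chunks_left);
            transfer_cb();            /* stands in for the aio completion */
            return;
        }
        acct_done();                  /* only when the whole io is drained */
    }

    int main(void)
    {
        puts("acct_start(io->len)");  /* pmac_ide_transfer: once, up front */
        transfer_cb();
        return 0;
    }
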
struct iovec iov;
QEMUIOVector qiov;
uint32_t status;
+ BlockAcctCookie acct;
} SCSIDiskReq;
struct SCSIDiskState
static void scsi_read_complete(void * opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
int n;
r->req.aiocb = NULL;
+ bdrv_acct_done(s->bs, &r->acct);
+
if (ret) {
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_READ)) {
return;
r->iov.iov_len = n * 512;
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
+
+ bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n,
scsi_read_complete, r);
if (r->req.aiocb == NULL) {
static void scsi_write_complete(void * opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t len;
uint32_t n;
r->req.aiocb = NULL;
+ bdrv_acct_done(s->bs, &r->acct);
+
if (ret) {
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_WRITE)) {
return;
n = r->iov.iov_len / 512;
if (n) {
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
+
+ bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE);
r->req.aiocb = bdrv_aio_writev(s->bs, r->sector, &r->qiov, n,
scsi_write_complete, r);
if (r->req.aiocb == NULL) {
buflen = 8;
break;
case SYNCHRONIZE_CACHE:
+ {
+ BlockAcctCookie acct;
+
+ bdrv_acct_start(s->bs, &acct, 0, BDRV_ACCT_FLUSH);
ret = bdrv_flush(s->bs);
+ bdrv_acct_done(s->bs, &acct);
if (ret < 0) {
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) {
return -1;
}
}
break;
+ }
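
Flushes move no payload, so the cookie is started with 0 bytes: a flush bumps nr_ops[BDRV_ACCT_FLUSH] (reported as flush_operations) without disturbing any byte counter. Since SYNCHRONIZE_CACHE is handled synchronously, a stack-local cookie suffices, as in this illustrative reduction of the hunk above:

    /* Illustrative: account a synchronous flush; only the op count moves. */
    static int flush_accounted(BlockDriverState *bs)
    {
        BlockAcctCookie acct;
        int ret;

        bdrv_acct_start(bs, &acct, 0, BDRV_ACCT_FLUSH);
        ret = bdrv_flush(bs);
        bdrv_acct_done(bs, &acct);      /* nr_ops[BDRV_ACCT_FLUSH]++ */
        return ret;
    }
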
case GET_CONFIGURATION:
memset(outbuf, 0, 8);
/* ??? This should probably return much more information. For now
struct virtio_scsi_inhdr *scsi;
QEMUIOVector qiov;
struct VirtIOBlockReq *next;
+ BlockAcctCookie acct;
} VirtIOBlockReq;
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
stb_p(&req->in->status, status);
virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
virtio_notify(&s->vdev, s->vq);
-
- g_free(req);
}
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
vm_stop(VMSTOP_DISKFULL);
} else {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+ bdrv_acct_done(s->bs, &req->acct);
+ g_free(req);
bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read);
}
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+ bdrv_acct_done(req->dev->bs, &req->acct);
+ g_free(req);
}
static void virtio_blk_flush_complete(void *opaque, int ret)
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+ bdrv_acct_done(req->dev->bs, &req->acct);
+ g_free(req);
}
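
Dropping g_free(req) from virtio_blk_req_complete() is forced by the cookie living inside the request: completion paths must still be able to read req->acct after pushing the virtqueue element, so every caller now frees the request itself, including the paths below that never started accounting. The required ordering, schematically (this mirrors the completions above, it is not new code):

    static void rw_complete_shape(VirtIOBlockReq *req)
    {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); /* no longer frees */
        bdrv_acct_done(req->dev->bs, &req->acct);      /* needs req alive */
        g_free(req);                                   /* now safe */
    }
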
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
*/
if (req->elem.out_num < 2 || req->elem.in_num < 3) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+ g_free(req);
return;
}
*/
if (req->elem.out_num > 2 && req->elem.in_num > 3) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+ g_free(req);
return;
}
stl_p(&req->scsi->data_len, hdr.dxfer_len);
virtio_blk_req_complete(req, status);
+ g_free(req);
}
#else
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+ g_free(req);
}
#endif /* __linux__ */
{
BlockDriverAIOCB *acb;
+ bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);
+
/*
* Make sure all outstanding writes are posted to the backing device.
*/
sector = ldq_p(&req->out->sector);
+ bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);
+
trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);
if (sector & req->dev->sector_mask) {
sector = ldq_p(&req->out->sector);
+ bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);
+
if (sector & req->dev->sector_mask) {
virtio_blk_rw_complete(req, -EIO);
return;
s->serial ? s->serial : "",
MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+ g_free(req);
} else if (type & VIRTIO_BLK_T_OUT) {
qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
req->elem.out_num - 1);
struct XenBlkDev *blkdev;
QLIST_ENTRY(ioreq) list;
+ BlockAcctCookie acct;
};
struct XenBlkDev {
ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
ioreq_unmap(ioreq);
ioreq_finish(ioreq);
+ bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
qemu_bh_schedule(ioreq->blkdev->bh);
}
switch (ioreq->req.operation) {
case BLKIF_OP_READ:
+ bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
ioreq->aio_inflight++;
bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
if (!ioreq->req.nr_segments) {
break;
}
+
+ bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
ioreq->aio_inflight++;
bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
&ioreq->v, ioreq->v.size / BLOCK_SIZE,