#include "block-migration.h"
#include <assert.h>
-#define SECTOR_BITS 9
-#define SECTOR_SIZE (1 << SECTOR_BITS)
-#define SECTOR_MASK ~(SECTOR_SIZE - 1);
-
-#define BLOCK_SIZE (block_mig_state->sectors_per_block << SECTOR_BITS)
+#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)
#define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
#define BLK_MIG_FLAG_EOS 0x02
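+/*
+ * Wire format sketch (as used by the code below): each device block is
+ * preceded by a big-endian 64-bit header packing the sector number and
+ * the flags,
+ *
+ *     header = (sector << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK;
+ *     flags  = header & ~BDRV_SECTOR_MASK;      <- low BDRV_SECTOR_BITS bits
+ *     sector = header >> BDRV_SECTOR_BITS;
+ *
+ * followed by the device name (one length byte plus the name) and
+ * BLOCK_SIZE bytes of data.
+ */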
int no_dirty;
QEMUFile *load_file;
BlkMigDevState *bmds_first;
- int sectors_per_block;
BlkMigBlock *first_blk;
BlkMigBlock *last_blk;
int submitted;
blk->buf = qemu_malloc(BLOCK_SIZE);
cur_sector = bms->cur_sector;
- total_sectors = bdrv_getlength(bs) >> SECTOR_BITS;
+ total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
if (bms->shared_base) {
while (cur_sector < bms->total_sectors &&
printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
fflush(stdout);
block_mig_state->print_completion +=
- (block_mig_state->sectors_per_block * 10000);
+ (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
}
- /* we going to transfder BLOCK_SIZE any way even if it is not allocated */
- nr_sectors = block_mig_state->sectors_per_block;
+ /* we are going to transfer a full block even if it is not allocated */
+ nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
- cur_sector &= ~((int64_t)block_mig_state->sectors_per_block -1);
+ cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);
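+    /* round cur_sector down to the start of its dirty chunk */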
- if (total_sectors - cur_sector < block_mig_state->sectors_per_block) {
+ if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
nr_sectors = (total_sectors - cur_sector);
}
blk->next = NULL;
blk->iov.iov_base = blk->buf;
- blk->iov.iov_len = nr_sectors * SECTOR_SIZE;
+ blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
fflush(stdout);
block_mig_state->print_completion +=
- (block_mig_state->sectors_per_block * 10000);
+ (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
}
- cur_sector &= ~((int64_t)block_mig_state->sectors_per_block -1);
+ cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);
- /* we going to transfer BLOCK_SIZE any way even if it is not allocated */
- nr_sectors = block_mig_state->sectors_per_block;
+ /* we are going to transfer a full block even if it is not allocated */
+ nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
- if (total_sectors - cur_sector < block_mig_state->sectors_per_block) {
+ if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
nr_sectors = (total_sectors - cur_sector);
}
bdrv_reset_dirty(bs, cur_sector, nr_sectors);
/* sector number and flags */
- qemu_put_be64(f, (cur_sector << SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK);
+ qemu_put_be64(f, (cur_sector << BDRV_SECTOR_BITS)
+ | BLK_MIG_FLAG_DEVICE_BLOCK);
/* device name */
len = strlen(bs->device_name);
qemu_put_buffer(f, tmp_buf, BLOCK_SIZE);
- bmds->cur_sector = cur_sector + block_mig_state->sectors_per_block;
+ bmds->cur_sector = cur_sector + BDRV_SECTORS_PER_DIRTY_CHUNK;
qemu_free(tmp_buf);
int len;
/* sector number and flags */
- qemu_put_be64(f, (blk->sector << SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK);
+ qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
+ | BLK_MIG_FLAG_DEVICE_BLOCK);
/* device name */
len = strlen(blk->bmds->bs->device_name);
bmds = qemu_mallocz(sizeof(BlkMigDevState));
bmds->bs = bs;
bmds->bulk_completed = 0;
- bmds->total_sectors = bdrv_getlength(bs) >> SECTOR_BITS;
+ bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
bmds->shared_base = block_mig_state->shared_base;
if (bmds->shared_base) {
blk_mig_save_dev_info(f, bmds);
}
}
-
- block_mig_state->sectors_per_block = bdrv_get_sectors_per_chunk();
}
static int blk_mig_save_bulked_block(QEMUFile *f, int is_async)
for (sector = 0; sector < bmds->cur_sector;) {
if (bdrv_get_dirty(bmds->bs, sector)) {
if (bdrv_read(bmds->bs, sector, buf,
- block_mig_state->sectors_per_block) < 0) {
+ BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
/* FIXME: add error handling */
}
/* sector number and flags */
- qemu_put_be64(f, (sector << SECTOR_BITS)
+ qemu_put_be64(f, (sector << BDRV_SECTOR_BITS)
| BLK_MIG_FLAG_DEVICE_BLOCK);
/* device name */
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)bmds->bs->device_name, len);
- qemu_put_buffer(f, buf,
- (block_mig_state->sectors_per_block *
- SECTOR_SIZE));
+ qemu_put_buffer(f, buf, BLOCK_SIZE);
bdrv_reset_dirty(bmds->bs, sector,
- block_mig_state->sectors_per_block);
+ BDRV_SECTORS_PER_DIRTY_CHUNK);
}
- sector += block_mig_state->sectors_per_block;
+ sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
}
}
}
BlockDriverState *bs;
uint8_t *buf;
- block_mig_state->sectors_per_block = bdrv_get_sectors_per_chunk();
buf = qemu_malloc(BLOCK_SIZE);
do {
addr = qemu_get_be64(f);
- flags = addr & ~SECTOR_MASK;
- addr &= SECTOR_MASK;
+ flags = addr & ~BDRV_SECTOR_MASK;
+ addr >>= BDRV_SECTOR_BITS;
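+    /* addr is now a sector index; the flags live in the low BDRV_SECTOR_BITS bits */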
if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
/* get device name */
qemu_get_buffer(f, buf, BLOCK_SIZE);
if (bs != NULL) {
- bdrv_write(bs, (addr >> SECTOR_BITS),
- buf, block_mig_state->sectors_per_block);
+ bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
} else {
printf("Error unknown block device %s\n", device_name);
/* FIXME: add error handling */
#include <windows.h>
#endif
-#define SECTOR_BITS 9
-#define SECTOR_SIZE (1 << SECTOR_BITS)
-#define SECTORS_PER_DIRTY_CHUNK 8
-
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque);
bdrv_delete(bs1);
return ret;
}
- total_size = bdrv_getlength(bs1) >> SECTOR_BITS;
+ total_size = bdrv_getlength(bs1) >> BDRV_SECTOR_BITS;
if (bs1->drv && bs1->drv->protocol_name)
is_protocol = 1;
return ret;
}
if (drv->bdrv_getlength) {
- bs->total_sectors = bdrv_getlength(bs) >> SECTOR_BITS;
+ bs->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
}
#ifndef _WIN32
if (bs->is_temporary) {
return -ENOTSUP;
}
- total_sectors = bdrv_getlength(bs) >> SECTOR_BITS;
+ total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
for (i = 0; i < total_sectors;) {
if (drv->bdrv_is_allocated(bs, i, 65536, &n)) {
for(j = 0; j < n; j++) {
{
int64_t start, end;
- start = sector_num / SECTORS_PER_DIRTY_CHUNK;
- end = (sector_num + nb_sectors) / SECTORS_PER_DIRTY_CHUNK;
+ start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
+ end = (sector_num + nb_sectors) / BDRV_SECTORS_PER_DIRTY_CHUNK;
for (; start <= end; start++) {
bs->dirty_bitmap[start] = dirty;
int bdrv_pread(BlockDriverState *bs, int64_t offset,
void *buf, int count1)
{
- uint8_t tmp_buf[SECTOR_SIZE];
+ uint8_t tmp_buf[BDRV_SECTOR_SIZE];
int len, nb_sectors, count;
int64_t sector_num;
count = count1;
/* first read to align to sector start */
- len = (SECTOR_SIZE - offset) & (SECTOR_SIZE - 1);
+ len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
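+    /* distance to the next sector boundary, e.g. offset 1000 with
+     * 512-byte sectors gives len = 24 */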
if (len > count)
len = count;
- sector_num = offset >> SECTOR_BITS;
+ sector_num = offset >> BDRV_SECTOR_BITS;
if (len > 0) {
if (bdrv_read(bs, sector_num, tmp_buf, 1) < 0)
return -EIO;
- memcpy(buf, tmp_buf + (offset & (SECTOR_SIZE - 1)), len);
+ memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
count -= len;
if (count == 0)
return count1;
}
/* read the sectors "in place" */
- nb_sectors = count >> SECTOR_BITS;
+ nb_sectors = count >> BDRV_SECTOR_BITS;
if (nb_sectors > 0) {
if (bdrv_read(bs, sector_num, buf, nb_sectors) < 0)
return -EIO;
sector_num += nb_sectors;
- len = nb_sectors << SECTOR_BITS;
+ len = nb_sectors << BDRV_SECTOR_BITS;
buf += len;
count -= len;
}
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
const void *buf, int count1)
{
- uint8_t tmp_buf[SECTOR_SIZE];
+ uint8_t tmp_buf[BDRV_SECTOR_SIZE];
int len, nb_sectors, count;
int64_t sector_num;
count = count1;
/* first write to align to sector start */
- len = (SECTOR_SIZE - offset) & (SECTOR_SIZE - 1);
+ len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
if (len > count)
len = count;
- sector_num = offset >> SECTOR_BITS;
+ sector_num = offset >> BDRV_SECTOR_BITS;
if (len > 0) {
if (bdrv_read(bs, sector_num, tmp_buf, 1) < 0)
return -EIO;
- memcpy(tmp_buf + (offset & (SECTOR_SIZE - 1)), buf, len);
+ memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
if (bdrv_write(bs, sector_num, tmp_buf, 1) < 0)
return -EIO;
count -= len;
}
/* write the sectors "in place" */
- nb_sectors = count >> SECTOR_BITS;
+ nb_sectors = count >> BDRV_SECTOR_BITS;
if (nb_sectors > 0) {
if (bdrv_write(bs, sector_num, buf, nb_sectors) < 0)
return -EIO;
sector_num += nb_sectors;
- len = nb_sectors << SECTOR_BITS;
+ len = nb_sectors << BDRV_SECTOR_BITS;
buf += len;
count -= len;
}
return -ENOMEDIUM;
if (!drv->bdrv_getlength) {
/* legacy mode */
- return bs->total_sectors * SECTOR_SIZE;
+ return bs->total_sectors * BDRV_SECTOR_SIZE;
}
return drv->bdrv_getlength(bs);
}
if (length < 0)
length = 0;
else
- length = length >> SECTOR_BITS;
+ length = length >> BDRV_SECTOR_BITS;
*nb_sectors_ptr = length;
}
if (ret) {
/* Update stats even though technically transfer has not happened. */
- bs->rd_bytes += (unsigned) nb_sectors * SECTOR_SIZE;
+ bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
bs->rd_ops ++;
}
if (ret) {
/* Update stats even though technically transfer has not happened. */
- bs->wr_bytes += (unsigned) nb_sectors * SECTOR_SIZE;
+ bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
bs->wr_ops ++;
}
int64_t i;
uint8_t test;
- bitmap_size = (bdrv_getlength(bs) >> SECTOR_BITS);
- bitmap_size /= SECTORS_PER_DIRTY_CHUNK;
+ bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
+ bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK;
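+        /* one byte of dirty state per BDRV_SECTORS_PER_DIRTY_CHUNK sectors,
+         * i.e. one byte per 4 KiB of disk with the sizes defined in block.h */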
bitmap_size++;
bs->dirty_bitmap = qemu_mallocz(bitmap_size);
int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
- int64_t chunk = sector / (int64_t)SECTORS_PER_DIRTY_CHUNK;
+ int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
if (bs->dirty_bitmap != NULL &&
- (sector << SECTOR_BITS) <= bdrv_getlength(bs)) {
+ (sector << BDRV_SECTOR_BITS) <= bdrv_getlength(bs)) {
return bs->dirty_bitmap[chunk];
} else {
return 0;
{
set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
}
-
-int bdrv_get_sectors_per_chunk(void)
-{
- /* size must be 2^x */
- return SECTORS_PER_DIRTY_CHUNK;
-}
#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_CACHE_WB)
+#define BDRV_SECTOR_BITS 9
+#define BDRV_SECTOR_SIZE (1 << BDRV_SECTOR_BITS)
+#define BDRV_SECTOR_MASK ~(BDRV_SECTOR_SIZE - 1)
+
void bdrv_info(Monitor *mon);
void bdrv_info_stats(Monitor *mon);
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
int64_t pos, int size);
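+/* Granularity of dirty tracking: 8 sectors x 512 bytes = 4 KiB per chunk;
+ * this is also the unit (BLOCK_SIZE) moved by block migration. */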
+#define BDRV_SECTORS_PER_DIRTY_CHUNK 8
+
void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable);
int bdrv_get_dirty(BlockDriverState *bs, int64_t sector);
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
int nr_sectors);
-int bdrv_get_sectors_per_chunk(void);
#endif