int head = 0;
int tail = 0;
- int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
- BDRV_REQUEST_MAX_SECTORS);
- if (bs->bl.write_zeroes_alignment) {
- assert(is_power_of_2(bs->bl.write_zeroes_alignment));
- head = sector_num & (bs->bl.write_zeroes_alignment - 1);
- tail = (sector_num + nb_sectors) & (bs->bl.write_zeroes_alignment - 1);
- max_write_zeroes &= ~(bs->bl.write_zeroes_alignment - 1);
+ int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
+ int write_zeroes_sector_align =
+ bs->bl.pwrite_zeroes_alignment >> BDRV_SECTOR_BITS;
+
+ max_write_zeroes >>= BDRV_SECTOR_BITS;
+ if (write_zeroes_sector_align) {
+ assert(is_power_of_2(bs->bl.pwrite_zeroes_alignment));
+ head = sector_num & (write_zeroes_sector_align - 1);
+ tail = (sector_num + nb_sectors) & (write_zeroes_sector_align - 1);
+ max_write_zeroes &= ~(write_zeroes_sector_align - 1);
}
+ assert(nb_sectors <= BDRV_REQUEST_MAX_SECTORS);
while (nb_sectors > 0 && !ret) {
int num = nb_sectors;
/* Align request.  Block drivers can expect the "bulk" of the request to be aligned. */
if (head) {
/* Make a small request up to the first aligned sector. */
- num = MIN(nb_sectors, bs->bl.write_zeroes_alignment - head);
+ num = MIN(nb_sectors, write_zeroes_sector_align - head);
head = 0;
- } else if (tail && num > bs->bl.write_zeroes_alignment) {
+ } else if (tail && num > write_zeroes_sector_align) {
/* Shorten the request to the last aligned sector. */
num -= tail;
}
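For reference, a minimal standalone sketch (not part of the patch) of how the head/tail values computed above split an unaligned request into at most three pieces; the 8-sector alignment and the helper name are invented for illustration:

#include <inttypes.h>
#include <stdio.h>

/* Illustration only: "align" plays the role of write_zeroes_sector_align. */
static void show_split(int64_t sector_num, int nb_sectors, int align)
{
    int head = sector_num & (align - 1);
    int tail = (sector_num + nb_sectors) & (align - 1);

    while (nb_sectors > 0) {
        int num = nb_sectors;
        if (head) {
            /* Small request up to the first aligned sector. */
            num = num < align - head ? num : align - head;
            head = 0;
        } else if (tail && num > align) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }
        printf("zero %d sectors at %" PRId64 "\n", num, sector_num);
        sector_num += num;
        nb_sectors -= num;
    }
}

int main(void)
{
    show_split(5, 20, 8);   /* prints chunks of 3, 16, and 1 sectors */
    return 0;
}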
bs->bl.discard_alignment = iscsilun->block_size >> BDRV_SECTOR_BITS;
}
- if (iscsilun->bl.max_ws_len < 0xffffffff) {
- bs->bl.max_write_zeroes =
- sector_limits_lun2qemu(iscsilun->bl.max_ws_len, iscsilun);
+ if (iscsilun->bl.max_ws_len < 0xffffffff / iscsilun->block_size) {
+ bs->bl.max_pwrite_zeroes =
+ iscsilun->bl.max_ws_len * iscsilun->block_size;
}
if (iscsilun->lbp.lbpws) {
- bs->bl.write_zeroes_alignment =
- sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
+ bs->bl.pwrite_zeroes_alignment =
+ iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
} else {
- bs->bl.write_zeroes_alignment =
- iscsilun->block_size >> BDRV_SECTOR_BITS;
+ bs->bl.pwrite_zeroes_alignment = iscsilun->block_size;
}
bs->bl.opt_transfer_length =
sector_limits_lun2qemu(iscsilun->bl.opt_xfer_len, iscsilun);
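The reworked max_ws_len test above divides by the block size rather than multiplying, so the byte conversion that follows cannot overflow 32 bits. A rough sketch of the same guard pattern in isolation (names and types are illustrative, not QEMU API):

#include <stdbool.h>
#include <stdint.h>

/* Only convert a block count to bytes when the product is known to stay
 * below a 32-bit ceiling. */
static bool byte_limit_fits(uint32_t nblocks, uint32_t block_size)
{
    return block_size != 0 && nblocks < UINT32_MAX / block_size;
}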
{
BDRVQcow2State *s = bs->opaque;
- bs->bl.write_zeroes_alignment = s->cluster_sectors;
+ bs->bl.pwrite_zeroes_alignment = s->cluster_size;
}
static int qcow2_set_key(BlockDriverState *bs, const char *key)
{
BDRVQEDState *s = bs->opaque;
- bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
+ bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}
/* We have nothing to do for QED reopen, stubs just return success */
for (i = 0; i < s->num_extents; i++) {
if (!s->extents[i].flat) {
- bs->bl.write_zeroes_alignment =
- MAX(bs->bl.write_zeroes_alignment,
- s->extents[i].cluster_sectors);
+ bs->bl.pwrite_zeroes_alignment =
+ MAX(bs->bl.pwrite_zeroes_alignment,
+ s->extents[i].cluster_sectors << BDRV_SECTOR_BITS);
}
}
}
/* optimal alignment for discard requests in sectors */
int64_t discard_alignment;
- /* maximum number of sectors that can zeroized at once */
- int max_write_zeroes;
+ /* maximum number of bytes that can be zeroized at once (since it is
+ * signed, it must be < 2G, if set) */
+ int32_t max_pwrite_zeroes;
- /* optimal alignment for write zeroes requests in sectors */
- int64_t write_zeroes_alignment;
+ /* optimal alignment for write zeroes requests in bytes; must be a
+ * power of 2, and less than max_pwrite_zeroes if that is set */
+ uint32_t pwrite_zeroes_alignment;
/* optimal transfer length in sectors */
int opt_transfer_length;
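Taken together, a driver's refresh_limits callback would now fill these fields in bytes rather than sectors. A minimal sketch, not taken from any real driver (the 64 KiB cluster size is invented):

static void example_refresh_limits(BlockDriverState *bs, Error **errp)
{
    const uint32_t cluster_size = 64 * 1024;   /* hypothetical cluster size */

    /* Alignment is expressed in bytes and must be a power of 2. */
    bs->bl.pwrite_zeroes_alignment = cluster_size;
    /* The byte-based maximum is signed, so keep it below 2G, rounded down
     * to a multiple of the alignment. */
    bs->bl.max_pwrite_zeroes = INT32_MAX & ~(cluster_size - 1);
}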