return 0;
}
-static uint64_t get_cluster_offset(BlockDriverState *bs,
+static int get_cluster_offset(BlockDriverState *bs,
VmdkExtent *extent,
VmdkMetaData *m_data,
- uint64_t offset, int allocate)
+ uint64_t offset,
+ int allocate,
+ uint64_t *cluster_offset)
{
unsigned int l1_index, l2_offset, l2_index;
int min_index, i, j;
uint32_t min_count, *l2_table, tmp = 0;
- uint64_t cluster_offset;
if (m_data)
m_data->valid = 0;
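+ /* flat extents are not split into grains and have no L1/L2 tables,
+  * so report success with a zero cluster offset */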
+ if (extent->flat) {
+ *cluster_offset = 0;
+ return 0;
+ }
l1_index = (offset >> 9) / extent->l1_entry_sectors;
if (l1_index >= extent->l1_size) {
- return 0;
+ return -1;
}
l2_offset = extent->l1_table[l1_index];
if (!l2_offset) {
- return 0;
+ return -1;
}
for (i = 0; i < L2_CACHE_SIZE; i++) {
if (l2_offset == extent->l2_cache_offsets[i]) {
l2_table,
extent->l2_size * sizeof(uint32_t)
) != extent->l2_size * sizeof(uint32_t)) {
- return 0;
+ return -1;
}
extent->l2_cache_offsets[min_index] = l2_offset;
extent->l2_cache_counts[min_index] = 1;
found:
l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
- cluster_offset = le32_to_cpu(l2_table[l2_index]);
+ *cluster_offset = le32_to_cpu(l2_table[l2_index]);
- if (!cluster_offset) {
- if (!allocate)
- return 0;
+ if (!*cluster_offset) {
+ if (!allocate) {
+ return -1;
+ }
// Avoid the L2 tables update for the images that have snapshots.
- cluster_offset = bdrv_getlength(extent->file);
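+ /* place the new grain at the current end of the extent file
+  * and grow the file by one cluster */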
+ *cluster_offset = bdrv_getlength(extent->file);
bdrv_truncate(
extent->file,
- cluster_offset + (extent->cluster_sectors << 9)
+ *cluster_offset + (extent->cluster_sectors << 9)
);
- cluster_offset >>= 9;
- tmp = cpu_to_le32(cluster_offset);
+ *cluster_offset >>= 9;
+ tmp = cpu_to_le32(*cluster_offset);
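+ /* update the cached L2 table with the little-endian entry, as stored on disk */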
l2_table[l2_index] = tmp;
/* First of all we write grain itself, to avoid race condition
* or inappropriate VM shutdown.
*/
if (get_whole_cluster(
- bs, extent, cluster_offset, offset, allocate) == -1)
- return 0;
+ bs, extent, *cluster_offset, offset, allocate) == -1)
+ return -1;
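+ /* hand the new table entry back so the caller can update the on-disk L2 table */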
if (m_data) {
m_data->offset = tmp;
m_data->valid = 1;
}
}
- cluster_offset <<= 9;
- return cluster_offset;
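+ /* convert the grain offset from 512-byte sectors to a byte offset */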
+ *cluster_offset <<= 9;
+ return 0;
}
static VmdkExtent *find_extent(BDRVVmdkState *s,
int nb_sectors, int *pnum)
{
BDRVVmdkState *s = bs->opaque;
-
int64_t index_in_cluster, n, ret;
uint64_t offset;
VmdkExtent *extent;
if (!extent) {
return 0;
}
- if (extent->flat) {
- n = extent->end_sector - sector_num;
- ret = 1;
- } else {
- offset = get_cluster_offset(bs, extent, NULL, sector_num * 512, 0);
- index_in_cluster = sector_num % extent->cluster_sectors;
- n = extent->cluster_sectors - index_in_cluster;
- ret = offset ? 1 : 0;
- }
+ ret = get_cluster_offset(bs, extent, NULL,
+ sector_num * 512, 0, &offset);
+ /* get_cluster_offset() returns 0 on success; invert it so that
+  * ret is 1 when the sector is allocated and 0 when it is not */
+ ret = !ret;
+
+ index_in_cluster = sector_num % extent->cluster_sectors;
+ n = extent->cluster_sectors - index_in_cluster;
if (n > nb_sectors)
n = nb_sectors;
*pnum = n;
if (!extent) {
return -EIO;
}
- cluster_offset = get_cluster_offset(
- bs, extent, NULL, sector_num << 9, 0);
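+ /* allocate == 0: reads never allocate new grains */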
+ ret = get_cluster_offset(
+ bs, extent, NULL,
+ sector_num << 9, 0, &cluster_offset);
index_in_cluster = sector_num % extent->cluster_sectors;
n = extent->cluster_sectors - index_in_cluster;
if (n > nb_sectors)
n = nb_sectors;
- if (!cluster_offset) {
- // try to read from parent image, if exist
+ if (ret) {
+ /* if not allocated, try to read from the parent image, if it exists */
if (bs->backing_hd) {
if (!vmdk_is_cid_valid(bs))
return -1;
{
BDRVVmdkState *s = bs->opaque;
VmdkExtent *extent = NULL;
- int n;
+ int n, ret;
int64_t index_in_cluster;
uint64_t cluster_offset;
VmdkMetaData m_data;
if (!extent) {
return -EIO;
}
- cluster_offset = get_cluster_offset(
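+ /* allocate == 1: a write allocates the grain if it is not present yet */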
+ ret = get_cluster_offset(
bs,
extent,
&m_data,
- sector_num << 9, 1);
- if (!cluster_offset) {
- return -1;
+ sector_num << 9, 1,
+ &cluster_offset);
+ if (ret) {
+ return -EINVAL;
}
index_in_cluster = sector_num % extent->cluster_sectors;
n = extent->cluster_sectors - index_in_cluster;