 }
 
 static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
-        uint64_t *l2_table, uint64_t mask)
+        uint64_t *l2_table, uint64_t start, uint64_t mask)
 {
     int i;
     uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
 
     if (!offset)
         return 0;
 
-    for (i = 0; i < nb_clusters; i++)
+    for (i = start; i < start + nb_clusters; i++)
         if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
             break;
 
-    return i;
+    return (i - start);
 }
 
 static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
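
With the new start parameter, the function still derives the base offset from
l2_table[0], but begins scanning at entry start and returns only the clusters
counted from there, so contiguity is always judged relative to the first entry
of the table slice. A minimal standalone sketch of these semantics, not part of
the patch; be64() is an unconditional byte swap standing in for QEMU's
be64_to_cpu(), and the cluster size and offsets are made up for the demo:

#include <stdint.h>
#include <stdio.h>

/* Involutory byte swap; stands in for be64_to_cpu() on any host. */
static uint64_t be64(uint64_t v)
{
    uint64_t r = 0;
    for (int b = 0; b < 8; b++)
        r = (r << 8) | ((v >> (8 * b)) & 0xff);
    return r;
}

/* The patched function, verbatim except for the be64() stand-in. */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + i * cluster_size != (be64(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

int main(void)
{
    /* Entries 0-2 are contiguous from 0x10000; entry 3 jumps away. */
    uint64_t l2[4] = {
        be64(0x10000), be64(0x10200), be64(0x10400), be64(0x90000)
    };

    /* Scanning from entry 1 counts entries 1 and 2, which still line
     * up with entry 0's offset; entry 3 ends the run. Prints 2. */
    printf("%d\n", count_contiguous_clusters(3, 0x200, l2, 1, 0));
    return 0;
}
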
     } else {
         /* how many allocated clusters ? */
         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
-                &l2_table[l2_index], QCOW_OFLAG_COPIED);
+                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
     }
 
     nb_available = (c * s->cluster_sectors);
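
For reference, the mask argument strips per-entry flag bits before the offsets
are compared, so passing QCOW_OFLAG_COPIED here lets entries that differ only
in the copied flag still count as one contiguous run. A small sketch, reusing
be64() and count_contiguous_clusters() from the snippet above, and assuming the
flag's usual definition as the top bit of an L2 entry:

#define QCOW_OFLAG_COPIED (1ULL << 63) /* assumed, per the qcow2 header */

int main(void)
{
    /* Two contiguous clusters; only the first has the copied flag. */
    uint64_t l2[2] = {
        be64(0x10000ULL | QCOW_OFLAG_COPIED), be64(0x10200)
    };

    /* With the flag masked out, both entries join one run. Prints 2. */
    printf("%d\n", count_contiguous_clusters(2, 0x200, l2, 0,
                                             QCOW_OFLAG_COPIED));
    return 0;
}
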
     if (cluster_offset & QCOW_OFLAG_COPIED) {
         nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
-                &l2_table[l2_index], 0);
+                &l2_table[l2_index], 0, 0);
 
         cluster_offset &= ~QCOW_OFLAG_COPIED;
         m->nb_clusters = 0;
     while (i < nb_clusters) {
         i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
-                &l2_table[l2_index + i], 0);
+                &l2_table[l2_index], i, 0);
 
         if(be64_to_cpu(l2_table[l2_index + i]))
             break;
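
This last hunk changes behavior rather than just threading the new parameter
through: the old call rebased the scan on &l2_table[l2_index + i] each time
around the loop, so an allocated run reached after a gap of free clusters was
judged contiguous against its own first entry rather than against the run the
allocation started from. The new call keeps the base pointer fixed and passes
i as the start, so discontiguous runs are no longer merged. A sketch of the
difference, again reusing the helpers from the first snippet:

int main(void)
{
    /* A two-cluster run, one free entry, then a cluster elsewhere. */
    uint64_t l2[4] = {
        be64(0x10000), be64(0x10200), 0, be64(0x50000)
    };

    /* Old pattern at i == 3: rebased on entry 3, it counts a fresh
     * run there even though 0x50000 is nowhere near entry 0's run.
     * Prints 1. */
    printf("old: %d\n", count_contiguous_clusters(1, 0x200, &l2[3], 0, 0));

    /* New pattern at i == 3: judged against entry 0, the cluster is
     * not contiguous (0x10000 + 3 * 0x200 != 0x50000), so this prints
     * 0 and the allocation stops at the original run. */
    printf("new: %d\n", count_contiguous_clusters(1, 0x200, l2, 3, 0));
    return 0;
}
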