}
if (sbi->extra_devices) {
unsigned int i;
+ int ret;
+ erofs_blk_t nblocks;
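+ /* the tail block returned by erofs_mapbh(NULL) is the end of the primary
+  * image, which is where the first extra device gets mapped */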
+ nblocks = erofs_mapbh(NULL);
pos_out = erofs_btell(bh_devt, false);
i = 0;
do {
struct erofs_deviceslot dis = {
+ .mapped_blkaddr = cpu_to_le32(nblocks),
.blocks = cpu_to_le32(sbi->devs[i].blocks),
};
- int ret;
+ memcpy(dis.tag, sbi->devs[i].tag, sizeof(dis.tag));
ret = dev_write(sbi, &dis, pos_out, sizeof(dis));
if (ret)
return ret;
pos_out += sizeof(dis);
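+ /* map the next extra device contiguously after this one */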
+ nblocks += sbi->devs[i].blocks;
} while (++i < sbi->extra_devices);
bh_devt->op = &erofs_drop_directly_bhops;
erofs_bdrop(bh_devt, false);
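For reference, both the write loop above and the device-table parsing path in the next hunk operate on the same 128-byte on-disk slot, whose layout in erofs_fs.h is roughly:

struct erofs_deviceslot {
	u8 tag[64];		/* device tag (ID/digest), now also filled by mkfs */
	__le32 blocks;		/* total blocks of this device */
	__le32 mapped_blkaddr;	/* start of this device in the flat block address space */
	u8 reserved[56];
};
#define EROFS_DEVT_SLOT_SIZE	sizeof(struct erofs_deviceslot)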
sbi->devs[i].mapped_blkaddr = le32_to_cpu(dis.mapped_blkaddr);
sbi->devs[i].blocks = le32_to_cpu(dis.blocks);
+ memcpy(sbi->devs[i].tag, dis.tag, sizeof(dis.tag));
sbi->total_blocks += sbi->devs[i].blocks;
pos += EROFS_DEVT_SLOT_SIZE;
}
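With a 4096-block primary image and two extra devices of 100 and 200 blocks, for example, the two slots come out with mapped_blkaddr 4096 and 4196 respectively, i.e. each extra device is addressed right after the previous one. The rebuild hunk below is what fills sbi.devs[] in the first place, turning each source image into one such device slot: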
struct erofs_sb_info *src;
unsigned int extra_devices = 0;
erofs_blk_t nblocks;
- int ret;
+ int ret, idx;
list_for_each_entry(src, &rebuild_src_list, list) {
ret = erofs_rebuild_load_tree(root, src);
if (ret)
return ret;
extra_devices += src->extra_devices;
}
list_for_each_entry(src, &rebuild_src_list, list) {
- if (extra_devices)
+ u8 *tag = NULL;
+
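+ /* reuse the tag/size of the source's own extra device when it has one;
+  * otherwise the source's primary image itself becomes the extra device
+  * and gets a UUID-derived tag below */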
+ if (extra_devices) {
nblocks = src->devs[0].blocks;
- else
+ tag = src->devs[0].tag;
+ } else {
nblocks = src->primarydevice_blocks;
+ }
DBG_BUGON(src->dev < 1);
- sbi.devs[src->dev - 1].blocks = nblocks;
+ idx = src->dev - 1;
+ sbi.devs[idx].blocks = nblocks;
+ if (tag && *tag)
+ memcpy(sbi.devs[idx].tag, tag, sizeof(sbi.devs[0].tag));
+ else
+ /* convert UUID of the source image to a hex string */
+ sprintf((char *)sbi.devs[idx].tag,
+ "%04x%04x%04x%04x%04x%04x%04x%04x",
+ (src->uuid[0] << 8) | src->uuid[1],
+ (src->uuid[2] << 8) | src->uuid[3],
+ (src->uuid[4] << 8) | src->uuid[5],
+ (src->uuid[6] << 8) | src->uuid[7],
+ (src->uuid[8] << 8) | src->uuid[9],
+ (src->uuid[10] << 8) | src->uuid[11],
+ (src->uuid[12] << 8) | src->uuid[13],
+ (src->uuid[14] << 8) | src->uuid[15]);
}
return 0;
}
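As a side note, the fallback sprintf above simply renders the source image's 16-byte UUID as 32 lowercase hex digits, which fits comfortably in the 64-byte tag field. A minimal standalone sketch of the same conversion (uuid_to_tag is a purely illustrative name, not an erofs-utils helper):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: format a 16-byte UUID as the 32-character
 * lowercase hex string used as a fallback device tag. */
static void uuid_to_tag(const uint8_t uuid[16], char tag[33])
{
	int i;

	for (i = 0; i < 16; i++)
		sprintf(tag + 2 * i, "%02x", uuid[i]);
}

int main(void)
{
	const uint8_t uuid[16] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
	};
	char tag[33];

	uuid_to_tag(uuid, tag);
	printf("%s\n", tag);	/* 123456789abcdef00123456789abcdef */
	return 0;
}

Each %04x group in the patch consumes two UUID bytes, so its output is byte-for-byte identical to this per-byte %02x form.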