};
char sha256[32];
unsigned int device_id;
- erofs_off_t chunksize;
+ union {
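+		/* deduplicated chunks keep their size; unhashed (tar) chunks
+		 * keep the source offset of their data instead */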
+ erofs_off_t chunksize;
+ erofs_off_t sourceoffset;
+ };
erofs_blk_t blkaddr;
};
static struct hashmap blob_hashmap;
static FILE *blobfile;
static erofs_blk_t remapped_base;
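+/* amount of chunk data laid out in the image itself (device 0) */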
+static erofs_off_t datablob_size;
static bool multidev;
static struct erofs_buffer_head *bh_devt;
struct erofs_blobchunk erofs_holechunk = {
};
static LIST_HEAD(unhashed_blobchunks);
-struct erofs_blobchunk *erofs_get_unhashed_chunk(erofs_off_t chunksize,
- unsigned int device_id, erofs_blk_t blkaddr)
+static struct erofs_blobchunk *erofs_get_unhashed_chunk(unsigned int device_id,
+ erofs_blk_t blkaddr, erofs_off_t sourceoffset)
{
struct erofs_blobchunk *chunk;
if (!chunk)
return ERR_PTR(-ENOMEM);
- chunk->chunksize = chunksize;
chunk->device_id = device_id;
chunk->blkaddr = blkaddr;
+ chunk->sourceoffset = sourceoffset;
list_add_tail(&chunk->list, &unhashed_blobchunks);
return chunk;
}
blkpos = ftell(blobfile);
DBG_BUGON(erofs_blkoff(sbi, blkpos));
- if (multidev)
+ if (sbi->extra_devices)
chunk->device_id = 1;
else
chunk->device_id = 0;
struct erofs_inode_chunk_index idx = {0};
erofs_blk_t extent_start = EROFS_NULL_ADDR;
erofs_blk_t extent_end, chunkblks;
+ erofs_off_t source_offset;
unsigned int dst, src, unit;
bool first_extent = true;
if (extent_start == EROFS_NULL_ADDR ||
idx.blkaddr != extent_end) {
if (extent_start != EROFS_NULL_ADDR) {
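+				/* record the finished extent in the block list (tar mapfile) */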
+ tarerofs_blocklist_write(extent_start,
+ extent_end - extent_start,
+ source_offset);
erofs_droid_blocklist_write_extent(inode,
extent_start,
extent_end - extent_start,
first_extent = false;
}
extent_start = idx.blkaddr;
+ source_offset = chunk->sourceoffset;
}
extent_end = idx.blkaddr + chunkblks;
idx.device_id = cpu_to_le16(chunk->device_id);
memcpy(inode->chunkindexes + dst, &idx, sizeof(idx));
}
off = roundup(off, unit);
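+	/* flush the final extent, if any */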
+ if (extent_start != EROFS_NULL_ADDR)
+ tarerofs_blocklist_write(extent_start, extent_end - extent_start,
+ source_offset);
erofs_droid_blocklist_write_extent(inode, extent_start,
extent_start == EROFS_NULL_ADDR ?
0 : extent_end - extent_start,
chunksize = 1ULL << chunkbits;
count = DIV_ROUND_UP(inode->i_size, chunksize);
- if (multidev)
+ if (sbi->extra_devices)
inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
unit = sizeof(struct erofs_inode_chunk_index);
return ret;
}
-int erofs_blob_remap(struct erofs_sb_info *sbi)
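+/*
+ * Set up chunk indexes for a tar entry: each chunk records the device and
+ * block address it will occupy plus the source offset of its data in the
+ * tar stream.
+ */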
+int tarerofs_write_chunkes(struct erofs_inode *inode, erofs_off_t data_offset)
+{
+ struct erofs_sb_info *sbi = inode->sbi;
+ unsigned int chunkbits = ilog2(inode->i_size - 1) + 1;
+ unsigned int count, unit, device_id;
+ erofs_off_t chunksize, len, pos;
+ erofs_blk_t blkaddr;
+ struct erofs_inode_chunk_index *idx;
+
+ if (chunkbits < sbi->blkszbits)
+ chunkbits = sbi->blkszbits;
+ if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
+ chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;
+
+ inode->u.chunkformat |= chunkbits - sbi->blkszbits;
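+	/*
+	 * With extra devices, chunks point at the tar stream (device 1) at
+	 * the given data offset; otherwise space is accounted in the image's
+	 * own data area via datablob_size.
+	 */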
+ if (sbi->extra_devices) {
+ device_id = 1;
+ inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
+ unit = sizeof(struct erofs_inode_chunk_index);
+ DBG_BUGON(erofs_blkoff(sbi, data_offset));
+ blkaddr = erofs_blknr(sbi, data_offset);
+ } else {
+ device_id = 0;
+ unit = EROFS_BLOCK_MAP_ENTRY_SIZE;
+ DBG_BUGON(erofs_blkoff(sbi, datablob_size));
+ blkaddr = erofs_blknr(sbi, datablob_size);
+ datablob_size += round_up(inode->i_size, erofs_blksiz(sbi));
+ }
+ chunksize = 1ULL << chunkbits;
+ count = DIV_ROUND_UP(inode->i_size, chunksize);
+
+ inode->extent_isize = count * unit;
+ idx = calloc(count, max(sizeof(*idx), sizeof(void *)));
+ if (!idx)
+ return -ENOMEM;
+ inode->chunkindexes = idx;
+
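+	/* reserve one unhashed chunk per chunksize-sized piece of the file */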
+ for (pos = 0; pos < inode->i_size; pos += len) {
+ struct erofs_blobchunk *chunk;
+
+ len = min_t(erofs_off_t, inode->i_size - pos, chunksize);
+
+ chunk = erofs_get_unhashed_chunk(device_id, blkaddr,
+ data_offset);
+ if (IS_ERR(chunk)) {
+ free(inode->chunkindexes);
+ inode->chunkindexes = NULL;
+ return PTR_ERR(chunk);
+ }
+
+ *(void **)idx++ = chunk;
+ blkaddr += erofs_blknr(sbi, len);
+ data_offset += len;
+ }
+ inode->datalayout = EROFS_INODE_CHUNK_BASED;
+ return 0;
+}
+
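+/*
+ * Write out deferred blob data: fill the reserved device table slots when
+ * extra devices are used, or allocate a DATA area and copy the staged blob
+ * file into the image.
+ */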
+int erofs_mkfs_dump_blobs(struct erofs_sb_info *sbi)
{
struct erofs_buffer_head *bh;
ssize_t length;
erofs_off_t pos_in, pos_out;
ssize_t ret;
- fflush(blobfile);
- length = ftell(blobfile);
- if (length < 0)
- return -errno;
- if (multidev) {
- struct erofs_deviceslot dis = {
- .blocks = erofs_blknr(sbi, length),
- };
+ if (blobfile) {
+ fflush(blobfile);
+ length = ftell(blobfile);
+ if (length < 0)
+ return -errno;
- pos_out = erofs_btell(bh_devt, false);
- ret = dev_write(sbi, &dis, pos_out, sizeof(dis));
- if (ret)
- return ret;
+ if (sbi->extra_devices)
+ sbi->devs[0].blocks = erofs_blknr(sbi, length);
+ else
+ datablob_size = length;
+ }
+
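+	/* extra devices: only the reserved device table slots need filling */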
+ if (sbi->extra_devices) {
+ unsigned int i;
+ pos_out = erofs_btell(bh_devt, false);
+ i = 0;
+ do {
+ struct erofs_deviceslot dis = {
+ .blocks = cpu_to_le32(sbi->devs[i].blocks),
+ };
+ int ret;
+
+ ret = dev_write(sbi, &dis, pos_out, sizeof(dis));
+ if (ret)
+ return ret;
+ pos_out += sizeof(dis);
+ } while (++i < sbi->extra_devices);
bh_devt->op = &erofs_drop_directly_bhops;
erofs_bdrop(bh_devt, false);
return 0;
}
- if (!length) /* bail out if there is no chunked data */
- return 0;
- bh = erofs_balloc(DATA, length, 0, 0);
+
+ bh = erofs_balloc(DATA, blobfile ? datablob_size : 0, 0, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
erofs_mapbh(bh->block);
+
pos_out = erofs_btell(bh, false);
- pos_in = 0;
remapped_base = erofs_blknr(sbi, pos_out);
- ret = erofs_copy_file_range(fileno(blobfile), &pos_in,
- sbi->devfd, &pos_out, length);
+ if (blobfile) {
+ pos_in = 0;
+ ret = erofs_copy_file_range(fileno(blobfile), &pos_in,
+ sbi->devfd, &pos_out, datablob_size);
+		ret = ret < (ssize_t)datablob_size ? -EIO : 0;
+ } else {
+ ret = 0;
+ }
bh->op = &erofs_drop_directly_bhops;
erofs_bdrop(bh, false);
- return ret < length ? -EIO : 0;
+ return ret;
}
void erofs_blob_exit(void)
return 0;
}
-int erofs_generate_devtable(struct erofs_sb_info *sbi)
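+/*
+ * Allocate per-device bookkeeping and reserve @devices device table slots;
+ * the slots themselves are written later by erofs_mkfs_dump_blobs().
+ */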
+int erofs_mkfs_init_devices(struct erofs_sb_info *sbi, unsigned int devices)
{
- struct erofs_deviceslot dis;
-
- if (!multidev)
+ if (!devices)
return 0;
- bh_devt = erofs_balloc(DEVT, sizeof(dis), 0, 0);
- if (IS_ERR(bh_devt))
- return PTR_ERR(bh_devt);
+ sbi->devs = calloc(devices, sizeof(sbi->devs[0]));
+ if (!sbi->devs)
+ return -ENOMEM;
- dis = (struct erofs_deviceslot) {};
+ bh_devt = erofs_balloc(DEVT,
+ sizeof(struct erofs_deviceslot) * devices, 0, 0);
+ if (IS_ERR(bh_devt)) {
+ free(sbi->devs);
+ return PTR_ERR(bh_devt);
+ }
erofs_mapbh(bh_devt->block);
bh_devt->op = &erofs_skip_write_bhops;
sbi->devt_slotoff = erofs_btell(bh_devt, false) / EROFS_DEVT_SLOT_SIZE;
- sbi->extra_devices = 1;
+ sbi->extra_devices = devices;
erofs_sb_set_device_table(sbi);
return 0;
}
return ret;
}
-int tarerofs_write_chunk_indexes(struct erofs_inode *inode, erofs_blk_t blkaddr)
-{
- struct erofs_sb_info *sbi = inode->sbi;
- unsigned int chunkbits = ilog2(inode->i_size - 1) + 1;
- unsigned int count, unit;
- erofs_off_t chunksize, len, pos;
- struct erofs_inode_chunk_index *idx;
-
- if (chunkbits < sbi->blkszbits)
- chunkbits = sbi->blkszbits;
- inode->u.chunkformat |= chunkbits - sbi->blkszbits;
- inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
- chunksize = 1ULL << chunkbits;
- count = DIV_ROUND_UP(inode->i_size, chunksize);
-
- unit = sizeof(struct erofs_inode_chunk_index);
- inode->extent_isize = count * unit;
- idx = calloc(count, max(sizeof(*idx), sizeof(void *)));
- if (!idx)
- return -ENOMEM;
- inode->chunkindexes = idx;
-
- for (pos = 0; pos < inode->i_size; pos += len) {
- struct erofs_blobchunk *chunk;
-
- len = min_t(erofs_off_t, inode->i_size - pos, chunksize);
-
- chunk = erofs_get_unhashed_chunk(chunksize, 1, blkaddr);
- if (IS_ERR(chunk))
- return PTR_ERR(chunk);
-
- *(void **)idx++ = chunk;
- blkaddr += erofs_blknr(sbi, len);
- }
- inode->datalayout = EROFS_INODE_CHUNK_BASED;
- return 0;
-}
-
void tarerofs_remove_inode(struct erofs_inode *inode)
{
struct erofs_dentry *d;
eh.link = strndup(th.linkname, sizeof(th.linkname));
}
- if (tar->index_mode && erofs_blkoff(sbi, tar_offset + sizeof(th))) {
+ if (tar->index_mode && !tar->mapfile &&
+ erofs_blkoff(sbi, data_offset)) {
erofs_err("invalid tar data alignment @ %llu", tar_offset);
ret = -EIO;
goto out;
inode->i_link = malloc(inode->i_size + 1);
memcpy(inode->i_link, eh.link, inode->i_size + 1);
} else if (tar->index_mode) {
- ret = tarerofs_write_chunk_indexes(inode,
- erofs_blknr(sbi, data_offset));
+ ret = tarerofs_write_chunkes(inode, data_offset);
if (ret)
goto out;
if (erofs_lskip(tar->fd, inode->i_size)) {
ret = -EIO;
goto out;
}
-
-static struct erofs_buffer_head *bh_devt;
-
-int tarerofs_reserve_devtable(struct erofs_sb_info *sbi, unsigned int devices)
-{
- if (!devices)
- return 0;
-
- bh_devt = erofs_balloc(DEVT,
- sizeof(struct erofs_deviceslot) * devices, 0, 0);
- if (IS_ERR(bh_devt))
- return PTR_ERR(bh_devt);
-
- erofs_mapbh(bh_devt->block);
- bh_devt->op = &erofs_skip_write_bhops;
- sbi->devt_slotoff = erofs_btell(bh_devt, false) / EROFS_DEVT_SLOT_SIZE;
- sbi->extra_devices = devices;
- erofs_sb_set_device_table(sbi);
- return 0;
-}
-
-int tarerofs_write_devtable(struct erofs_sb_info *sbi, struct erofs_tarfile *tar)
-{
- erofs_off_t pos_out;
- unsigned int i;
-
- if (!sbi->extra_devices)
- return 0;
- pos_out = erofs_btell(bh_devt, false);
- for (i = 0; i < sbi->extra_devices; ++i) {
- struct erofs_deviceslot dis = {
- .blocks = erofs_blknr(sbi, tar->offset),
- };
- int ret;
-
- ret = dev_write(sbi, &dis, pos_out, sizeof(dis));
- if (ret)
- return ret;
- pos_out += sizeof(dis);
- }
- bh_devt->op = &erofs_drop_directly_bhops;
- erofs_bdrop(bh_devt, false);
- return 0;
-}
break;
case 20:
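+			/* "0,<mapfile>" selects index mode with an
+			 * external block map file */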
if (optarg && (!strcmp(optarg, "i") ||
- !strcmp(optarg, "0")))
+ !strcmp(optarg, "0") || !memcmp(optarg, "0,", 2))) {
erofstar.index_mode = true;
+ if (!memcmp(optarg, "0,", 2))
+ erofstar.mapfile = strdup(optarg + 2);
+ }
tar_mode = true;
break;
case 21:
return 1;
}
- if (cfg.block_list_file && erofs_droid_blocklist_fopen() < 0) {
+ if (cfg.block_list_file &&
+ erofs_blocklist_open(cfg.block_list_file, false)) {
erofs_err("failed to open %s", cfg.block_list_file);
return 1;
}
if (cfg.c_random_pclusterblks)
srand(time(NULL));
#endif
- if (tar_mode && erofstar.index_mode)
- sbi.blkszbits = 9;
+ if (tar_mode && erofstar.index_mode) {
+ if (erofstar.mapfile) {
+ err = erofs_blocklist_open(erofstar.mapfile, true);
+ if (err) {
+ erofs_err("failed to open %s", erofstar.mapfile);
+ goto exit;
+ }
+ } else {
+ sbi.blkszbits = 9;
+ }
+ }
+
sb_bh = erofs_buffer_init();
if (IS_ERR(sb_bh)) {
err = PTR_ERR(sb_bh);
return 1;
}
- if (tar_mode && erofstar.index_mode)
- err = tarerofs_reserve_devtable(&sbi, 1);
- else
- err = erofs_generate_devtable(&sbi);
+	if ((erofstar.index_mode && !erofstar.mapfile) || cfg.c_blobdev_path)
+		err = erofs_mkfs_init_devices(&sbi, 1);
+	else
+		err = 0;
if (err) {
erofs_err("failed to generate device table: %s",
erofs_strerror(err));
root_nid = erofs_lookupnid(root_inode);
erofs_iput(root_inode);
- if (tar_mode)
- tarerofs_write_devtable(&sbi, &erofstar);
- if (cfg.c_chunkbits) {
+ if (erofstar.index_mode || cfg.c_chunkbits) {
erofs_info("total metadata: %u blocks", erofs_mapbh(NULL));
- err = erofs_blob_remap(&sbi);
+ if (erofstar.index_mode && !erofstar.mapfile)
+ sbi.devs[0].blocks =
+ BLK_ROUND_UP(&sbi, erofstar.offset);
+ err = erofs_mkfs_dump_blobs(&sbi);
if (err)
goto exit;
}
exit:
z_erofs_compress_exit();
z_erofs_dedupe_exit();
-#ifdef WITH_ANDROID
- erofs_droid_blocklist_fclose();
-#endif
+ erofs_blocklist_close();
dev_close(&sbi);
erofs_cleanup_compress_hints();
erofs_cleanup_exclude_rules();