// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap(buf->page);
	else if (buf->kmap_type == EROFS_KMAP_ATOMIC)
		kunmap_atomic(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

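/*
 * erofs_bread() reads the metadata block at @blkaddr through @inode's page
 * cache and keeps the page cached in @buf across calls, so that walking
 * consecutive on-disk structures hits the page cache instead of re-reading.
 * A minimal usage sketch (illustrative only; do_something() is hypothetical):
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_bread(&buf, inode, blkaddr, EROFS_KMAP);
 *
 *	if (!IS_ERR(ptr))
 *		do_something(ptr);
 *	erofs_put_metabuf(&buf);	// unmap and drop the page reference
 */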
void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
		  erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	struct address_space *const mapping = inode->i_mapping;
	erofs_off_t offset = blknr_to_addr(blkaddr);
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);
		page = read_cache_page_gfp(mapping, index,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(page))
			return page;
		/* should already be PageUptodate, no need to lock page */
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap(page);
		else if (type == EROFS_KMAP_ATOMIC)
			buf->base = kmap_atomic(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	return erofs_bread(buf, sb->s_bdev->bd_inode, blkaddr, type);
}

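/*
 * For flat (non-chunk-based) layouts, file data occupies one contiguous run
 * of blocks starting at vi->raw_blkaddr; EROFS_INODE_FLAT_INLINE additionally
 * packs the file tail right after the inode metadata (base + xattrs) so the
 * last partial block needs no block address of its own.
 */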
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

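/*
 * Chunk-based inodes carry a per-chunk mapping array right after the inode
 * base and xattrs: either bare 32-bit block addresses (block map) or full
 * struct erofs_inode_chunk_index entries that also record a device id.
 * The lookup arithmetic used below, in shorthand (iloc stands for
 * iloc(EROFS_SB(sb), vi->nid)):
 *
 *	chunknr = m_la >> chunkbits;
 *	pos = ALIGN(iloc + inode_isize + xattr_isize, unit) + unit * chunknr;
 *
 * i.e. the entry for the chunk containing m_la lives at byte @pos of the
 * metadata area.
 */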
static int erofs_map_blocks(struct inode *inode,
			    struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map, flags);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, flags, 0);
	return err;
}

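/*
 * erofs_map_dev() turns the filesystem-wide physical address in @map into a
 * concrete (block device, offset) pair: an extra device is selected either
 * explicitly via m_deviceid or by finding which registered device range
 * contains m_pa; otherwise the primary device is used unchanged.
 */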
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

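/*
 * The iomap_begin hook glues the two mappings above into the generic iomap
 * framework: a logical range is first resolved to a physical extent
 * (erofs_map_blocks), then to a concrete device (erofs_map_dev), and the
 * result is reported to iomap as a HOLE, INLINE or MAPPED extent.
 */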
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(inode->i_sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, inode->i_sb,
					 erofs_blknr(mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

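/*
 * A single ops table backs buffered reads, readahead, bmap, direct I/O, DAX
 * and fiemap below; ->iomap_end releases the metabuf that ->iomap_begin
 * pinned for inline (tail-packed) extents.
 */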
static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_readpage(struct file *file, struct page *page)
{
	return iomap_readpage(page, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

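/*
 * Direct I/O requires the position, the byte count and every user buffer
 * segment to be aligned to the logical block size. OR-ing all three into
 * @align and testing once against the block-size mask is a cheap way to
 * check them together: e.g. with 512-byte sectors (mask 0x1ff), any
 * misaligned component sets a low bit and fails the check.
 */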
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;
	return 0;
}

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_readpage,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

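/*
 * With DAX, reads and page faults bypass the page cache and access the
 * backing device directly; writable shared mappings are rejected since
 * erofs is read-only.
 */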
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};