// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files. It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"
#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;
/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};
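
/*
 * Illustrative sketch, not part of the original file: enabled_steps is a
 * bitmask indexed by enum bio_post_read_step, so whether a given step must
 * run can be tested with a shift and mask. The helper name below is
 * hypothetical and only shows the encoding used throughout this file.
 */
static inline bool bio_post_read_step_enabled(const struct bio_post_read_ctx *ctx,
					      enum bio_post_read_step step)
{
	/* e.g. (1 << STEP_DECRYPT) | (1 << STEP_VERITY) enables both steps */
	return ctx->enabled_steps & (1 << step);
}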
static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		if (bio->bi_status)
			folio_clear_uptodate(folio);
		else
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated. So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}
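
/*
 * Illustrative call chain, not part of the original file, showing why the
 * ctx is released before verification: with a small mempool (worst case a
 * single element), holding our ctx across fsverity_verify_bio() could
 * deadlock if verification triggers another encrypted read that also needs
 * a ctx from the same pool.
 *
 *   verity_work()
 *     fsverity_verify_bio()
 *       ... may read Merkle tree pages via readahead() ...
 *         ext4_set_bio_post_read_ctx()
 *           mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS)
 *             <- must not have to wait for the ctx we are still holding
 */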
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
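
/*
 * Worked example, not part of the original file: for a verity-protected
 * file with i_size == 8192 and PAGE_SIZE == 4096,
 * DIV_ROUND_UP(8192, 4096) == 2, so only page indexes 0 and 1 are
 * verified; pages past i_size (such as verity metadata made readable by
 * ext4_readpage_limit() below) are deliberately skipped.
 */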
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}
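
/*
 * Design note, not part of the original file: mempool_alloc() on a
 * pre-populated pool with a gfp mask that allows sleeping (GFP_NOFS here)
 * waits for an element to be returned to the pool rather than failing,
 * which is why the "never fails" comment above holds and why no NULL
 * check follows the allocation.
 */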
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned page_block;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

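	/*
	 * Worked example, not part of the original file: with PAGE_SIZE ==
	 * 4096 and a 1024-byte block size (blkbits == 10), blocks_per_page
	 * is 4096 >> 10 == 4. Folio index 3 then starts at logical block
	 * 3 << (12 - 10) == 12, and a physical block number b maps to the
	 * 512-byte sector b << (10 - 9) when bi_sector is set below.
	 */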
	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac)
			folio = readahead_folio(rac);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		block_in_file = next_block =
			(sector_t)folio->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;
		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}
		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_set_error(folio);
					folio_zero_segment(folio, 0,
							   folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			folio_zero_segment(folio, first_hole << blkbits,
					   folio_size(folio));
			if (first_hole == 0) {
				if (ext4_need_verity(inode, folio->index) &&
				    !fsverity_verify_folio(folio))
					goto set_error_page;
				folio_mark_uptodate(folio);
				folio_unlock(folio);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}
		/*
		 * This folio will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, folio->index);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}
		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		continue;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
	next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		submit_bio(bio);
	return 0;
}
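
/*
 * Usage sketch, not part of the original file: ext4's address_space
 * operations in fs/ext4/inode.c drive ext4_mpage_readpages() from both the
 * single-folio read path (rac == NULL, one folio) and the readahead path
 * (rac != NULL, folios pulled via readahead_folio()). The wrapper below is
 * only an illustration and its name is hypothetical.
 */
static void __maybe_unused example_ext4_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;

	/* rac supplies readahead_count(rac) locked folios to read */
	ext4_mpage_readpages(inode, rac, NULL);
}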
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}
void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
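
/*
 * Usage note, not part of the original file: the init/exit helpers above are
 * expected to be tied to the filesystem module's lifetime (in mainline ext4
 * they are invoked from the module init/exit paths in fs/ext4/super.c), so
 * the ctx cache and mempool exist before any post-read work can be queued.
 * The sketch below only illustrates that ordering; the function name is
 * hypothetical.
 */
static int __init __maybe_unused example_module_init(void)
{
	int err = ext4_init_post_read_processing();

	if (err)
		return err;
	/* ... register the filesystem only after the pool exists ... */
	return 0;
}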