fs/erofs/zdata.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2018 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  * Copyright (C) 2022 Alibaba Cloud
6  */
7 #include "compress.h"
8 #include <linux/psi.h>
9 #include <linux/cpuhotplug.h>
10 #include <trace/events/erofs.h>
11
12 #define Z_EROFS_PCLUSTER_MAX_PAGES      (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
13 #define Z_EROFS_INLINE_BVECS            2
14
15 /*
16  * let's leave a type here in case another
17  * tagged pointer is introduced later.
18  */
19 typedef void *z_erofs_next_pcluster_t;
20
21 struct z_erofs_bvec {
22         struct page *page;
23         int offset;
24         unsigned int end;
25 };
26
27 #define __Z_EROFS_BVSET(name, total) \
28 struct name { \
29         /* point to the next page which contains the following bvecs */ \
30         struct page *nextpage; \
31         struct z_erofs_bvec bvec[total]; \
32 }
33 __Z_EROFS_BVSET(z_erofs_bvset,);
34 __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
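
/*
 * For illustration only: the second expansion above is equivalent to
 *
 *      struct z_erofs_bvset_inline {
 *              struct page *nextpage;
 *              struct z_erofs_bvec bvec[Z_EROFS_INLINE_BVECS];
 *      };
 *
 * while z_erofs_bvset leaves bvec[] as a flexible array so that a whole
 * page worth of bvecs can follow nextpage.
 */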
35
36 /*
37  * Structure fields follow one of the following exclusion rules.
38  *
39  * I: Modifiable by initialization/destruction paths and read-only
40  *    for everyone else;
41  *
42  * L: Field should be protected by the pcluster lock;
43  *
44  * A: Field should be accessed / updated atomically for parallelized code.
45  */
46 struct z_erofs_pcluster {
47         struct erofs_workgroup obj;
48         struct mutex lock;
49
50         /* A: points to the next chained pcluster or TAIL */
51         z_erofs_next_pcluster_t next;
52
53         /* L: the maximum decompression size of this round */
54         unsigned int length;
55
56         /* L: total number of bvecs */
57         unsigned int vcnt;
58
59         /* I: page offset of start position of decompression */
60         unsigned short pageofs_out;
61
62         /* I: page offset of inline compressed data */
63         unsigned short pageofs_in;
64
65         union {
66                 /* L: inline a certain number of bvec for bootstrap */
67                 struct z_erofs_bvset_inline bvset;
68
69                 /* I: can be used to free the pcluster by RCU. */
70                 struct rcu_head rcu;
71         };
72
73         union {
74                 /* I: physical cluster size in pages */
75                 unsigned short pclusterpages;
76
77                 /* I: tailpacking inline compressed size */
78                 unsigned short tailpacking_size;
79         };
80
81         /* I: compression algorithm format */
82         unsigned char algorithmformat;
83
84         /* L: whether partial decompression or not */
85         bool partial;
86
87         /* L: indicate several pageofs_outs or not */
88         bool multibases;
89
90         /* A: compressed bvecs (can be cached or inplaced pages) */
91         struct z_erofs_bvec compressed_bvecs[];
92 };
93
94 /* the end of a chain of pclusters */
95 #define Z_EROFS_PCLUSTER_TAIL           ((void *) 0x700 + POISON_POINTER_DELTA)
96 #define Z_EROFS_PCLUSTER_NIL            (NULL)
97
98 struct z_erofs_decompressqueue {
99         struct super_block *sb;
100         atomic_t pending_bios;
101         z_erofs_next_pcluster_t head;
102
103         union {
104                 struct completion done;
105                 struct work_struct work;
106                 struct kthread_work kthread_work;
107         } u;
108         bool eio, sync;
109 };
110
111 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
112 {
113         return !pcl->obj.index;
114 }
115
116 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
117 {
118         if (z_erofs_is_inline_pcluster(pcl))
119                 return 1;
120         return pcl->pclusterpages;
121 }
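
/*
 * Example (explanatory note only): a ztailpacking pcluster keeps its
 * compressed data inline in the metadata block, hence obj.index == 0
 * and it always counts as a single page; regular pclusters report the
 * pclusterpages value fixed at allocation time.
 */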
122
123 /*
124  * bit 30: I/O error occurred on this page
125  * bit 0 - 29: remaining parts to complete this page
126  */
127 #define Z_EROFS_PAGE_EIO                        (1 << 30)
128
129 static inline void z_erofs_onlinepage_init(struct page *page)
130 {
131         union {
132                 atomic_t o;
133                 unsigned long v;
134         } u = { .o = ATOMIC_INIT(1) };
135
136         set_page_private(page, u.v);
137         smp_wmb();
138         SetPagePrivate(page);
139 }
140
141 static inline void z_erofs_onlinepage_split(struct page *page)
142 {
143         atomic_inc((atomic_t *)&page->private);
144 }
145
146 static inline void z_erofs_page_mark_eio(struct page *page)
147 {
148         int orig;
149
150         do {
151                 orig = atomic_read((atomic_t *)&page->private);
152         } while (atomic_cmpxchg((atomic_t *)&page->private, orig,
153                                 orig | Z_EROFS_PAGE_EIO) != orig);
154 }
155
156 static inline void z_erofs_onlinepage_endio(struct page *page)
157 {
158         unsigned int v;
159
160         DBG_BUGON(!PagePrivate(page));
161         v = atomic_dec_return((atomic_t *)&page->private);
162         if (!(v & ~Z_EROFS_PAGE_EIO)) {
163                 set_page_private(page, 0);
164                 ClearPagePrivate(page);
165                 if (!(v & Z_EROFS_PAGE_EIO))
166                         SetPageUptodate(page);
167                 unlock_page(page);
168         }
169 }
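
/*
 * A minimal lifecycle sketch of the helpers above (illustration only,
 * not part of this file's control flow):
 *
 *      z_erofs_onlinepage_init(page);    refcount = 1
 *      z_erofs_onlinepage_split(page);   page spans one more part: 2
 *      z_erofs_onlinepage_endio(page);   one part decoded: 1
 *      z_erofs_onlinepage_endio(page);   last part: unlock the page
 *
 * The page is only unlocked once bits 0-29 reach zero; if any part set
 * Z_EROFS_PAGE_EIO, SetPageUptodate() is skipped.
 */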
170
171 #define Z_EROFS_ONSTACK_PAGES           32
172
173 /*
174  * since pclustersize is variable for the big pcluster feature, introduce
175  * slab pools for different pcluster sizes.
176  */
177 struct z_erofs_pcluster_slab {
178         struct kmem_cache *slab;
179         unsigned int maxpages;
180         char name[48];
181 };
182
183 #define _PCLP(n) { .maxpages = n }
184
185 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
186         _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
187         _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
188 };
189
190 struct z_erofs_bvec_iter {
191         struct page *bvpage;
192         struct z_erofs_bvset *bvset;
193         unsigned int nr, cur;
194 };
195
196 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
197 {
198         if (iter->bvpage)
199                 kunmap_local(iter->bvset);
200         return iter->bvpage;
201 }
202
203 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
204 {
205         unsigned long base = offsetof(struct z_erofs_bvset, bvec);
206         /* have to access nextpage in advance, otherwise it will be unmapped */
207         struct page *nextpage = iter->bvset->nextpage;
208         struct page *oldpage;
209
210         DBG_BUGON(!nextpage);
211         oldpage = z_erofs_bvec_iter_end(iter);
212         iter->bvpage = nextpage;
213         iter->bvset = kmap_local_page(nextpage);
214         iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
215         iter->cur = 0;
216         return oldpage;
217 }
218
219 static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
220                                     struct z_erofs_bvset_inline *bvset,
221                                     unsigned int bootstrap_nr,
222                                     unsigned int cur)
223 {
224         *iter = (struct z_erofs_bvec_iter) {
225                 .nr = bootstrap_nr,
226                 .bvset = (struct z_erofs_bvset *)bvset,
227         };
228
229         while (cur > iter->nr) {
230                 cur -= iter->nr;
231                 z_erofs_bvset_flip(iter);
232         }
233         iter->cur = cur;
234 }
235
236 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
237                                 struct z_erofs_bvec *bvec,
238                                 struct page **candidate_bvpage,
239                                 struct page **pagepool)
240 {
241         if (iter->cur >= iter->nr) {
242                 struct page *nextpage = *candidate_bvpage;
243
244                 if (!nextpage) {
245                         nextpage = erofs_allocpage(pagepool, GFP_NOFS);
246                         if (!nextpage)
247                                 return -ENOMEM;
248                         set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
249                 }
250                 DBG_BUGON(iter->bvset->nextpage);
251                 iter->bvset->nextpage = nextpage;
252                 z_erofs_bvset_flip(iter);
253
254                 iter->bvset->nextpage = NULL;
255                 *candidate_bvpage = NULL;
256         }
257         iter->bvset->bvec[iter->cur++] = *bvec;
258         return 0;
259 }
260
261 static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
262                                  struct z_erofs_bvec *bvec,
263                                  struct page **old_bvpage)
264 {
265         if (iter->cur == iter->nr)
266                 *old_bvpage = z_erofs_bvset_flip(iter);
267         else
268                 *old_bvpage = NULL;
269         *bvec = iter->bvset->bvec[iter->cur++];
270 }
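
/*
 * A minimal sketch of how the iterator pair above is driven (assuming
 * a populated pcluster `pcl' and a page pool `pp'); this mirrors what
 * z_erofs_parse_out_bvecs() does later in this file:
 *
 *      struct z_erofs_bvec_iter it;
 *      struct z_erofs_bvec bv;
 *      struct page *old_bvpage;
 *
 *      z_erofs_bvec_iter_begin(&it, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
 *      z_erofs_bvec_dequeue(&it, &bv, &old_bvpage);
 *      if (old_bvpage)
 *              z_erofs_put_shortlivedpage(pp, old_bvpage);
 *      old_bvpage = z_erofs_bvec_iter_end(&it);
 */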
271
272 static void z_erofs_destroy_pcluster_pool(void)
273 {
274         int i;
275
276         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
277                 if (!pcluster_pool[i].slab)
278                         continue;
279                 kmem_cache_destroy(pcluster_pool[i].slab);
280                 pcluster_pool[i].slab = NULL;
281         }
282 }
283
284 static int z_erofs_create_pcluster_pool(void)
285 {
286         struct z_erofs_pcluster_slab *pcs;
287         struct z_erofs_pcluster *a;
288         unsigned int size;
289
290         for (pcs = pcluster_pool;
291              pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
292                 size = struct_size(a, compressed_bvecs, pcs->maxpages);
293
294                 sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
295                 pcs->slab = kmem_cache_create(pcs->name, size, 0,
296                                               SLAB_RECLAIM_ACCOUNT, NULL);
297                 if (pcs->slab)
298                         continue;
299
300                 z_erofs_destroy_pcluster_pool();
301                 return -ENOMEM;
302         }
303         return 0;
304 }
305
306 static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
307 {
308         int i;
309
310         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
311                 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
312                 struct z_erofs_pcluster *pcl;
313
314                 if (nrpages > pcs->maxpages)
315                         continue;
316
317                 pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
318                 if (!pcl)
319                         return ERR_PTR(-ENOMEM);
320                 pcl->pclusterpages = nrpages;
321                 return pcl;
322         }
323         return ERR_PTR(-EINVAL);
324 }
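
/*
 * E.g. nrpages == 3 skips the "erofs_pcluster-1" cache and is served
 * from "erofs_pcluster-4"; z_erofs_free_pcluster() below walks the same
 * size table to locate the owning cache. (Illustration only.)
 */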
325
326 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
327 {
328         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
329         int i;
330
331         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
332                 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
333
334                 if (pclusterpages > pcs->maxpages)
335                         continue;
336
337                 kmem_cache_free(pcs->slab, pcl);
338                 return;
339         }
340         DBG_BUGON(1);
341 }
342
343 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
344
345 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
346 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
347
348 static void erofs_destroy_percpu_workers(void)
349 {
350         struct kthread_worker *worker;
351         unsigned int cpu;
352
353         for_each_possible_cpu(cpu) {
354                 worker = rcu_dereference_protected(
355                                         z_erofs_pcpu_workers[cpu], 1);
356                 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
357                 if (worker)
358                         kthread_destroy_worker(worker);
359         }
360         kfree(z_erofs_pcpu_workers);
361 }
362
363 static struct kthread_worker *erofs_init_percpu_worker(int cpu)
364 {
365         struct kthread_worker *worker =
366                 kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
367
368         if (IS_ERR(worker))
369                 return worker;
370         if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
371                 sched_set_fifo_low(worker->task);
372         return worker;
373 }
374
375 static int erofs_init_percpu_workers(void)
376 {
377         struct kthread_worker *worker;
378         unsigned int cpu;
379
380         z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
381                         sizeof(struct kthread_worker *), GFP_ATOMIC);
382         if (!z_erofs_pcpu_workers)
383                 return -ENOMEM;
384
385         for_each_online_cpu(cpu) {      /* could miss cpu{off,on}line? */
386                 worker = erofs_init_percpu_worker(cpu);
387                 if (!IS_ERR(worker))
388                         rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
389         }
390         return 0;
391 }
392 #else
393 static inline void erofs_destroy_percpu_workers(void) {}
394 static inline int erofs_init_percpu_workers(void) { return 0; }
395 #endif
396
397 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
398 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
399 static enum cpuhp_state erofs_cpuhp_state;
400
401 static int erofs_cpu_online(unsigned int cpu)
402 {
403         struct kthread_worker *worker, *old;
404
405         worker = erofs_init_percpu_worker(cpu);
406         if (IS_ERR(worker))
407                 return PTR_ERR(worker);
408
409         spin_lock(&z_erofs_pcpu_worker_lock);
410         old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
411                         lockdep_is_held(&z_erofs_pcpu_worker_lock));
412         if (!old)
413                 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
414         spin_unlock(&z_erofs_pcpu_worker_lock);
415         if (old)
416                 kthread_destroy_worker(worker);
417         return 0;
418 }
419
420 static int erofs_cpu_offline(unsigned int cpu)
421 {
422         struct kthread_worker *worker;
423
424         spin_lock(&z_erofs_pcpu_worker_lock);
425         worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
426                         lockdep_is_held(&z_erofs_pcpu_worker_lock));
427         rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
428         spin_unlock(&z_erofs_pcpu_worker_lock);
429
430         synchronize_rcu();
431         if (worker)
432                 kthread_destroy_worker(worker);
433         return 0;
434 }
435
436 static int erofs_cpu_hotplug_init(void)
437 {
438         int state;
439
440         state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
441                         "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
442         if (state < 0)
443                 return state;
444
445         erofs_cpuhp_state = state;
446         return 0;
447 }
448
449 static void erofs_cpu_hotplug_destroy(void)
450 {
451         if (erofs_cpuhp_state)
452                 cpuhp_remove_state_nocalls(erofs_cpuhp_state);
453 }
454 #else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
455 static inline int erofs_cpu_hotplug_init(void) { return 0; }
456 static inline void erofs_cpu_hotplug_destroy(void) {}
457 #endif
458
459 void z_erofs_exit_zip_subsystem(void)
460 {
461         erofs_cpu_hotplug_destroy();
462         erofs_destroy_percpu_workers();
463         destroy_workqueue(z_erofs_workqueue);
464         z_erofs_destroy_pcluster_pool();
465 }
466
467 int __init z_erofs_init_zip_subsystem(void)
468 {
469         int err = z_erofs_create_pcluster_pool();
470
471         if (err)
472                 goto out_error_pcluster_pool;
473
474         z_erofs_workqueue = alloc_workqueue("erofs_worker",
475                         WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
476         if (!z_erofs_workqueue) {
477                 err = -ENOMEM;
478                 goto out_error_workqueue_init;
479         }
480
481         err = erofs_init_percpu_workers();
482         if (err)
483                 goto out_error_pcpu_worker;
484
485         err = erofs_cpu_hotplug_init();
486         if (err < 0)
487                 goto out_error_cpuhp_init;
488         return err;
489
490 out_error_cpuhp_init:
491         erofs_destroy_percpu_workers();
492 out_error_pcpu_worker:
493         destroy_workqueue(z_erofs_workqueue);
494 out_error_workqueue_init:
495         z_erofs_destroy_pcluster_pool();
496 out_error_pcluster_pool:
497         return err;
498 }
499
500 enum z_erofs_pclustermode {
501         Z_EROFS_PCLUSTER_INFLIGHT,
502         /*
503          * a weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
504          * could be dispatched into the bypass queue later due to up-to-date
505          * managed pages. All related online pages cannot be reused for inplace
506          * I/O (or bvpage) since they can be decoded directly without I/O.
507          */
508         Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
509         /*
510          * The current collection has been linked with the owned chain, and
511          * could also be linked with the remaining collections, which means
512          * that if the page being processed is the tail page of a collection,
513          * the current collection can safely use the whole page (since
514          * the previous collection is under control) for in-place I/O, as
515          * illustrated below:
516          *  ________________________________________________________________
517          * |  tail (partial) page |          head (partial) page           |
518          * |  (of the current cl) |      (of the previous collection)      |
519          * |                      |                                        |
520          * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________|
521          *
522          * [  (*) the above page can be used as inplace I/O.               ]
523          */
524         Z_EROFS_PCLUSTER_FOLLOWED,
525 };
526
527 struct z_erofs_decompress_frontend {
528         struct inode *const inode;
529         struct erofs_map_blocks map;
530         struct z_erofs_bvec_iter biter;
531
532         struct page *pagepool;
533         struct page *candidate_bvpage;
534         struct z_erofs_pcluster *pcl;
535         z_erofs_next_pcluster_t owned_head;
536         enum z_erofs_pclustermode mode;
537
538         /* used for applying cache strategy on the fly */
539         bool backmost;
540         erofs_off_t headoffset;
541
542         /* a pointer used to pick up inplace I/O pages */
543         unsigned int icur;
544 };
545
546 #define DECOMPRESS_FRONTEND_INIT(__i) { \
547         .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
548         .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
549
550 static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
551 {
552         unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
553
554         if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
555                 return false;
556
557         if (fe->backmost)
558                 return true;
559
560         if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
561             fe->map.m_la < fe->headoffset)
562                 return true;
563
564         return false;
565 }
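
/*
 * Decision sketch for the helper above (illustration only):
 *
 *      disabled    -> never allocate cache pages;
 *      readmostly  -> cache only the backmost extent of a request;
 *      readaround  -> additionally cache extents starting before the
 *                     request head (fe->map.m_la < fe->headoffset).
 */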
566
567 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
568 {
569         struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
570         struct z_erofs_pcluster *pcl = fe->pcl;
571         bool shouldalloc = z_erofs_should_alloc_cache(fe);
572         bool standalone = true;
573         /*
574          * optimistic allocation without direct reclaim since inplace I/O
575          * can be used instead under low memory.
576          */
577         gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
578                         __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
579         unsigned int i;
580
581         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
582                 return;
583
584         for (i = 0; i < pcl->pclusterpages; ++i) {
585                 struct page *page;
586                 void *t;        /* mark pages just found for debugging */
587                 struct page *newpage = NULL;
588
589                 /* the compressed page was loaded before */
590                 if (READ_ONCE(pcl->compressed_bvecs[i].page))
591                         continue;
592
593                 page = find_get_page(mc, pcl->obj.index + i);
594
595                 if (page) {
596                         t = (void *)((unsigned long)page | 1);
597                 } else {
598                         /* I/O is needed, not possible to decompress directly */
599                         standalone = false;
600                         if (!shouldalloc)
601                                 continue;
602
603                         /*
604                          * try to use cached I/O if page allocation
605                          * succeeds or fallback to in-place I/O instead
606                          * to avoid any direct reclaim.
607                          */
608                         newpage = erofs_allocpage(&fe->pagepool, gfp);
609                         if (!newpage)
610                                 continue;
611                         set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
612                         t = (void *)((unsigned long)newpage | 1);
613                 }
614
615                 if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
616                         continue;
617
618                 if (page)
619                         put_page(page);
620                 else if (newpage)
621                         erofs_pagepool_add(&fe->pagepool, newpage);
622         }
623
624         /*
625          * don't do inplace I/O if all compressed pages are available in
626          * managed cache since it can be moved to the bypass queue instead.
627          */
628         if (standalone)
629                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
630 }
631
632 /* called by erofs_shrinker to get rid of all compressed_pages */
633 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
634                                        struct erofs_workgroup *grp)
635 {
636         struct z_erofs_pcluster *const pcl =
637                 container_of(grp, struct z_erofs_pcluster, obj);
638         int i;
639
640         DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
641         /*
642          * the workgroup refcount is now frozen at 0, therefore
643          * there is no need to worry about in-flight decompression users.
644          */
645         for (i = 0; i < pcl->pclusterpages; ++i) {
646                 struct page *page = pcl->compressed_bvecs[i].page;
647
648                 if (!page)
649                         continue;
650
651                 /* block other users from reclaiming or migrating the page */
652                 if (!trylock_page(page))
653                         return -EBUSY;
654
655                 if (!erofs_page_is_managed(sbi, page))
656                         continue;
657
658                 /* barrier is implied in the following 'unlock_page' */
659                 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
660                 detach_page_private(page);
661                 unlock_page(page);
662         }
663         return 0;
664 }
665
666 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
667 {
668         struct z_erofs_pcluster *pcl = folio_get_private(folio);
669         bool ret;
670         int i;
671
672         if (!folio_test_private(folio))
673                 return true;
674
675         ret = false;
676         spin_lock(&pcl->obj.lockref.lock);
677         if (pcl->obj.lockref.count > 0)
678                 goto out;
679
680         DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
681         for (i = 0; i < pcl->pclusterpages; ++i) {
682                 if (pcl->compressed_bvecs[i].page == &folio->page) {
683                         WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
684                         ret = true;
685                         break;
686                 }
687         }
688         if (ret)
689                 folio_detach_private(folio);
690 out:
691         spin_unlock(&pcl->obj.lockref.lock);
692         return ret;
693 }
694
695 /*
696  * It will be called only on inode eviction. In case there are still some
697  * decompression requests in progress, wait with rescheduling for a bit here.
698  * An extra lock could be introduced instead but it seems unnecessary.
699  */
700 static void z_erofs_cache_invalidate_folio(struct folio *folio,
701                                            size_t offset, size_t length)
702 {
703         const size_t stop = length + offset;
704
705         /* Check for potential overflow in debug mode */
706         DBG_BUGON(stop > folio_size(folio) || stop < length);
707
708         if (offset == 0 && stop == folio_size(folio))
709                 while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
710                         cond_resched();
711 }
712
713 static const struct address_space_operations z_erofs_cache_aops = {
714         .release_folio = z_erofs_cache_release_folio,
715         .invalidate_folio = z_erofs_cache_invalidate_folio,
716 };
717
718 int erofs_init_managed_cache(struct super_block *sb)
719 {
720         struct inode *const inode = new_inode(sb);
721
722         if (!inode)
723                 return -ENOMEM;
724
725         set_nlink(inode, 1);
726         inode->i_size = OFFSET_MAX;
727         inode->i_mapping->a_ops = &z_erofs_cache_aops;
728         mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
729         EROFS_SB(sb)->managed_cache = inode;
730         return 0;
731 }
732
733 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
734                                    struct z_erofs_bvec *bvec)
735 {
736         struct z_erofs_pcluster *const pcl = fe->pcl;
737
738         while (fe->icur > 0) {
739                 if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
740                              NULL, bvec->page)) {
741                         pcl->compressed_bvecs[fe->icur] = *bvec;
742                         return true;
743                 }
744         }
745         return false;
746 }
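
/*
 * Note for the helper above: fe->icur walks compressed_bvecs[]
 * backwards, and cmpxchg() is required because z_erofs_bind_cache()
 * may concurrently install cached pages into the same slots; only a
 * slot that is still NULL can be claimed for inplace I/O.
 */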
747
748 /* callers must hold the pcluster lock */
749 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
750                                struct z_erofs_bvec *bvec, bool exclusive)
751 {
752         int ret;
753
754         if (exclusive) {
755                 /* give priority to inplace I/O to use file pages first */
756                 if (z_erofs_try_inplace_io(fe, bvec))
757                         return 0;
758                 /* otherwise, check if it can be used as a bvpage */
759                 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
760                     !fe->candidate_bvpage)
761                         fe->candidate_bvpage = bvec->page;
762         }
763         ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
764                                    &fe->pagepool);
765         fe->pcl->vcnt += (ret >= 0);
766         return ret;
767 }
768
769 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
770 {
771         struct z_erofs_pcluster *pcl = f->pcl;
772         z_erofs_next_pcluster_t *owned_head = &f->owned_head;
773
774         /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
775         if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
776                     *owned_head) == Z_EROFS_PCLUSTER_NIL) {
777                 *owned_head = &pcl->next;
778                 /* so we can attach this pcluster to our submission chain. */
779                 f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
780                 return;
781         }
782
783         /* type 2, it belongs to an ongoing chain */
784         f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
785 }
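
/*
 * Chain layout sketch after a successful claim (illustration only):
 *
 *      f->owned_head --> pcl->next --> (previously owned head)
 *
 * i.e. each newly claimed pcluster is spliced in front of the chain and
 * f->owned_head is updated to point at its ->next field; a pcluster
 * whose ->next is already non-NIL belongs to another chain in flight
 * and is only marked Z_EROFS_PCLUSTER_INFLIGHT here.
 */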
786
787 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
788 {
789         struct erofs_map_blocks *map = &fe->map;
790         bool ztailpacking = map->m_flags & EROFS_MAP_META;
791         struct z_erofs_pcluster *pcl;
792         struct erofs_workgroup *grp;
793         int err;
794
795         if (!(map->m_flags & EROFS_MAP_ENCODED) ||
796             (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
797                 DBG_BUGON(1);
798                 return -EFSCORRUPTED;
799         }
800
801         /* no available pcluster, let's allocate one */
802         pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
803                                      map->m_plen >> PAGE_SHIFT);
804         if (IS_ERR(pcl))
805                 return PTR_ERR(pcl);
806
807         spin_lock_init(&pcl->obj.lockref.lock);
808         pcl->algorithmformat = map->m_algorithmformat;
809         pcl->length = 0;
810         pcl->partial = true;
811
812         /* new pclusters should be claimed as type 1, primary and followed */
813         pcl->next = fe->owned_head;
814         pcl->pageofs_out = map->m_la & ~PAGE_MASK;
815         fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
816
817         /*
818          * lock all primary followed works before they become visible to
819          * others; mutex_trylock *never* fails for a new pcluster.
820          */
821         mutex_init(&pcl->lock);
822         DBG_BUGON(!mutex_trylock(&pcl->lock));
823
824         if (ztailpacking) {
825                 pcl->obj.index = 0;     /* which indicates ztailpacking */
826                 pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
827                 pcl->tailpacking_size = map->m_plen;
828         } else {
829                 pcl->obj.index = map->m_pa >> PAGE_SHIFT;
830
831                 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
832                 if (IS_ERR(grp)) {
833                         err = PTR_ERR(grp);
834                         goto err_out;
835                 }
836
837                 if (grp != &pcl->obj) {
838                         fe->pcl = container_of(grp,
839                                         struct z_erofs_pcluster, obj);
840                         err = -EEXIST;
841                         goto err_out;
842                 }
843         }
844         fe->owned_head = &pcl->next;
845         fe->pcl = pcl;
846         return 0;
847
848 err_out:
849         mutex_unlock(&pcl->lock);
850         z_erofs_free_pcluster(pcl);
851         return err;
852 }
853
854 static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
855 {
856         struct erofs_map_blocks *map = &fe->map;
857         struct erofs_workgroup *grp = NULL;
858         int ret;
859
860         DBG_BUGON(fe->pcl);
861
862         /* must be Z_EROFS_PCLUSTER_TAIL or point to a previous pcluster */
863         DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
864
865         if (!(map->m_flags & EROFS_MAP_META)) {
866                 grp = erofs_find_workgroup(fe->inode->i_sb,
867                                            map->m_pa >> PAGE_SHIFT);
868         } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
869                 DBG_BUGON(1);
870                 return -EFSCORRUPTED;
871         }
872
873         if (grp) {
874                 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
875                 ret = -EEXIST;
876         } else {
877                 ret = z_erofs_register_pcluster(fe);
878         }
879
880         if (ret == -EEXIST) {
881                 mutex_lock(&fe->pcl->lock);
882                 z_erofs_try_to_claim_pcluster(fe);
883         } else if (ret) {
884                 return ret;
885         }
886         z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
887                                 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
888         /* since file-backed online pages are traversed in reverse order */
889         fe->icur = z_erofs_pclusterpages(fe->pcl);
890         return 0;
891 }
892
893 /*
894  * keep in mind that no referenced pclusters will be freed, and
895  * unreferenced ones are freed only after an RCU grace period.
896  */
897 static void z_erofs_rcu_callback(struct rcu_head *head)
898 {
899         z_erofs_free_pcluster(container_of(head,
900                         struct z_erofs_pcluster, rcu));
901 }
902
903 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
904 {
905         struct z_erofs_pcluster *const pcl =
906                 container_of(grp, struct z_erofs_pcluster, obj);
907
908         call_rcu(&pcl->rcu, z_erofs_rcu_callback);
909 }
910
911 static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
912 {
913         struct z_erofs_pcluster *pcl = fe->pcl;
914
915         if (!pcl)
916                 return false;
917
918         z_erofs_bvec_iter_end(&fe->biter);
919         mutex_unlock(&pcl->lock);
920
921         if (fe->candidate_bvpage)
922                 fe->candidate_bvpage = NULL;
923
924         /*
925          * once all pending pages are added, don't hold the pcluster
926          * reference any longer if it isn't hosted by ourselves.
927          */
928         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
929                 erofs_workgroup_put(&pcl->obj);
930
931         fe->pcl = NULL;
932         return true;
933 }
934
935 static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
936                                  struct page *page, unsigned int pageofs,
937                                  unsigned int len)
938 {
939         struct super_block *sb = inode->i_sb;
940         struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
941         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
942         u8 *src, *dst;
943         unsigned int i, cnt;
944
945         if (!packed_inode)
946                 return -EFSCORRUPTED;
947
948         buf.inode = packed_inode;
949         pos += EROFS_I(inode)->z_fragmentoff;
950         for (i = 0; i < len; i += cnt) {
951                 cnt = min_t(unsigned int, len - i,
952                             sb->s_blocksize - erofs_blkoff(sb, pos));
953                 src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
954                 if (IS_ERR(src)) {
955                         erofs_put_metabuf(&buf);
956                         return PTR_ERR(src);
957                 }
958
959                 dst = kmap_local_page(page);
960                 memcpy(dst + pageofs + i, src + erofs_blkoff(sb, pos), cnt);
961                 kunmap_local(dst);
962                 pos += cnt;
963         }
964         erofs_put_metabuf(&buf);
965         return 0;
966 }
967
968 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
969                                 struct page *page)
970 {
971         struct inode *const inode = fe->inode;
972         struct erofs_map_blocks *const map = &fe->map;
973         const loff_t offset = page_offset(page);
974         bool tight = true, exclusive;
975         unsigned int cur, end, split;
976         int err = 0;
977
978         /* register locked file pages as online pages in pack */
979         z_erofs_onlinepage_init(page);
980
981         split = 0;
982         end = PAGE_SIZE;
983 repeat:
984         cur = end - 1;
985
986         if (offset + cur < map->m_la ||
987             offset + cur >= map->m_la + map->m_llen) {
988                 if (z_erofs_collector_end(fe))
989                         fe->backmost = false;
990                 map->m_la = offset + cur;
991                 map->m_llen = 0;
992                 err = z_erofs_map_blocks_iter(inode, map, 0);
993                 if (err)
994                         goto out;
995         } else {
996                 if (fe->pcl)
997                         goto hit;
998                 /* didn't get a valid pcluster previously (very rare) */
999         }
1000
1001         if (!(map->m_flags & EROFS_MAP_MAPPED) ||
1002             map->m_flags & EROFS_MAP_FRAGMENT)
1003             goto hit;
1004
1005         err = z_erofs_collector_begin(fe);
1006         if (err)
1007                 goto out;
1008
1009         if (z_erofs_is_inline_pcluster(fe->pcl)) {
1010                 void *mp;
1011
1012                 mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
1013                                         erofs_blknr(inode->i_sb, map->m_pa),
1014                                         EROFS_NO_KMAP);
1015                 if (IS_ERR(mp)) {
1016                         err = PTR_ERR(mp);
1017                         erofs_err(inode->i_sb,
1018                                   "failed to get inline page, err %d", err);
1019                         goto out;
1020                 }
1021                 get_page(fe->map.buf.page);
1022                 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
1023                            fe->map.buf.page);
1024                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
1025         } else {
1026                 /* bind cache first when cached decompression is preferred */
1027                 z_erofs_bind_cache(fe);
1028         }
1029 hit:
1030         /*
1031          * Ensure the current partial page belongs to this submit chain rather
1032          * than other concurrent submit chains or the noio(bypass) chain since
1033          * those chains are handled asynchronously thus the page cannot be used
1034          * for inplace I/O or bvpage (should be processed in a strict order.)
1035          */
1036         tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
1037
1038         cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
1039         if (!(map->m_flags & EROFS_MAP_MAPPED)) {
1040                 zero_user_segment(page, cur, end);
1041                 goto next_part;
1042         }
1043         if (map->m_flags & EROFS_MAP_FRAGMENT) {
1044                 unsigned int pageofs, skip, len;
1045
1046                 if (offset > map->m_la) {
1047                         pageofs = 0;
1048                         skip = offset - map->m_la;
1049                 } else {
1050                         pageofs = map->m_la & ~PAGE_MASK;
1051                         skip = 0;
1052                 }
1053                 len = min_t(unsigned int, map->m_llen - skip, end - cur);
1054                 err = z_erofs_read_fragment(inode, skip, page, pageofs, len);
1055                 if (err)
1056                         goto out;
1057                 ++split;
1058                 tight = false;
1059                 goto next_part;
1060         }
1061
1062         exclusive = (!cur && (!split || tight));
1063         if (cur)
1064                 tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
1065
1066         err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
1067                                         .page = page,
1068                                         .offset = offset - map->m_la,
1069                                         .end = end,
1070                                   }), exclusive);
1071         if (err)
1072                 goto out;
1073
1074         z_erofs_onlinepage_split(page);
1075         /* bump up the number of split parts of a page */
1076         ++split;
1077         if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
1078                 fe->pcl->multibases = true;
1079         if (fe->pcl->length < offset + end - map->m_la) {
1080                 fe->pcl->length = offset + end - map->m_la;
1081                 fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
1082         }
1083         if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
1084             !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
1085             fe->pcl->length == map->m_llen)
1086                 fe->pcl->partial = false;
1087 next_part:
1088         /* shorten the remaining extent to update progress */
1089         map->m_llen = offset + cur - map->m_la;
1090         map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
1091
1092         end = cur;
1093         if (end > 0)
1094                 goto repeat;
1095
1096 out:
1097         if (err)
1098                 z_erofs_page_mark_eio(page);
1099         z_erofs_onlinepage_endio(page);
1100         return err;
1101 }
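
/*
 * Walk-through note for z_erofs_do_read_page() (illustration only):
 * the repeat loop processes a page tail to head; `end' starts at
 * PAGE_SIZE and each round handles the sub-range [cur, end) that falls
 * into a single extent, then shrinks `end' to `cur'. A page straddling
 * two extents is thus attached as e.g. [2048, 4096) first, then
 * [0, 2048).
 */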
1102
1103 static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
1104                                        unsigned int readahead_pages)
1105 {
1106         /* auto: enable for read_folio, disable for readahead */
1107         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
1108             !readahead_pages)
1109                 return true;
1110
1111         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
1112             (readahead_pages <= sbi->opt.max_sync_decompress_pages))
1113                 return true;
1114
1115         return false;
1116 }
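
/*
 * E.g. with sync_decompress=auto, a single read_folio() call (zero
 * readahead pages) decompresses synchronously while readahead is
 * deferred to the background queue; force_on extends synchronous
 * decompression to readahead batches of up to
 * max_sync_decompress_pages. (Illustration only.)
 */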
1117
1118 static bool z_erofs_page_is_invalidated(struct page *page)
1119 {
1120         return !page->mapping && !z_erofs_is_shortlived_page(page);
1121 }
1122
1123 struct z_erofs_decompress_backend {
1124         struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
1125         struct super_block *sb;
1126         struct z_erofs_pcluster *pcl;
1127
1128         /* pages with the longest decompressed length for deduplication */
1129         struct page **decompressed_pages;
1130         /* pages to keep the compressed data */
1131         struct page **compressed_pages;
1132
1133         struct list_head decompressed_secondary_bvecs;
1134         struct page **pagepool;
1135         unsigned int onstack_used, nr_pages;
1136 };
1137
1138 struct z_erofs_bvec_item {
1139         struct z_erofs_bvec bvec;
1140         struct list_head list;
1141 };
1142
1143 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
1144                                          struct z_erofs_bvec *bvec)
1145 {
1146         struct z_erofs_bvec_item *item;
1147         unsigned int pgnr;
1148
1149         if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
1150             (bvec->end == PAGE_SIZE ||
1151              bvec->offset + bvec->end == be->pcl->length)) {
1152                 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
1153                 DBG_BUGON(pgnr >= be->nr_pages);
1154                 if (!be->decompressed_pages[pgnr]) {
1155                         be->decompressed_pages[pgnr] = bvec->page;
1156                         return;
1157                 }
1158         }
1159
1160         /* (cold path) one pcluster is requested multiple times */
1161         item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
1162         item->bvec = *bvec;
1163         list_add(&item->list, &be->decompressed_secondary_bvecs);
1164 }
1165
1166 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
1167                                       int err)
1168 {
1169         unsigned int off0 = be->pcl->pageofs_out;
1170         struct list_head *p, *n;
1171
1172         list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
1173                 struct z_erofs_bvec_item *bvi;
1174                 unsigned int end, cur;
1175                 void *dst, *src;
1176
1177                 bvi = container_of(p, struct z_erofs_bvec_item, list);
1178                 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
1179                 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
1180                             bvi->bvec.end);
1181                 dst = kmap_local_page(bvi->bvec.page);
1182                 while (cur < end) {
1183                         unsigned int pgnr, scur, len;
1184
1185                         pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
1186                         DBG_BUGON(pgnr >= be->nr_pages);
1187
1188                         scur = bvi->bvec.offset + cur -
1189                                         ((pgnr << PAGE_SHIFT) - off0);
1190                         len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
1191                         if (!be->decompressed_pages[pgnr]) {
1192                                 err = -EFSCORRUPTED;
1193                                 cur += len;
1194                                 continue;
1195                         }
1196                         src = kmap_local_page(be->decompressed_pages[pgnr]);
1197                         memcpy(dst + cur, src + scur, len);
1198                         kunmap_local(src);
1199                         cur += len;
1200                 }
1201                 kunmap_local(dst);
1202                 if (err)
1203                         z_erofs_page_mark_eio(bvi->bvec.page);
1204                 z_erofs_onlinepage_endio(bvi->bvec.page);
1205                 list_del(p);
1206                 kfree(bvi);
1207         }
1208 }
1209
1210 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
1211 {
1212         struct z_erofs_pcluster *pcl = be->pcl;
1213         struct z_erofs_bvec_iter biter;
1214         struct page *old_bvpage;
1215         int i;
1216
1217         z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
1218         for (i = 0; i < pcl->vcnt; ++i) {
1219                 struct z_erofs_bvec bvec;
1220
1221                 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
1222
1223                 if (old_bvpage)
1224                         z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1225
1226                 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
1227                 z_erofs_do_decompressed_bvec(be, &bvec);
1228         }
1229
1230         old_bvpage = z_erofs_bvec_iter_end(&biter);
1231         if (old_bvpage)
1232                 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1233 }
1234
1235 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
1236                                   bool *overlapped)
1237 {
1238         struct z_erofs_pcluster *pcl = be->pcl;
1239         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1240         int i, err = 0;
1241
1242         *overlapped = false;
1243         for (i = 0; i < pclusterpages; ++i) {
1244                 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
1245                 struct page *page = bvec->page;
1246
1247                 /* compressed pages ought to be present before decompressing */
1248                 if (!page) {
1249                         DBG_BUGON(1);
1250                         continue;
1251                 }
1252                 be->compressed_pages[i] = page;
1253
1254                 if (z_erofs_is_inline_pcluster(pcl)) {
1255                         if (!PageUptodate(page))
1256                                 err = -EIO;
1257                         continue;
1258                 }
1259
1260                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1261                 if (!z_erofs_is_shortlived_page(page)) {
1262                         if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
1263                                 if (!PageUptodate(page))
1264                                         err = -EIO;
1265                                 continue;
1266                         }
1267                         z_erofs_do_decompressed_bvec(be, bvec);
1268                         *overlapped = true;
1269                 }
1270         }
1271
1272         if (err)
1273                 return err;
1274         return 0;
1275 }
1276
1277 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1278                                        int err)
1279 {
1280         struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1281         struct z_erofs_pcluster *pcl = be->pcl;
1282         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1283         const struct z_erofs_decompressor *decompressor =
1284                                 &erofs_decompressors[pcl->algorithmformat];
1285         unsigned int i, inputsize;
1286         int err2;
1287         struct page *page;
1288         bool overlapped;
1289
1290         mutex_lock(&pcl->lock);
1291         be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
1292
1293         /* allocate (de)compressed page arrays if they cannot be kept on stack */
1294         be->decompressed_pages = NULL;
1295         be->compressed_pages = NULL;
1296         be->onstack_used = 0;
1297         if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1298                 be->decompressed_pages = be->onstack_pages;
1299                 be->onstack_used = be->nr_pages;
1300                 memset(be->decompressed_pages, 0,
1301                        sizeof(struct page *) * be->nr_pages);
1302         }
1303
1304         if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1305                 be->compressed_pages = be->onstack_pages + be->onstack_used;
1306
1307         if (!be->decompressed_pages)
1308                 be->decompressed_pages =
1309                         kvcalloc(be->nr_pages, sizeof(struct page *),
1310                                  GFP_KERNEL | __GFP_NOFAIL);
1311         if (!be->compressed_pages)
1312                 be->compressed_pages =
1313                         kvcalloc(pclusterpages, sizeof(struct page *),
1314                                  GFP_KERNEL | __GFP_NOFAIL);
1315
1316         z_erofs_parse_out_bvecs(be);
1317         err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1318         if (err2)
1319                 err = err2;
1320         if (err)
1321                 goto out;
1322
1323         if (z_erofs_is_inline_pcluster(pcl))
1324                 inputsize = pcl->tailpacking_size;
1325         else
1326                 inputsize = pclusterpages * PAGE_SIZE;
1327
1328         err = decompressor->decompress(&(struct z_erofs_decompress_req) {
1329                                         .sb = be->sb,
1330                                         .in = be->compressed_pages,
1331                                         .out = be->decompressed_pages,
1332                                         .pageofs_in = pcl->pageofs_in,
1333                                         .pageofs_out = pcl->pageofs_out,
1334                                         .inputsize = inputsize,
1335                                         .outputsize = pcl->length,
1336                                         .alg = pcl->algorithmformat,
1337                                         .inplace_io = overlapped,
1338                                         .partial_decoding = pcl->partial,
1339                                         .fillgaps = pcl->multibases,
1340                                  }, be->pagepool);
1341
1342 out:
1343         /* must handle all compressed pages before actual file pages */
1344         if (z_erofs_is_inline_pcluster(pcl)) {
1345                 page = pcl->compressed_bvecs[0].page;
1346                 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1347                 put_page(page);
1348         } else {
1349                 for (i = 0; i < pclusterpages; ++i) {
1350                         page = pcl->compressed_bvecs[i].page;
1351
1352                         if (erofs_page_is_managed(sbi, page))
1353                                 continue;
1354
1355                         /* recycle all individual short-lived pages */
1356                         (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1357                         WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1358                 }
1359         }
1360         if (be->compressed_pages < be->onstack_pages ||
1361             be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1362                 kvfree(be->compressed_pages);
1363         z_erofs_fill_other_copies(be, err);
1364
1365         for (i = 0; i < be->nr_pages; ++i) {
1366                 page = be->decompressed_pages[i];
1367                 if (!page)
1368                         continue;
1369
1370                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1371
1372                 /* recycle all individual short-lived pages */
1373                 if (z_erofs_put_shortlivedpage(be->pagepool, page))
1374                         continue;
1375                 if (err)
1376                         z_erofs_page_mark_eio(page);
1377                 z_erofs_onlinepage_endio(page);
1378         }
1379
1380         if (be->decompressed_pages != be->onstack_pages)
1381                 kvfree(be->decompressed_pages);
1382
1383         pcl->length = 0;
1384         pcl->partial = true;
1385         pcl->multibases = false;
1386         pcl->bvset.nextpage = NULL;
1387         pcl->vcnt = 0;
1388
1389         /* pcluster lock MUST be taken before the following line */
1390         WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1391         mutex_unlock(&pcl->lock);
1392         return err;
1393 }
1394
1395 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1396                                      struct page **pagepool)
1397 {
1398         struct z_erofs_decompress_backend be = {
1399                 .sb = io->sb,
1400                 .pagepool = pagepool,
1401                 .decompressed_secondary_bvecs =
1402                         LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1403         };
1404         z_erofs_next_pcluster_t owned = io->head;
1405
1406         while (owned != Z_EROFS_PCLUSTER_TAIL) {
1407                 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1408
1409                 be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1410                 owned = READ_ONCE(be.pcl->next);
1411
1412                 z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
1413                 erofs_workgroup_put(&be.pcl->obj);
1414         }
1415 }
1416
1417 static void z_erofs_decompressqueue_work(struct work_struct *work)
1418 {
1419         struct z_erofs_decompressqueue *bgq =
1420                 container_of(work, struct z_erofs_decompressqueue, u.work);
1421         struct page *pagepool = NULL;
1422
1423         DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
1424         z_erofs_decompress_queue(bgq, &pagepool);
1425         erofs_release_pages(&pagepool);
1426         kvfree(bgq);
1427 }
1428
1429 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1430 static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
1431 {
1432         z_erofs_decompressqueue_work((struct work_struct *)work);
1433 }
1434 #endif
1435
1436 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1437                                        int bios)
1438 {
1439         struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1440
1441         /* wake up the caller thread for sync decompression */
1442         if (io->sync) {
1443                 if (!atomic_add_return(bios, &io->pending_bios))
1444                         complete(&io->u.done);
1445                 return;
1446         }
1447
1448         if (atomic_add_return(bios, &io->pending_bios))
1449                 return;
1450         /* Use (kthread_)work and sync decompression for atomic contexts only */
1451         if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
1452 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1453                 struct kthread_worker *worker;
1454
1455                 rcu_read_lock();
1456                 worker = rcu_dereference(
1457                                 z_erofs_pcpu_workers[raw_smp_processor_id()]);
1458                 if (!worker) {
1459                         INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
1460                         queue_work(z_erofs_workqueue, &io->u.work);
1461                 } else {
1462                         kthread_queue_work(worker, &io->u.kthread_work);
1463                 }
1464                 rcu_read_unlock();
1465 #else
1466                 queue_work(z_erofs_workqueue, &io->u.work);
1467 #endif
1468                 /* enable sync decompression for readahead */
1469                 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1470                         sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1471                 return;
1472         }
1473         z_erofs_decompressqueue_work(&io->u.work);
1474 }
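
/*
 * Dispatch summary for the kickoff above (illustration only): once
 * pending_bios drops to zero, a synchronous queue completes io->u.done
 * for the waiting caller; an asynchronous queue is handed to a per-CPU
 * kthread worker (or the unbound workqueue as a fallback) when running
 * in atomic context, or decompressed inline in plain task context.
 */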
1475
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
                                               unsigned int nr,
                                               struct page **pagepool,
                                               struct address_space *mc)
{
        const pgoff_t index = pcl->obj.index;
        gfp_t gfp = mapping_gfp_mask(mc);
        bool tocache = false;

        struct address_space *mapping;
        struct page *oldpage, *page;
        int justfound;

repeat:
        page = READ_ONCE(pcl->compressed_bvecs[nr].page);
        oldpage = page;

        if (!page)
                goto out_allocpage;

        justfound = (unsigned long)page & 1UL;
        page = (struct page *)((unsigned long)page & ~1UL);

        /*
         * Handle preallocated cached pages, which are used to avoid direct
         * reclaim; otherwise, the inplace I/O path is taken instead.
         */
        if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
                WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
                set_page_private(page, 0);
                tocache = true;
                goto out_tocache;
        }
        mapping = READ_ONCE(page->mapping);

        /*
         * File-backed online pages in the pcluster are all locked steadily,
         * therefore it is impossible for `mapping' to be NULL for them.
         */
        if (mapping && mapping != mc)
                /* ought to be unmanaged pages */
                goto out;

        /* directly return for shortlived pages as well */
        if (z_erofs_is_shortlived_page(page))
                goto out;

        lock_page(page);

        /* only true if page reclaim went wrong, which should never happen */
        DBG_BUGON(justfound && PagePrivate(page));

        /* the page is still in the managed cache */
        if (page->mapping == mc) {
                WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);

                if (!PagePrivate(page)) {
                        /*
                         * !PagePrivate(page) is only possible for a
                         * just-found page under the current restriction,
                         * since pages already recorded in compressed_bvecs[]
                         * always carry PagePrivate.
                         */
                        DBG_BUGON(!justfound);

                        justfound = 0;
                        set_page_private(page, (unsigned long)pcl);
                        SetPagePrivate(page);
                }

                /* no need to submit I/O if it is already up-to-date */
                if (PageUptodate(page)) {
                        unlock_page(page);
                        page = NULL;
                }
                goto out;
        }

        /*
         * The managed page has been truncated, so it's unsafe to
         * reuse this one; let's allocate a new cache-managed page.
         */
        DBG_BUGON(page->mapping);
        DBG_BUGON(!justfound);

        tocache = true;
        unlock_page(page);
        put_page(page);
out_allocpage:
        page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
        if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
                               oldpage, page)) {
                erofs_pagepool_add(pagepool, page);
                cond_resched();
                goto repeat;
        }
out_tocache:
        if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
                /* turn it into a short-lived page if that fails (1 ref) */
                set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
                goto out;
        }
        attach_page_private(page, pcl);
        /* drop the refcount added by allocpage (then 2 refs are held here) */
        put_page(page);

out:    /* the only exit (for tracing and debugging) */
        return page;
}

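/*
 * Set up a decompression jobqueue: in background mode, allocate a queue
 * and initialize its (kthread_)work; if that allocation fails, or if no
 * background flag is given at all, fall back to the caller-provided
 * on-stack queue @fgq, which is completed synchronously via u.done.
 */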
static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
                              struct z_erofs_decompressqueue *fgq, bool *fg)
{
        struct z_erofs_decompressqueue *q;

        if (fg && !*fg) {
                q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
                if (!q) {
                        *fg = true;
                        goto fg_out;
                }
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
                kthread_init_work(&q->u.kthread_work,
                                  z_erofs_decompressqueue_kthread_work);
#else
                INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
#endif
        } else {
fg_out:
                q = fgq;
                init_completion(&fgq->u.done);
                atomic_set(&fgq->pending_bios, 0);
                q->eio = false;
                q->sync = true;
        }
        q->sb = sb;
        q->head = Z_EROFS_PCLUSTER_TAIL;
        return q;
}

/* define decompression jobqueue types */
enum {
        JQ_BYPASS,
        JQ_SUBMIT,
        NR_JOBQUEUES,
};

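/*
 * Unlink @pcl from the submission chain (the submit tail is redirected to
 * @owned_head, skipping over @pcl) and append it to the tail of the bypass
 * jobqueue so that it can be decompressed without any device I/O.
 */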
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
                                    z_erofs_next_pcluster_t qtail[],
                                    z_erofs_next_pcluster_t owned_head)
{
        z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
        z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);

        WRITE_ONCE(*submit_qtail, owned_head);
        WRITE_ONCE(*bypass_qtail, &pcl->next);

        qtail[JQ_BYPASS] = &pcl->next;
}

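/*
 * Bio completion handler: mark managed-cache pages up-to-date on success
 * and unlock them, record any I/O error on the queue, then drop one
 * in-flight bio from the pending count via z_erofs_decompress_kickoff().
 */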
static void z_erofs_decompressqueue_endio(struct bio *bio)
{
        struct z_erofs_decompressqueue *q = bio->bi_private;
        blk_status_t err = bio->bi_status;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                DBG_BUGON(PageUptodate(page));
                DBG_BUGON(z_erofs_page_is_invalidated(page));

                if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
                        if (!err)
                                SetPageUptodate(page);
                        unlock_page(page);
                }
        }
        if (err)
                q->eio = true;
        z_erofs_decompress_kickoff(q, -1);
        bio_put(bio);
}

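/*
 * Walk the chain of pclusters starting at f->owned_head: pclusters that
 * actually need device reads are batched into as few bios as possible
 * (merging physically contiguous blocks), while inline or fully cached
 * pclusters are moved to the bypass jobqueue instead.  PSI memstall time
 * is accounted while refaulting workingset pages.
 */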
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
                                 struct z_erofs_decompressqueue *fgq,
                                 bool *force_fg, bool readahead)
{
        struct super_block *sb = f->inode->i_sb;
        struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
        z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
        struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
        z_erofs_next_pcluster_t owned_head = f->owned_head;
        /* bio is NULL initially, so no need to initialize last_{index,bdev} */
        pgoff_t last_index;
        struct block_device *last_bdev;
        unsigned int nr_bios = 0;
        struct bio *bio = NULL;
        unsigned long pflags;
        int memstall = 0;

        /*
         * If managed cache is enabled, the bypass jobqueue is needed:
         * pclusters on that queue require no reads from the device at all.
         */
        q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
        q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);

        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
        qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

        /* by default, all pclusters need I/O submission */
        q[JQ_SUBMIT]->head = owned_head;

        do {
                struct erofs_map_dev mdev;
                struct z_erofs_pcluster *pcl;
                pgoff_t cur, end;
                unsigned int i = 0;
                bool bypass = true;

                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
                pcl = container_of(owned_head, struct z_erofs_pcluster, next);
                owned_head = READ_ONCE(pcl->next);

                if (z_erofs_is_inline_pcluster(pcl)) {
                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
                        continue;
                }

                /* no device id here, thus it will always succeed */
                mdev = (struct erofs_map_dev) {
                        .m_pa = erofs_pos(sb, pcl->obj.index),
                };
                (void)erofs_map_dev(sb, &mdev);

                cur = erofs_blknr(sb, mdev.m_pa);
                end = cur + pcl->pclusterpages;

                do {
                        struct page *page;

                        page = pickup_page_for_submission(pcl, i++,
                                        &f->pagepool, mc);
                        if (!page)
                                continue;

                        if (bio && (cur != last_index + 1 ||
                                    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
                                submit_bio(bio);
                                if (memstall) {
                                        psi_memstall_leave(&pflags);
                                        memstall = 0;
                                }
                                bio = NULL;
                        }

                        if (unlikely(PageWorkingset(page)) && !memstall) {
                                psi_memstall_enter(&pflags);
                                memstall = 1;
                        }

                        if (!bio) {
                                bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
                                                REQ_OP_READ, GFP_NOIO);
                                bio->bi_end_io = z_erofs_decompressqueue_endio;

                                last_bdev = mdev.m_bdev;
                                bio->bi_iter.bi_sector = (sector_t)cur <<
                                        (sb->s_blocksize_bits - 9);
                                bio->bi_private = q[JQ_SUBMIT];
                                if (readahead)
                                        bio->bi_opf |= REQ_RAHEAD;
                                ++nr_bios;
                        }

                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
                                goto submit_bio_retry;

                        last_index = cur;
                        bypass = false;
                } while (++cur < end);

                if (!bypass)
                        qtail[JQ_SUBMIT] = &pcl->next;
                else
                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
        } while (owned_head != Z_EROFS_PCLUSTER_TAIL);

        if (bio) {
                submit_bio(bio);
                if (memstall)
                        psi_memstall_leave(&pflags);
        }

        /*
         * Although background decompression is preferred, nothing is
         * actually pending for submission here, so drop the allocated
         * queue directly instead of kicking off decompression.
         */
        if (!*force_fg && !nr_bios) {
                kvfree(q[JQ_SUBMIT]);
                return;
        }
        z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}

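/*
 * Submit all queued pclusters for I/O and decompress the bypass queue
 * immediately in the caller context; in synchronous mode, also wait for
 * all submitted bios to complete and then decompress the submit queue
 * in the caller context as well.
 */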
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
                             bool force_fg, bool ra)
{
        struct z_erofs_decompressqueue io[NR_JOBQUEUES];

        if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
                return;
        z_erofs_submit_queue(f, io, &force_fg, ra);

        /* handle bypass queue (no I/O pclusters) immediately */
        z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);

        if (!force_fg)
                return;

        /* wait until all bios are completed */
        wait_for_completion_io(&io[JQ_SUBMIT].u.done);

        /* handle the synchronous decompress queue in the caller context */
        z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
}

/*
 * Since partial uptodate is not implemented yet, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                struct readahead_control *rac, bool backmost)
{
        struct inode *inode = f->inode;
        struct erofs_map_blocks *map = &f->map;
        erofs_off_t cur, end, headoffset = f->headoffset;
        int err;

        if (backmost) {
                if (rac)
                        end = headoffset + readahead_length(rac) - 1;
                else
                        end = headoffset + PAGE_SIZE - 1;
                map->m_la = end;
                err = z_erofs_map_blocks_iter(inode, map,
                                              EROFS_GET_BLOCKS_READMORE);
                if (err)
                        return;

                /* expand readahead for the trailing edge if in readahead mode */
                if (rac) {
                        cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
                        readahead_expand(rac, headoffset, cur - headoffset);
                        return;
                }
                end = round_up(end, PAGE_SIZE);
        } else {
                end = round_up(map->m_la, PAGE_SIZE);

                if (!map->m_llen)
                        return;
        }

        cur = map->m_la + map->m_llen - 1;
        while ((cur >= end) && (cur < i_size_read(inode))) {
                pgoff_t index = cur >> PAGE_SHIFT;
                struct page *page;

                page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
                if (page) {
                        if (PageUptodate(page)) {
                                unlock_page(page);
                        } else {
                                err = z_erofs_do_read_page(f, page);
                                if (err)
                                        erofs_err(inode->i_sb,
                                                  "readmore error at page %lu @ nid %llu",
                                                  index, EROFS_I(inode)->nid);
                        }
                        put_page(page);
                }

                if (cur < PAGE_SIZE)
                        break;
                cur = (index << PAGE_SHIFT) - 1;
        }
}

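/*
 * Read one folio, pulling in neighboring pages that fall into the same
 * pclusters via z_erofs_pcluster_readmore() on both edges, then kick off
 * the runqueue to submit (and synchronously decompress, if appropriate).
 */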
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
        struct page *page = &folio->page;
        struct inode *const inode = page->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
        int err;

        trace_erofs_readpage(page, false);
        f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

        z_erofs_pcluster_readmore(&f, NULL, true);
        err = z_erofs_do_read_page(&f, page);
        z_erofs_pcluster_readmore(&f, NULL, false);
        (void)z_erofs_collector_end(&f);

        /* if some compressed clusters are ready, submit them anyway */
        z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);

        if (err)
                erofs_err(inode->i_sb, "failed to read, err [%d]", err);

        erofs_put_metabuf(&f.map.buf);
        erofs_release_pages(&f.pagepool);
        return err;
}

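/*
 * Readahead entry: pages taken from @rac are chained into a LIFO list
 * threaded through their page_private fields, so z_erofs_do_read_page()
 * processes them in reverse order before the whole batch is submitted in
 * a single runqueue pass.
 */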
static void z_erofs_readahead(struct readahead_control *rac)
{
        struct inode *const inode = rac->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
        struct page *head = NULL, *page;
        unsigned int nr_pages;

        f.headoffset = readahead_pos(rac);

        z_erofs_pcluster_readmore(&f, rac, true);
        nr_pages = readahead_count(rac);
        trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

        while ((page = readahead_page(rac))) {
                set_page_private(page, (unsigned long)head);
                head = page;
        }

        while (head) {
                struct page *page = head;
                int err;

                /* traversal in reverse order */
                head = (void *)page_private(page);

                err = z_erofs_do_read_page(&f, page);
                if (err)
                        erofs_err(inode->i_sb,
                                  "readahead error at page %lu @ nid %llu",
                                  page->index, EROFS_I(inode)->nid);
                put_page(page);
        }
        z_erofs_pcluster_readmore(&f, rac, false);
        (void)z_erofs_collector_end(&f);

        z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
        erofs_put_metabuf(&f.map.buf);
        erofs_release_pages(&f.pagepool);
}

const struct address_space_operations z_erofs_aops = {
        .read_folio = z_erofs_read_folio,
        .readahead = z_erofs_readahead,
};