staging: erofs: detect potential multiref due to corrupted images
[platform/kernel/linux-rpi.git] drivers/staging/erofs/unzip_vle.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
15
16 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
17 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
18
19 void z_erofs_exit_zip_subsystem(void)
20 {
21         destroy_workqueue(z_erofs_workqueue);
22         kmem_cache_destroy(z_erofs_workgroup_cachep);
23 }
24
25 static inline int init_unzip_workqueue(void)
26 {
27         const unsigned onlinecpus = num_possible_cpus();
28
29         /*
30          * we don't need too many threads; limiting the number of
31          * threads can improve scheduling performance.
32          */
33         z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
34                 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
35                 onlinecpus + onlinecpus / 4);
36
37         return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
38 }
39
40 int z_erofs_init_zip_subsystem(void)
41 {
42         z_erofs_workgroup_cachep =
43                 kmem_cache_create("erofs_compress",
44                 Z_EROFS_WORKGROUP_SIZE, 0,
45                 SLAB_RECLAIM_ACCOUNT, NULL);
46
47         if (z_erofs_workgroup_cachep != NULL) {
48                 if (!init_unzip_workqueue())
49                         return 0;
50
51                 kmem_cache_destroy(z_erofs_workgroup_cachep);
52         }
53         return -ENOMEM;
54 }
55
56 enum z_erofs_vle_work_role {
57         Z_EROFS_VLE_WORK_SECONDARY,
58         Z_EROFS_VLE_WORK_PRIMARY,
59         /*
60          * The current work was the tail of an existing chain, and all the
61          * previously processed chained works have decided to hook up to it.
62          * A new chain should be created for the remaining unprocessed works;
63          * therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
64          * the next work cannot reuse the whole page in the following scenario:
65          *  ________________________________________________________________
66          * |      tail (partial) page     |       head (partial) page       |
67          * |  (belongs to the next work)  |  (belongs to the current work)  |
68          * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
69          */
70         Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
71         /*
72          * The current work has been linked with the processed chained works,
73          * and could also be linked with the potentially remaining works, which
74          * means if the page being processed is the tail partial page of the work,
75          * the current work can safely use the whole page (since the next work
76          * is under control) for in-place decompression, as illustrated below:
77          *  ________________________________________________________________
78          * |  tail (partial) page  |          head (partial) page           |
79          * | (of the current work) |         (of the previous work)         |
80          * |  PRIMARY_FOLLOWED or  |                                        |
81          * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
82          *
83          * [  (*) the above page can be used for the current work itself.  ]
84          */
85         Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
86         Z_EROFS_VLE_WORK_MAX
87 };
88
89 struct z_erofs_vle_work_builder {
90         enum z_erofs_vle_work_role role;
91         /*
92          * 'hosted = false' means that the current workgroup doesn't belong to
93          * the owned chained workgroups. In other words, it is none of our
94          * business to submit this workgroup.
95          */
96         bool hosted;
97
98         struct z_erofs_vle_workgroup *grp;
99         struct z_erofs_vle_work *work;
100         struct z_erofs_pagevec_ctor vector;
101
102         /* pages used for reading the compressed data */
103         struct page **compressed_pages;
104         unsigned compressed_deficit;
105 };
106
107 #define VLE_WORK_BUILDER_INIT() \
108         { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
109
110 #ifdef EROFS_FS_HAS_MANAGED_CACHE
111
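    /*
     * Pin the compressed pages of this cluster that already live in the
     * managed cache.  Return true only if every page is available without
     * extra I/O; when reserve_allocation is set, missing slots are marked
     * with EROFS_UNALLOCATED_CACHED_PAGE so that cache pages can be
     * allocated for them later at I/O submission time.
     */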
112 static bool grab_managed_cache_pages(struct address_space *mapping,
113                                      erofs_blk_t start,
114                                      struct page **compressed_pages,
115                                      int clusterblks,
116                                      bool reserve_allocation)
117 {
118         bool noio = true;
119         unsigned int i;
120
121         /* TODO: optimize by introducing find_get_pages_range */
122         for (i = 0; i < clusterblks; ++i) {
123                 struct page *page, *found;
124
125                 if (READ_ONCE(compressed_pages[i]) != NULL)
126                         continue;
127
128                 page = found = find_get_page(mapping, start + i);
129                 if (found == NULL) {
130                         noio = false;
131                         if (!reserve_allocation)
132                                 continue;
133                         page = EROFS_UNALLOCATED_CACHED_PAGE;
134                 }
135
136                 if (NULL == cmpxchg(compressed_pages + i, NULL, page))
137                         continue;
138
139                 if (found != NULL)
140                         put_page(found);
141         }
142         return noio;
143 }
144
145 /* called by erofs_shrinker to get rid of all compressed_pages */
146 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
147                                        struct erofs_workgroup *egrp)
148 {
149         struct z_erofs_vle_workgroup *const grp =
150                 container_of(egrp, struct z_erofs_vle_workgroup, obj);
151         struct address_space *const mapping = sbi->managed_cache->i_mapping;
152         const int clusterpages = erofs_clusterpages(sbi);
153         int i;
154
155         /*
156          * the refcount of the workgroup is now frozen to 1,
157          * therefore no need to worry about concurrent decompression users.
158          */
159         for (i = 0; i < clusterpages; ++i) {
160                 struct page *page = grp->compressed_pages[i];
161
162                 if (page == NULL || page->mapping != mapping)
163                         continue;
164
165                 /* block other users from reclaiming or migrating the page */
166                 if (!trylock_page(page))
167                         return -EBUSY;
168
169                 /* barrier is implied in the following 'unlock_page' */
170                 WRITE_ONCE(grp->compressed_pages[i], NULL);
171
172                 set_page_private(page, 0);
173                 ClearPagePrivate(page);
174
175                 unlock_page(page);
176                 put_page(page);
177         }
178         return 0;
179 }
180
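    /*
     * Try to detach @page from its workgroup's compressed_pages[] so that
     * it can be reclaimed; returns 1 on success and 0 if the workgroup is
     * still in use (its refcount cannot be frozen).
     */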
181 int erofs_try_to_free_cached_page(struct address_space *mapping,
182                                   struct page *page)
183 {
184         struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
185         const unsigned int clusterpages = erofs_clusterpages(sbi);
186
187         struct z_erofs_vle_workgroup *grp;
188         int ret = 0;    /* 0 - busy */
189
190         /* prevent the workgroup from being freed */
191         rcu_read_lock();
192         grp = (void *)page_private(page);
193
194         if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
195                 unsigned int i;
196
197                 for (i = 0; i < clusterpages; ++i) {
198                         if (grp->compressed_pages[i] == page) {
199                                 WRITE_ONCE(grp->compressed_pages[i], NULL);
200                                 ret = 1;
201                                 break;
202                         }
203                 }
204                 erofs_workgroup_unfreeze(&grp->obj, 1);
205         }
206         rcu_read_unlock();
207
208         if (ret) {
209                 ClearPagePrivate(page);
210                 put_page(page);
211         }
212         return ret;
213 }
214 #endif
215
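    /*
     * Try to donate the file page itself as storage for the compressed
     * data so that I/O and decompression can happen in place;
     * compressed_deficit tracks how many compressed page slots may still
     * be grabbed by the current work builder.
     */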
216 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
217 static inline bool try_to_reuse_as_compressed_page(
218         struct z_erofs_vle_work_builder *b,
219         struct page *page)
220 {
221         while (b->compressed_deficit) {
222                 --b->compressed_deficit;
223                 if (NULL == cmpxchg(b->compressed_pages++, NULL, page))
224                         return true;
225         }
226
227         return false;
228 }
229
230 /* callers must hold work->lock */
231 static int z_erofs_vle_work_add_page(
232         struct z_erofs_vle_work_builder *builder,
233         struct page *page,
234         enum z_erofs_page_type type)
235 {
236         int ret;
237         bool occupied;
238
239         /* give priority to the compressed data storage */
240         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
241                 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
242                 try_to_reuse_as_compressed_page(builder, page))
243                 return 0;
244
245         ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
246                 page, type, &occupied);
247         builder->work->vcnt += (unsigned)ret;
248
249         return ret ? 0 : -EAGAIN;
250 }
251
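    /*
     * Try to claim the workgroup for the caller's owned chain:
     * type 1 (next == NIL): the workgroup is in no chain yet and is fully
     * claimed, becoming the new owned head (PRIMARY_FOLLOWED); type 2
     * (next == TAIL): it is the open tail of another chain, so the current
     * chain is hooked up behind it while submission stays governed by that
     * original chain (PRIMARY_HOOKED); otherwise it is already owned
     * elsewhere (plain PRIMARY).
     */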
252 static enum z_erofs_vle_work_role
253 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
254                        z_erofs_vle_owned_workgrp_t *owned_head,
255                        bool *hosted)
256 {
257         DBG_BUGON(*hosted == true);
258
259         /* let's claim the following types of workgroups */
260 retry:
261         if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
262                 /* type 1, nil workgroup */
263                 if (Z_EROFS_VLE_WORKGRP_NIL != cmpxchg(&grp->next,
264                         Z_EROFS_VLE_WORKGRP_NIL, *owned_head))
265                         goto retry;
266
267                 *owned_head = grp;
268                 *hosted = true;
269                 /* lucky, I am the followee :) */
270                 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
271
272         } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
273                 /*
274                  * type 2, link to the end of an existing open chain,
275                  * be careful that its submission itself is governed
276                  * by the original owned chain.
277                  */
278                 if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
279                         Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
280                         goto retry;
281                 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
282                 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
283         }
284
285         return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
286 }
287
288 static struct z_erofs_vle_work *
289 z_erofs_vle_work_lookup(struct super_block *sb,
290                         pgoff_t idx, unsigned pageofs,
291                         struct z_erofs_vle_workgroup **grp_ret,
292                         enum z_erofs_vle_work_role *role,
293                         z_erofs_vle_owned_workgrp_t *owned_head,
294                         bool *hosted)
295 {
296         bool tag, primary;
297         struct erofs_workgroup *egrp;
298         struct z_erofs_vle_workgroup *grp;
299         struct z_erofs_vle_work *work;
300
301         egrp = erofs_find_workgroup(sb, idx, &tag);
302         if (egrp == NULL) {
303                 *grp_ret = NULL;
304                 return NULL;
305         }
306
307         *grp_ret = grp = container_of(egrp,
308                 struct z_erofs_vle_workgroup, obj);
309
310         work = z_erofs_vle_grab_work(grp, pageofs);
311         /* if multiref is disabled, `primary' is always true */
312         primary = true;
313
314         if (work->pageofs != pageofs) {
315                 DBG_BUGON(1);
316                 erofs_workgroup_put(egrp);
317                 return ERR_PTR(-EIO);
318         }
319
320         /*
321          * lock must be taken first to avoid grp->next == NIL between
322          * claiming workgroup and adding pages:
323          *                        grp->next != NIL
324          *   grp->next = NIL
325          *   mutex_unlock_all
326          *                        mutex_lock(&work->lock)
327          *                        add all pages to pagevec
328          *
329          * [correct locking case 1]:
330          *   mutex_lock(grp->work[a])
331          *   ...
332          *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
333          *   ...                          *role = SECONDARY
334          *                                add all pages to pagevec
335          *                                ...
336          *                                mutex_unlock(grp->work[c])
337          *   mutex_lock(grp->work[c])
338          *   ...
339          *   grp->next = NIL
340          *   mutex_unlock_all
341          *
342          * [correct locking case 2]:
343          *   mutex_lock(grp->work[b])
344          *   ...
345          *   mutex_lock(grp->work[a])
346          *   ...
347          *   mutex_lock(grp->work[c])
348          *   ...
349          *   grp->next = NIL
350          *   mutex_unlock_all
351          *                                mutex_lock(grp->work[a])
352          *                                *role = PRIMARY_OWNER
353          *                                add all pages to pagevec
354          *                                ...
355          */
356         mutex_lock(&work->lock);
357
358         *hosted = false;
359         if (!primary)
360                 *role = Z_EROFS_VLE_WORK_SECONDARY;
361         else    /* claim the workgroup if possible */
362                 *role = try_to_claim_workgroup(grp, owned_head, hosted);
363         return work;
364 }
365
366 static struct z_erofs_vle_work *
367 z_erofs_vle_work_register(struct super_block *sb,
368                           struct z_erofs_vle_workgroup **grp_ret,
369                           struct erofs_map_blocks *map,
370                           pgoff_t index, unsigned pageofs,
371                           enum z_erofs_vle_work_role *role,
372                           z_erofs_vle_owned_workgrp_t *owned_head,
373                           bool *hosted)
374 {
375         bool newgrp = false;
376         struct z_erofs_vle_workgroup *grp = *grp_ret;
377         struct z_erofs_vle_work *work;
378
379         /* grp must be NULL here since multiref is disabled; a non-NULL grp indicates a corrupted image */
380         if (unlikely(grp)) {
381                 DBG_BUGON(1);
382                 return ERR_PTR(-EINVAL);
383         }
384
385         /* no available workgroup, let's allocate one */
386         grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
387         if (unlikely(grp == NULL))
388                 return ERR_PTR(-ENOMEM);
389
390         grp->obj.index = index;
391         grp->llen = map->m_llen;
392
393         z_erofs_vle_set_workgrp_fmt(grp,
394                 (map->m_flags & EROFS_MAP_ZIPPED) ?
395                         Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
396                         Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
397         atomic_set(&grp->obj.refcount, 1);
398
399         /* new workgrps have been claimed as type 1 */
400         WRITE_ONCE(grp->next, *owned_head);
401         /* primary and followed work for all new workgrps */
402         *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
403         /* it should be submitted by ourselves */
404         *hosted = true;
405
406         newgrp = true;
407         work = z_erofs_vle_grab_primary_work(grp);
408         work->pageofs = pageofs;
409
410         mutex_init(&work->lock);
411
412         if (newgrp) {
413                 int err = erofs_register_workgroup(sb, &grp->obj, 0);
414
415                 if (err) {
416                         kmem_cache_free(z_erofs_workgroup_cachep, grp);
417                         return ERR_PTR(-EAGAIN);
418                 }
419         }
420
421         *owned_head = *grp_ret = grp;
422
423         mutex_lock(&work->lock);
424         return work;
425 }
426
427 static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
428                                          unsigned int llen)
429 {
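            /*
             * lockless update: raise grp->llen to llen unless it is
             * already large enough.
             */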
430         while (1) {
431                 unsigned int orig_llen = grp->llen;
432
433                 if (orig_llen >= llen || orig_llen ==
434                         cmpxchg(&grp->llen, orig_llen, llen))
435                         break;
436         }
437 }
438
439 #define builder_is_hooked(builder) \
440         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
441
442 #define builder_is_followed(builder) \
443         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
444
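    /*
     * Look up (or register) the workgroup that covers map->m_pa and set up
     * the builder for it: the work is returned with work->lock held, the
     * inline pagevec is initialized, and primary roles also get access to
     * compressed_pages[] for possible in-place decompression.
     */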
445 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
446                                        struct super_block *sb,
447                                        struct erofs_map_blocks *map,
448                                        z_erofs_vle_owned_workgrp_t *owned_head)
449 {
450         const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
451         const erofs_blk_t index = erofs_blknr(map->m_pa);
452         const unsigned pageofs = map->m_la & ~PAGE_MASK;
453         struct z_erofs_vle_workgroup *grp;
454         struct z_erofs_vle_work *work;
455
456         DBG_BUGON(builder->work != NULL);
457
458         /* must be Z_EROFS_VLE_WORKGRP_TAIL or a valid chained workgroup */
459         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
460         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
461
462         DBG_BUGON(erofs_blkoff(map->m_pa));
463
464 repeat:
465         work = z_erofs_vle_work_lookup(sb, index,
466                 pageofs, &grp, &builder->role, owned_head, &builder->hosted);
467         if (work != NULL) {
468                 __update_workgrp_llen(grp, map->m_llen);
469                 goto got_it;
470         }
471
472         work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
473                 &builder->role, owned_head, &builder->hosted);
474
475         if (unlikely(work == ERR_PTR(-EAGAIN)))
476                 goto repeat;
477
478         if (unlikely(IS_ERR(work)))
479                 return PTR_ERR(work);
480 got_it:
481         z_erofs_pagevec_ctor_init(&builder->vector,
482                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
483
484         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
485                 /* enable possible in-place decompression */
486                 builder->compressed_pages = grp->compressed_pages;
487                 builder->compressed_deficit = clusterpages;
488         } else {
489                 builder->compressed_pages = NULL;
490                 builder->compressed_deficit = 0;
491         }
492
493         builder->grp = grp;
494         builder->work = work;
495         return 0;
496 }
497
498 /*
499  * keep in mind that referenced workgroups will only be freed
500  * after an RCU grace period, so rcu_read_lock() can
501  * prevent a workgroup from being freed.
502  */
503 static void z_erofs_rcu_callback(struct rcu_head *head)
504 {
505         struct z_erofs_vle_work *work = container_of(head,
506                 struct z_erofs_vle_work, rcu);
507         struct z_erofs_vle_workgroup *grp =
508                 z_erofs_vle_work_workgroup(work, true);
509
510         kmem_cache_free(z_erofs_workgroup_cachep, grp);
511 }
512
513 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
514 {
515         struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
516                 struct z_erofs_vle_workgroup, obj);
517         struct z_erofs_vle_work *const work = &vgrp->work;
518
519         call_rcu(&work->rcu, z_erofs_rcu_callback);
520 }
521
522 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
523         struct z_erofs_vle_work *work __maybe_unused)
524 {
525         erofs_workgroup_put(&grp->obj);
526 }
527
528 void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
529 {
530         struct z_erofs_vle_workgroup *grp =
531                 z_erofs_vle_work_workgroup(work, true);
532
533         __z_erofs_vle_work_release(grp, work);
534 }
535
536 static inline bool
537 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
538 {
539         struct z_erofs_vle_work *work = builder->work;
540
541         if (work == NULL)
542                 return false;
543
544         z_erofs_pagevec_ctor_exit(&builder->vector, false);
545         mutex_unlock(&work->lock);
546
547         /*
548          * now that all pending pages have been added, drop the work
549          * reference unless the current work is hosted by ourselves.
550          */
551         if (!builder->hosted)
552                 __z_erofs_vle_work_release(builder->grp, work);
553
554         builder->work = NULL;
555         builder->grp = NULL;
556         return true;
557 }
558
559 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
560                                                gfp_t gfp)
561 {
562         struct page *page = erofs_allocpage(pagepool, gfp);
563
564         if (unlikely(page == NULL))
565                 return NULL;
566
567         page->mapping = Z_EROFS_MAPPING_STAGING;
568         return page;
569 }
570
571 struct z_erofs_vle_frontend {
572         struct inode *const inode;
573
574         struct z_erofs_vle_work_builder builder;
575         struct erofs_map_blocks_iter m_iter;
576
577         z_erofs_vle_owned_workgrp_t owned_head;
578
579         bool initial;
580 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
581         erofs_off_t cachedzone_la;
582 #endif
583 };
584
585 #define VLE_FRONTEND_INIT(__i) { \
586         .inode = __i, \
587         .m_iter = { \
588                 { .m_llen = 0, .m_plen = 0 }, \
589                 .mpage = NULL \
590         }, \
591         .builder = VLE_WORK_BUILDER_INIT(), \
592         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
593         .initial = true, }
594
595 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
596                                 struct page *page,
597                                 struct list_head *page_pool)
598 {
599         struct super_block *const sb = fe->inode->i_sb;
600         struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
601         struct erofs_map_blocks_iter *const m = &fe->m_iter;
602         struct erofs_map_blocks *const map = &m->map;
603         struct z_erofs_vle_work_builder *const builder = &fe->builder;
604         const loff_t offset = page_offset(page);
605
606         bool tight = builder_is_hooked(builder);
607         struct z_erofs_vle_work *work = builder->work;
608
609 #ifdef EROFS_FS_HAS_MANAGED_CACHE
610         struct address_space *const mngda = sbi->managed_cache->i_mapping;
611         struct z_erofs_vle_workgroup *grp;
612         bool noio_outoforder;
613 #endif
614
615         enum z_erofs_page_type page_type;
616         unsigned cur, end, spiltted, index;
617         int err = 0;
618
619         /* register the locked file page as an online page */
620         z_erofs_onlinepage_init(page);
621
622         spiltted = 0;
623         end = PAGE_SIZE;
624 repeat:
625         cur = end - 1;
626
627         /* lucky, within the range of the current map_blocks */
628         if (offset + cur >= map->m_la &&
629                 offset + cur < map->m_la + map->m_llen) {
630                 /* didn't get a valid unzip work previously (very rare) */
631                 if (!builder->work)
632                         goto restart_now;
633                 goto hitted;
634         }
635
636         /* go on with the next map_blocks */
637         debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
638
639         if (z_erofs_vle_work_iter_end(builder))
640                 fe->initial = false;
641
642         map->m_la = offset + cur;
643         map->m_llen = 0;
644         err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
645         if (unlikely(err))
646                 goto err_out;
647
648 restart_now:
649         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
650                 goto hitted;
651
652         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
653         DBG_BUGON(erofs_blkoff(map->m_pa));
654
655         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
656         if (unlikely(err))
657                 goto err_out;
658
659 #ifdef EROFS_FS_HAS_MANAGED_CACHE
660         grp = fe->builder.grp;
661
662         /* let's do out-of-order decompression for noio */
663         noio_outoforder = grab_managed_cache_pages(mngda,
664                 erofs_blknr(map->m_pa),
665                 grp->compressed_pages, erofs_blknr(map->m_plen),
666                 /* compressed page caching selection strategy */
667                 fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
668                         map->m_la < fe->cachedzone_la : 0));
669
670         if (noio_outoforder && builder_is_followed(builder))
671                 builder->role = Z_EROFS_VLE_WORK_PRIMARY;
672 #endif
673
674         tight &= builder_is_hooked(builder);
675         work = builder->work;
676 hitted:
677         cur = end - min_t(unsigned, offset + end - map->m_la, end);
678         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
679                 zero_user_segment(page, cur, end);
680                 goto next_part;
681         }
682
683         /* let's derive page type */
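            /*
             * if cur != 0, this split part does not start at the page head,
             * so it is tagged as a HEAD page; the first split part of a page
             * is EXCLUSIVE; later parts stay EXCLUSIVE only while the chain
             * is still tight, otherwise they become TAIL_SHARED.
             */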
684         page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
685                 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
686                         (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
687                                 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
688
689         if (cur)
690                 tight &= builder_is_followed(builder);
691
692 retry:
693         err = z_erofs_vle_work_add_page(builder, page, page_type);
694         /* should allocate an additional staging page for pagevec */
695         if (err == -EAGAIN) {
696                 struct page *const newpage =
697                         __stagingpage_alloc(page_pool, GFP_NOFS);
698
699                 err = z_erofs_vle_work_add_page(builder,
700                         newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
701                 if (likely(!err))
702                         goto retry;
703         }
704
705         if (unlikely(err))
706                 goto err_out;
707
708         index = page->index - map->m_la / PAGE_SIZE;
709
710         /* FIXME! avoid the last redundant fixup & endio */
711         z_erofs_onlinepage_fixup(page, index, true);
712
713         /* bump up the number of spiltted parts of a page */
714         ++spiltted;
715         /* also update nr_pages */
716         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
717 next_part:
718         /* can be used for verification */
719         map->m_llen = offset + cur - map->m_la;
720
721         end = cur;
722         if (end > 0)
723                 goto repeat;
724
725 out:
726         /* FIXME! avoid the last redundant fixup & endio */
727         z_erofs_onlinepage_endio(page);
728
729         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
730                 __func__, page, spiltted, map->m_llen);
731         return err;
732
733         /* if some error occurred while processing this page */
734 err_out:
735         SetPageError(page);
736         goto out;
737 }
738
739 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
740 {
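            /*
             * ptr is a tagged pointer: the pointer part is the unzip io
             * descriptor and the 1-bit tag selects between background
             * completion (queue the work item) and foreground completion
             * (wake up the waiter).
             */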
741         tagptr1_t t = tagptr_init(tagptr1_t, ptr);
742         struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
743         bool background = tagptr_unfold_tags(t);
744
745         if (!background) {
746                 unsigned long flags;
747
748                 spin_lock_irqsave(&io->u.wait.lock, flags);
749                 if (!atomic_add_return(bios, &io->pending_bios))
750                         wake_up_locked(&io->u.wait);
751                 spin_unlock_irqrestore(&io->u.wait.lock, flags);
752                 return;
753         }
754
755         if (!atomic_add_return(bios, &io->pending_bios))
756                 queue_work(z_erofs_workqueue, &io->u.work);
757 }
758
759 static inline void z_erofs_vle_read_endio(struct bio *bio)
760 {
761         const blk_status_t err = bio->bi_status;
762         unsigned i;
763         struct bio_vec *bvec;
764 #ifdef EROFS_FS_HAS_MANAGED_CACHE
765         struct address_space *mngda = NULL;
766 #endif
767
768         bio_for_each_segment_all(bvec, bio, i) {
769                 struct page *page = bvec->bv_page;
770                 bool cachemngd = false;
771
772                 DBG_BUGON(PageUptodate(page));
773                 DBG_BUGON(!page->mapping);
774
775 #ifdef EROFS_FS_HAS_MANAGED_CACHE
776                 if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
777                         struct inode *const inode = page->mapping->host;
778                         struct super_block *const sb = inode->i_sb;
779
780                         mngda = EROFS_SB(sb)->managed_cache->i_mapping;
781                 }
782
783                 /*
784                  * If mngda has not been obtained yet, it equals NULL;
785                  * however, page->mapping can never be NULL if everything works properly.
786                  */
787                 cachemngd = (page->mapping == mngda);
788 #endif
789
790                 if (unlikely(err))
791                         SetPageError(page);
792                 else if (cachemngd)
793                         SetPageUptodate(page);
794
795                 if (cachemngd)
796                         unlock_page(page);
797         }
798
799         z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
800         bio_put(bio);
801 }
802
803 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
804 static DEFINE_MUTEX(z_pagemap_global_lock);
805
806 static int z_erofs_vle_unzip(struct super_block *sb,
807         struct z_erofs_vle_workgroup *grp,
808         struct list_head *page_pool)
809 {
810         struct erofs_sb_info *const sbi = EROFS_SB(sb);
811 #ifdef EROFS_FS_HAS_MANAGED_CACHE
812         struct address_space *const mngda = sbi->managed_cache->i_mapping;
813 #endif
814         const unsigned clusterpages = erofs_clusterpages(sbi);
815
816         struct z_erofs_pagevec_ctor ctor;
817         unsigned int nr_pages;
818         unsigned int sparsemem_pages = 0;
819         struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
820         struct page **pages, **compressed_pages, *page;
821         unsigned i, llen;
822
823         enum z_erofs_page_type page_type;
824         bool overlapped;
825         struct z_erofs_vle_work *work;
826         void *vout;
827         int err;
828
829         might_sleep();
830         work = z_erofs_vle_grab_primary_work(grp);
831         DBG_BUGON(!READ_ONCE(work->nr_pages));
832
833         mutex_lock(&work->lock);
834         nr_pages = work->nr_pages;
835
836         if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
837                 pages = pages_onstack;
838         else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
839                 mutex_trylock(&z_pagemap_global_lock))
840                 pages = z_pagemap_global;
841         else {
842 repeat:
843                 pages = kvmalloc_array(nr_pages,
844                         sizeof(struct page *), GFP_KERNEL);
845
846                 /* fallback to global pagemap for the lowmem scenario */
847                 if (unlikely(pages == NULL)) {
848                         if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
849                                 goto repeat;
850                         else {
851                                 mutex_lock(&z_pagemap_global_lock);
852                                 pages = z_pagemap_global;
853                         }
854                 }
855         }
856
857         for (i = 0; i < nr_pages; ++i)
858                 pages[i] = NULL;
859
860         err = 0;
861         z_erofs_pagevec_ctor_init(&ctor,
862                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
863
864         for (i = 0; i < work->vcnt; ++i) {
865                 unsigned pagenr;
866
867                 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
868
869                 /* all pages in pagevec ought to be valid */
870                 DBG_BUGON(page == NULL);
871                 DBG_BUGON(page->mapping == NULL);
872
873                 if (z_erofs_gather_if_stagingpage(page_pool, page))
874                         continue;
875
876                 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
877                         pagenr = 0;
878                 else
879                         pagenr = z_erofs_onlinepage_index(page);
880
881                 DBG_BUGON(pagenr >= nr_pages);
882
883                 /*
884                  * currently EROFS doesn't support multiref (deduplication),
885                  * so error out here if a multiref page is detected.
886                  */
887                 if (pages[pagenr]) {
888                         DBG_BUGON(1);
889                         SetPageError(pages[pagenr]);
890                         z_erofs_onlinepage_endio(pages[pagenr]);
891                         err = -EIO;
892                 }
893                 pages[pagenr] = page;
894         }
895         sparsemem_pages = i;
896
897         z_erofs_pagevec_ctor_exit(&ctor, true);
898
899         overlapped = false;
900         compressed_pages = grp->compressed_pages;
901
902         for (i = 0; i < clusterpages; ++i) {
903                 unsigned pagenr;
904
905                 page = compressed_pages[i];
906
907                 /* all compressed pages ought to be valid */
908                 DBG_BUGON(page == NULL);
909                 DBG_BUGON(page->mapping == NULL);
910
911                 if (!z_erofs_is_stagingpage(page)) {
912 #ifdef EROFS_FS_HAS_MANAGED_CACHE
913                         if (page->mapping == mngda) {
914                                 if (unlikely(!PageUptodate(page)))
915                                         err = -EIO;
916                                 continue;
917                         }
918 #endif
919
920                         /*
921                          * only a non-head page can be selected
922                          * for in-place decompression
923                          */
924                         pagenr = z_erofs_onlinepage_index(page);
925
926                         DBG_BUGON(pagenr >= nr_pages);
927                         if (pages[pagenr]) {
928                                 DBG_BUGON(1);
929                                 SetPageError(pages[pagenr]);
930                                 z_erofs_onlinepage_endio(pages[pagenr]);
931                                 err = -EIO;
932                         }
933                         ++sparsemem_pages;
934                         pages[pagenr] = page;
935
936                         overlapped = true;
937                 }
938
939                 /* PG_error needs checking for in-place and staging pages */
940                 if (unlikely(PageError(page))) {
941                         DBG_BUGON(PageUptodate(page));
942                         err = -EIO;
943                 }
944         }
945
946         if (unlikely(err))
947                 goto out;
948
949         llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
950
951         if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
952                 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
953                         pages, nr_pages, work->pageofs);
954                 goto out;
955         }
956
957         if (llen > grp->llen)
958                 llen = grp->llen;
959
960         err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
961                                             pages, llen, work->pageofs);
962         if (err != -ENOTSUPP)
963                 goto out;
964
965         if (sparsemem_pages >= nr_pages)
966                 goto skip_allocpage;
967
968         for (i = 0; i < nr_pages; ++i) {
969                 if (pages[i] != NULL)
970                         continue;
971
972                 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
973         }
974
975 skip_allocpage:
976         vout = erofs_vmap(pages, nr_pages);
977         if (!vout) {
978                 err = -ENOMEM;
979                 goto out;
980         }
981
982         err = z_erofs_vle_unzip_vmap(compressed_pages,
983                 clusterpages, vout, llen, work->pageofs, overlapped);
984
985         erofs_vunmap(vout, nr_pages);
986
987 out:
988         /* must handle all compressed pages before ending the file pages */
989         for (i = 0; i < clusterpages; ++i) {
990                 page = compressed_pages[i];
991
992 #ifdef EROFS_FS_HAS_MANAGED_CACHE
993                 if (page->mapping == mngda)
994                         continue;
995 #endif
996                 /* recycle all individual staging pages */
997                 (void)z_erofs_gather_if_stagingpage(page_pool, page);
998
999                 WRITE_ONCE(compressed_pages[i], NULL);
1000         }
1001
1002         for (i = 0; i < nr_pages; ++i) {
1003                 page = pages[i];
1004                 if (!page)
1005                         continue;
1006
1007                 DBG_BUGON(page->mapping == NULL);
1008
1009                 /* recycle all individual staging pages */
1010                 if (z_erofs_gather_if_stagingpage(page_pool, page))
1011                         continue;
1012
1013                 if (unlikely(err < 0))
1014                         SetPageError(page);
1015
1016                 z_erofs_onlinepage_endio(page);
1017         }
1018
1019         if (pages == z_pagemap_global)
1020                 mutex_unlock(&z_pagemap_global_lock);
1021         else if (unlikely(pages != pages_onstack))
1022                 kvfree(pages);
1023
1024         work->nr_pages = 0;
1025         work->vcnt = 0;
1026
1027         /* all work locks MUST be taken before the following line */
1028
1029         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1030
1031         /* all work locks SHOULD be released right now */
1032         mutex_unlock(&work->lock);
1033
1034         z_erofs_vle_work_release(work);
1035         return err;
1036 }
1037
1038 static void z_erofs_vle_unzip_all(struct super_block *sb,
1039                                   struct z_erofs_vle_unzip_io *io,
1040                                   struct list_head *page_pool)
1041 {
1042         z_erofs_vle_owned_workgrp_t owned = io->head;
1043
1044         while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1045                 struct z_erofs_vle_workgroup *grp;
1046
1047                 /* it is impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1048                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1049
1050                 /* it is impossible that 'owned' equals NULL */
1051                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1052
1053                 grp = owned;
1054                 owned = READ_ONCE(grp->next);
1055
1056                 z_erofs_vle_unzip(sb, grp, page_pool);
1057         }
1058 }
1059
1060 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1061 {
1062         struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1063                 struct z_erofs_vle_unzip_io_sb, io.u.work);
1064         LIST_HEAD(page_pool);
1065
1066         DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1067         z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1068
1069         put_pages_list(&page_pool);
1070         kvfree(iosb);
1071 }
1072
1073 static inline struct z_erofs_vle_unzip_io *
1074 prepare_io_handler(struct super_block *sb,
1075                    struct z_erofs_vle_unzip_io *io,
1076                    bool background)
1077 {
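             /*
              * Foreground callers pass a preallocated io descriptor and will
              * wait on io->u.wait themselves; background callers get a freshly
              * allocated io_sb whose work item is queued to the unzip
              * workqueue once all bios complete.
              */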
1078         struct z_erofs_vle_unzip_io_sb *iosb;
1079
1080         if (!background) {
1081                 /* waitqueue available for foreground io */
1082                 BUG_ON(io == NULL);
1083
1084                 init_waitqueue_head(&io->u.wait);
1085                 atomic_set(&io->pending_bios, 0);
1086                 goto out;
1087         }
1088
1089         if (io != NULL)
1090                 BUG();
1091         else {
1092                 /* allocate extra io descriptor for background io */
1093                 iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1094                         GFP_KERNEL | __GFP_NOFAIL);
1095                 BUG_ON(iosb == NULL);
1096
1097                 io = &iosb->io;
1098         }
1099
1100         iosb->sb = sb;
1101         INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1102 out:
1103         io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1104         return io;
1105 }
1106
1107 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1108 /* true - unlocked (noio), false - locked (need submit io) */
1109 static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
1110                                         struct page *page)
1111 {
1112         wait_on_page_locked(page);
1113         if (PagePrivate(page) && PageUptodate(page))
1114                 return true;
1115
1116         lock_page(page);
1117         ClearPageError(page);
1118
1119         if (unlikely(!PagePrivate(page))) {
1120                 set_page_private(page, (unsigned long)grp);
1121                 SetPagePrivate(page);
1122         }
1123         if (unlikely(PageUptodate(page))) {
1124                 unlock_page(page);
1125                 return true;
1126         }
1127         return false;
1128 }
1129
1130 #define __FSIO_1 1
1131 #else
1132 #define __FSIO_1 0
1133 #endif
1134
1135 static bool z_erofs_vle_submit_all(struct super_block *sb,
1136                                    z_erofs_vle_owned_workgrp_t owned_head,
1137                                    struct list_head *pagepool,
1138                                    struct z_erofs_vle_unzip_io *fg_io,
1139                                    bool force_fg)
1140 {
1141         struct erofs_sb_info *const sbi = EROFS_SB(sb);
1142         const unsigned clusterpages = erofs_clusterpages(sbi);
1143         const gfp_t gfp = GFP_NOFS;
1144 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1145         struct address_space *const mngda = sbi->managed_cache->i_mapping;
1146         struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
1147 #endif
1148         struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
1149         struct bio *bio;
1150         tagptr1_t bi_private;
1151         /* since bio will be NULL, no need to initialize last_index */
1152         pgoff_t uninitialized_var(last_index);
1153         bool force_submit = false;
1154         unsigned nr_bios;
1155
1156         if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1157                 return false;
1158
1159         /*
1160          * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io
1161          * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
1162          */
1163 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1164         ios[0] = prepare_io_handler(sb, fg_io + 0, false);
1165 #endif
1166
1167         if (force_fg) {
1168                 ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
1169                 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
1170         } else {
1171                 ios[__FSIO_1] = prepare_io_handler(sb, NULL, true);
1172                 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1);
1173         }
1174
1175         nr_bios = 0;
1176         force_submit = false;
1177         bio = NULL;
1178
1179         /* by default, all need io submission */
1180         ios[__FSIO_1]->head = owned_head;
1181
1182         do {
1183                 struct z_erofs_vle_workgroup *grp;
1184                 struct page **compressed_pages, *oldpage, *page;
1185                 pgoff_t first_index;
1186                 unsigned i = 0;
1187 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1188                 unsigned int noio = 0;
1189                 bool cachemngd;
1190 #endif
1191                 int err;
1192
1193                 /* it is impossible that 'owned_head' equals either of the following */
1194                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1195                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1196
1197                 grp = owned_head;
1198
1199                 /* close the main owned chain first */
1200                 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1201                         Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1202
1203                 first_index = grp->obj.index;
1204                 compressed_pages = grp->compressed_pages;
1205
1206                 force_submit |= (first_index != last_index + 1);
1207 repeat:
1208                 /* fill in all compressed page slots */
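                     /*
                      * Each slot may hold: a file page donated for in-place I/O,
                      * NULL (allocate a staging page), or, with the managed cache
                      * enabled, an EROFS_UNALLOCATED_CACHED_PAGE marker (allocate
                      * a cache page) or an already cached page (skip I/O if it is
                      * uptodate); every page except the uptodate cached ones is
                      * added to the bio below.
                      */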
1209                 oldpage = page = READ_ONCE(compressed_pages[i]);
1210
1211 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1212                 cachemngd = false;
1213
1214                 if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
1215                         cachemngd = true;
1216                         goto do_allocpage;
1217                 } else if (page != NULL) {
1218                         if (page->mapping != mngda)
1219                                 BUG_ON(PageUptodate(page));
1220                         else if (recover_managed_page(grp, page)) {
1221                                 /* page is uptodate, skip io submission */
1222                                 force_submit = true;
1223                                 ++noio;
1224                                 goto skippage;
1225                         }
1226                 } else {
1227 do_allocpage:
1228 #else
1229                 if (page != NULL)
1230                         BUG_ON(PageUptodate(page));
1231                 else {
1232 #endif
1233                         page = __stagingpage_alloc(pagepool, gfp);
1234
1235                         if (oldpage != cmpxchg(compressed_pages + i,
1236                                 oldpage, page)) {
1237                                 list_add(&page->lru, pagepool);
1238                                 goto repeat;
1239 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1240                         } else if (cachemngd && !add_to_page_cache_lru(page,
1241                                 mngda, first_index + i, gfp)) {
1242                                 set_page_private(page, (unsigned long)grp);
1243                                 SetPagePrivate(page);
1244 #endif
1245                         }
1246                 }
1247
1248                 if (bio != NULL && force_submit) {
1249 submit_bio_retry:
1250                         __submit_bio(bio, REQ_OP_READ, 0);
1251                         bio = NULL;
1252                 }
1253
1254                 if (bio == NULL) {
1255                         bio = prepare_bio(sb, first_index + i,
1256                                 BIO_MAX_PAGES, z_erofs_vle_read_endio);
1257                         bio->bi_private = tagptr_cast_ptr(bi_private);
1258
1259                         ++nr_bios;
1260                 }
1261
1262                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1263                 if (err < PAGE_SIZE)
1264                         goto submit_bio_retry;
1265
1266                 force_submit = false;
1267                 last_index = first_index + i;
1268 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1269 skippage:
1270 #endif
1271                 if (++i < clusterpages)
1272                         goto repeat;
1273
1274 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1275                 if (noio < clusterpages) {
1276                         lstgrp_io = grp;
1277                 } else {
1278                         z_erofs_vle_owned_workgrp_t iogrp_next =
1279                                 owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
1280                                 Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
1281                                 owned_head;
1282
1283                         if (lstgrp_io == NULL)
1284                                 ios[1]->head = iogrp_next;
1285                         else
1286                                 WRITE_ONCE(lstgrp_io->next, iogrp_next);
1287
1288                         if (lstgrp_noio == NULL)
1289                                 ios[0]->head = grp;
1290                         else
1291                                 WRITE_ONCE(lstgrp_noio->next, grp);
1292
1293                         lstgrp_noio = grp;
1294                 }
1295 #endif
1296         } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1297
1298         if (bio != NULL)
1299                 __submit_bio(bio, REQ_OP_READ, 0);
1300
1301 #ifndef EROFS_FS_HAS_MANAGED_CACHE
1302         BUG_ON(!nr_bios);
1303 #else
1304         if (lstgrp_noio != NULL)
1305                 WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1306
1307         if (!force_fg && !nr_bios) {
1308                 kvfree(container_of(ios[1],
1309                         struct z_erofs_vle_unzip_io_sb, io));
1310                 return true;
1311         }
1312 #endif
1313
1314         z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
1315         return true;
1316 }
1317
1318 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1319                                      struct list_head *pagepool,
1320                                      bool force_fg)
1321 {
1322         struct super_block *sb = f->inode->i_sb;
1323         struct z_erofs_vle_unzip_io io[1 + __FSIO_1];
1324
1325         if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1326                 return;
1327
1328 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1329         z_erofs_vle_unzip_all(sb, &io[0], pagepool);
1330 #endif
1331         if (!force_fg)
1332                 return;
1333
1334         /* wait until all bios are completed */
1335         wait_event(io[__FSIO_1].u.wait,
1336                 !atomic_read(&io[__FSIO_1].pending_bios));
1337
1338         /* let's do synchronous decompression */
1339         z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool);
1340 }
1341
1342 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1343                                              struct page *page)
1344 {
1345         struct inode *const inode = page->mapping->host;
1346         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1347         int err;
1348         LIST_HEAD(pagepool);
1349
1350 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1351         f.cachedzone_la = page->index << PAGE_SHIFT;
1352 #endif
1353         err = z_erofs_do_read_page(&f, page, &pagepool);
1354         (void)z_erofs_vle_work_iter_end(&f.builder);
1355
1356         /* if some compressed clusters are ready, submit them anyway */
1357         z_erofs_submit_and_unzip(&f, &pagepool, true);
1358
1359         if (err)
1360                 errln("%s, failed to read, err [%d]", __func__, err);
1361
1362         if (f.m_iter.mpage != NULL)
1363                 put_page(f.m_iter.mpage);
1364
1365         /* clean up the remaining free pages */
1366         put_pages_list(&pagepool);
1367         return err;
1368 }
1369
1370 static inline int __z_erofs_vle_normalaccess_readpages(
1371         struct file *filp,
1372         struct address_space *mapping,
1373         struct list_head *pages, unsigned nr_pages, bool sync)
1374 {
1375         struct inode *const inode = mapping->host;
1376
1377         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1378         gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1379         struct page *head = NULL;
1380         LIST_HEAD(pagepool);
1381
1382 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1383         f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
1384 #endif
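             /*
              * Chain the locked pages through page_private(); since each page
              * is pushed onto the head of the chain, the walk below ends up
              * processing them in reverse order.
              */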
1385         for (; nr_pages; --nr_pages) {
1386                 struct page *page = lru_to_page(pages);
1387
1388                 prefetchw(&page->flags);
1389                 list_del(&page->lru);
1390
1391                 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1392                         list_add(&page->lru, &pagepool);
1393                         continue;
1394                 }
1395
1396                 set_page_private(page, (unsigned long)head);
1397                 head = page;
1398         }
1399
1400         while (head != NULL) {
1401                 struct page *page = head;
1402                 int err;
1403
1404                 /* traversal in reverse order */
1405                 head = (void *)page_private(page);
1406
1407                 err = z_erofs_do_read_page(&f, page, &pagepool);
1408                 if (err) {
1409                         struct erofs_vnode *vi = EROFS_V(inode);
1410
1411                         errln("%s, readahead error at page %lu of nid %llu",
1412                                 __func__, page->index, vi->nid);
1413                 }
1414
1415                 put_page(page);
1416         }
1417
1418         (void)z_erofs_vle_work_iter_end(&f.builder);
1419
1420         z_erofs_submit_and_unzip(&f, &pagepool, sync);
1421
1422         if (f.m_iter.mpage != NULL)
1423                 put_page(f.m_iter.mpage);
1424
1425         /* clean up the remaining free pages */
1426         put_pages_list(&pagepool);
1427         return 0;
1428 }
1429
1430 static int z_erofs_vle_normalaccess_readpages(
1431         struct file *filp,
1432         struct address_space *mapping,
1433         struct list_head *pages, unsigned nr_pages)
1434 {
1435         return __z_erofs_vle_normalaccess_readpages(filp,
1436                 mapping, pages, nr_pages,
1437                 nr_pages < 4 /* sync */);
1438 }
1439
1440 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1441         .readpage = z_erofs_vle_normalaccess_readpage,
1442         .readpages = z_erofs_vle_normalaccess_readpages,
1443 };
1444
1445 #define __vle_cluster_advise(x, bit, bits) \
1446         ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
1447
1448 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
1449         Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
1450
1451 enum {
1452         Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
1453         Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
1454         Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
1455         Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
1456         Z_EROFS_VLE_CLUSTER_TYPE_MAX
1457 };
1458
1459 #define vle_cluster_type(di)    \
1460         __vle_cluster_type((di)->di_advise)
1461
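     /*
      * Return the logical cluster offset recorded in this index; non-head
      * clusters carry no cluster offset of their own, so the full
      * clustersize is returned for them instead.
      */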
1462 static inline unsigned
1463 vle_compressed_index_clusterofs(unsigned clustersize,
1464         struct z_erofs_vle_decompressed_index *di)
1465 {
1466         debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
1467                 __func__, di, di->di_advise, vle_cluster_type(di),
1468                 di->di_clusterofs, di->di_u.blkaddr);
1469
1470         switch (vle_cluster_type(di)) {
1471         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1472                 break;
1473         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1474         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1475                 return di->di_clusterofs;
1476         default:
1477                 BUG_ON(1);
1478         }
1479         return clustersize;
1480 }
1481
1482 static inline erofs_blk_t
1483 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1484 {
1485         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1486         struct erofs_vnode *vi = EROFS_V(inode);
1487
1488         unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1489                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1490                 index * sizeof(struct z_erofs_vle_decompressed_index);
1491
1492         return erofs_blknr(iloc(sbi, vi->nid) + ofs);
1493 }
1494
1495 static inline unsigned int
1496 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1497 {
1498         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1499         struct erofs_vnode *vi = EROFS_V(inode);
1500
1501         unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1502                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1503                 index * sizeof(struct z_erofs_vle_decompressed_index);
1504
1505         return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
1506 }
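/*
 * Illustrative refactoring sketch, not part of the original driver: both
 * helpers above repeat the same offset computation; a single hypothetical
 * helper could return the absolute byte position and let callers split it
 * with erofs_blknr()/erofs_blkoff() themselves.
 */
#if 0	/* example only, never compiled */
static erofs_off_t vle_extent_pos(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	return iloc(sbi, vi->nid) +
		Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize + vi->xattr_isize) +
		sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);
}
#endif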
1507
1508 /*
1509  * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
1510  * ---
1511  * VLE compression mode compresses a variable amount of logical
1512  * (uncompressed) data into a physical cluster of fixed size.
1513  * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
1514  */
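/*
 * Worked example with hypothetical numbers (4KiB clusters assumed): an
 * extent whose uncompressed data starts at byte 0x1200 is recorded as a
 * HEAD (or PLAIN) index at lcn 1 with di_clusterofs = 0x200 and
 * di_u.blkaddr pointing at its physical cluster; the indexes for lcn 2
 * and lcn 3 are NONHEAD with di_u.delta[0] = 1 and 2 respectively, so
 * vle_get_logical_extent_head() below can walk back to the head and
 * return ofs = (1 << 12) + 0x200 = 0x1200.
 */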
1515 static erofs_off_t vle_get_logical_extent_head(
1516         struct inode *inode,
1517         struct page **page_iter,
1518         void **kaddr_iter,
1519         unsigned lcn,   /* logical cluster number */
1520         erofs_blk_t *pcn,
1521         unsigned *flags)
1522 {
1523         /* for extent meta */
1524         struct page *page = *page_iter;
1525         erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
1526         struct z_erofs_vle_decompressed_index *di;
1527         unsigned long long ofs;
1528         const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
1529         const unsigned int clustersize = 1 << clusterbits;
1530         unsigned int delta0;
1531
1532         if (page->index != blkaddr) {
1533                 kunmap_atomic(*kaddr_iter);
1534                 unlock_page(page);
1535                 put_page(page);
1536
1537                 *page_iter = page = erofs_get_meta_page(inode->i_sb,
1538                         blkaddr, false);
1539                 *kaddr_iter = kmap_atomic(page);
1540         }
1541
1542         di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
1543         switch (vle_cluster_type(di)) {
1544         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1545                 delta0 = le16_to_cpu(di->di_u.delta[0]);
1546                 DBG_BUGON(!delta0);
1547                 DBG_BUGON(lcn < delta0);
1548
1549                 ofs = vle_get_logical_extent_head(inode,
1550                         page_iter, kaddr_iter,
1551                         lcn - delta0, pcn, flags);
1552                 break;
1553         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1554                 *flags ^= EROFS_MAP_ZIPPED;
                     /* fallthrough */
1555         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1556                 /* clustersize should be a power of two */
1557                 ofs = ((unsigned long long)lcn << clusterbits) +
1558                         (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
1559                 *pcn = le32_to_cpu(di->di_u.blkaddr);
1560                 break;
1561         default:
1562                 BUG_ON(1);
1563         }
1564         return ofs;
1565 }
1566
1567 int z_erofs_map_blocks_iter(struct inode *inode,
1568         struct erofs_map_blocks *map,
1569         struct page **mpage_ret, int flags)
1570 {
1571         /* logical extent (start, end) offset */
1572         unsigned long long ofs, end;
1573         struct z_erofs_vle_decompressed_index *di;
1574         erofs_blk_t e_blkaddr, pcn;
1575         unsigned lcn, logical_cluster_ofs, cluster_type;
1576         u32 ofs_rem;
1577         struct page *mpage = *mpage_ret;
1578         void *kaddr;
1579         bool initial;
1580         const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
1581         const unsigned int clustersize = 1 << clusterbits;
1582         int err = 0;
1583
1584         /* if both m_llen and m_plen are 0, this is an initial (fresh) lookup */
1585         initial = !map->m_llen;
1586
1587         /* when trying to read beyond EOF, leave it unmapped */
1588         if (unlikely(map->m_la >= inode->i_size)) {
1589                 BUG_ON(!initial);
1590                 map->m_llen = map->m_la + 1 - inode->i_size;
1591                 map->m_la = inode->i_size - 1;
1592                 map->m_flags = 0;
1593                 goto out;
1594         }
1595
1596         debugln("%s, m_la %llu m_llen %llu --- start", __func__,
1597                 map->m_la, map->m_llen);
1598
1599         ofs = map->m_la + map->m_llen;
1600
1601         /* clustersize should be a power of two */
1602         lcn = ofs >> clusterbits;
1603         ofs_rem = ofs & (clustersize - 1);
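        /*
         * Worked example (illustrative, assuming clusterbits == 12, i.e.
         * 4KiB clusters): ofs == 0x2a30 yields lcn == 2 and
         * ofs_rem == 0xa30, since masking with (clustersize - 1) keeps
         * only the offset inside the logical cluster.
         */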
1604
1605         e_blkaddr = vle_extent_blkaddr(inode, lcn);
1606
1607         if (mpage == NULL || mpage->index != e_blkaddr) {
1608                 if (mpage != NULL)
1609                         put_page(mpage);
1610
1611                 mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
1612                 *mpage_ret = mpage;
1613         } else {
1614                 lock_page(mpage);
1615                 DBG_BUGON(!PageUptodate(mpage));
1616         }
1617
1618         kaddr = kmap_atomic(mpage);
1619         di = kaddr + vle_extent_blkoff(inode, lcn);
1620
1621         debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
1622                 e_blkaddr, vle_extent_blkoff(inode, lcn));
1623
1624         logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
1625         if (!initial) {
1626                 /* [walking mode] 'map' has already been initialized */
1627                 map->m_llen += logical_cluster_ofs;
1628                 goto unmap_out;
1629         }
1630
1631         /* by default, compressed */
1632         map->m_flags |= EROFS_MAP_ZIPPED;
1633
1634         end = (u64)(lcn + 1) * clustersize;
1635
1636         cluster_type = vle_cluster_type(di);
1637
1638         switch (cluster_type) {
1639         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1640                 if (ofs_rem >= logical_cluster_ofs)
1641                         map->m_flags ^= EROFS_MAP_ZIPPED;
1642                 /* fallthrough */
1643         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1644                 if (ofs_rem == logical_cluster_ofs) {
1645                         pcn = le32_to_cpu(di->di_u.blkaddr);
1646                         goto exact_hitted;
1647                 }
1648
1649                 if (ofs_rem > logical_cluster_ofs) {
1650                         ofs = lcn * clustersize | logical_cluster_ofs;
1651                         pcn = le32_to_cpu(di->di_u.blkaddr);
1652                         break;
1653                 }
1654
1655                 /* logical cluster number should be >= 1 */
1656                 if (unlikely(!lcn)) {
1657                         errln("invalid logical cluster 0 at nid %llu",
1658                                 EROFS_V(inode)->nid);
1659                         err = -EIO;
1660                         goto unmap_out;
1661                 }
1662                 end = (lcn-- * clustersize) | logical_cluster_ofs;
1663                 /* fallthrough */
1664         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1665                 /* get the corresponding extent head */
1666                 ofs = vle_get_logical_extent_head(inode, mpage_ret,
1667                         &kaddr, lcn, &pcn, &map->m_flags);
1668                 mpage = *mpage_ret;
1669                 break;
1670         default:
1671                 errln("unknown cluster type %u at offset %llu of nid %llu",
1672                         cluster_type, ofs, EROFS_V(inode)->nid);
1673                 err = -EIO;
1674                 goto unmap_out;
1675         }
1676
1677         map->m_la = ofs;
1678 exact_hitted:
1679         map->m_llen = end - ofs;
1680         map->m_plen = clustersize;
1681         map->m_pa = blknr_to_addr(pcn);
1682         map->m_flags |= EROFS_MAP_MAPPED;
1683 unmap_out:
1684         kunmap_atomic(kaddr);
1685         unlock_page(mpage);
1686 out:
1687         debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
1688                 __func__, map->m_la, map->m_pa,
1689                 map->m_llen, map->m_plen, map->m_flags);
1690
1691         /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
1692         DBG_BUGON(err < 0);
1693         return err;
1694 }
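/*
 * Illustrative usage sketch, not part of the original driver: a
 * hypothetical caller mapping the logical range [la, la + len) by
 * stepping through extents, reusing the cached meta page between calls
 * just as the readpage paths above do.
 */
#if 0	/* example only, never compiled */
static int vle_map_range_example(struct inode *inode, u64 la, u64 len)
{
	struct erofs_map_blocks map = { 0 };
	struct page *mpage = NULL;
	u64 cur = la;
	int err = 0;

	while (cur < la + len) {
		map.m_la = cur;
		map.m_llen = 0;	/* request a fresh (initial) lookup */
		map.m_flags = 0;

		err = z_erofs_map_blocks_iter(inode, &map, &mpage, 0);
		if (err || !(map.m_flags & EROFS_MAP_MAPPED))
			break;

		/* advance past the extent that covers 'cur' */
		cur = map.m_la + map.m_llen;
	}

	if (mpage)
		put_page(mpage);
	return err;
}
#endif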
1695