1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
15
16 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
17 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
18
19 void z_erofs_exit_zip_subsystem(void)
20 {
21         destroy_workqueue(z_erofs_workqueue);
22         kmem_cache_destroy(z_erofs_workgroup_cachep);
23 }
24
25 static inline int init_unzip_workqueue(void)
26 {
27         const unsigned onlinecpus = num_possible_cpus();
28
29         /*
30          * we don't need too many threads; limiting the number of threads
31          * could improve scheduling performance.
32          */
33         z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
34                 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
35                 onlinecpus + onlinecpus / 4);
36
37         return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
38 }
39
40 int z_erofs_init_zip_subsystem(void)
41 {
42         z_erofs_workgroup_cachep =
43                 kmem_cache_create("erofs_compress",
44                 Z_EROFS_WORKGROUP_SIZE, 0,
45                 SLAB_RECLAIM_ACCOUNT, NULL);
46
47         if (z_erofs_workgroup_cachep != NULL) {
48                 if (!init_unzip_workqueue())
49                         return 0;
50
51                 kmem_cache_destroy(z_erofs_workgroup_cachep);
52         }
53         return -ENOMEM;
54 }
55
56 enum z_erofs_vle_work_role {
57         Z_EROFS_VLE_WORK_SECONDARY,
58         Z_EROFS_VLE_WORK_PRIMARY,
59         /*
60          * The current work was the tail of an existing chain, and the
61          * previously processed chained works have all been hooked up to it.
62          * A new chain will be created for the remaining unprocessed works;
63          * therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
64          * the next work cannot reuse the whole page in the following scenario:
65          *  ________________________________________________________________
66          * |      tail (partial) page     |       head (partial) page       |
67          * |  (belongs to the next work)  |  (belongs to the current work)  |
68          * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
69          */
70         Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
71         /*
72          * The current work has been linked with the processed chained works,
73          * and could also be linked with the potentially remaining works, which
74          * means if the processing page is the tail partial page of the work,
75          * the current work can safely use the whole page (since the next work
76          * is under control) for in-place decompression, as illustrated below:
77          *  ________________________________________________________________
78          * |  tail (partial) page  |          head (partial) page           |
79          * | (of the current work) |         (of the previous work)         |
80          * |  PRIMARY_FOLLOWED or  |                                        |
81          * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
82          *
83          * [  (*) the above page can be used for the current work itself.  ]
84          */
85         Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
86         Z_EROFS_VLE_WORK_MAX
87 };
88
89 struct z_erofs_vle_work_builder {
90         enum z_erofs_vle_work_role role;
91         /*
92          * 'hosted = false' means that the current workgroup doesn't belong to
93          * the owned chained workgroups. In other words, it is none of our
94          * business to submit this workgroup.
95          */
96         bool hosted;
97
98         struct z_erofs_vle_workgroup *grp;
99         struct z_erofs_vle_work *work;
100         struct z_erofs_pagevec_ctor vector;
101
102         /* pages used for reading the compressed data */
103         struct page **compressed_pages;
104         unsigned compressed_deficit;
105 };
106
107 #define VLE_WORK_BUILDER_INIT() \
108         { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
109
110 #ifdef EROFS_FS_HAS_MANAGED_CACHE
111
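/*
 * Try to fill the compressed_pages[] array of a cluster from the managed
 * page cache. Returns true only if every compressed block is already
 * available (i.e. no extra I/O is needed). When reserve_allocation is set,
 * missing slots are marked as EROFS_UNALLOCATED_CACHED_PAGE so that pages
 * can be allocated and added to the managed cache at submission time.
 */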
112 static bool grab_managed_cache_pages(struct address_space *mapping,
113                                      erofs_blk_t start,
114                                      struct page **compressed_pages,
115                                      int clusterblks,
116                                      bool reserve_allocation)
117 {
118         bool noio = true;
119         unsigned int i;
120
121         /* TODO: optimize by introducing find_get_pages_range */
122         for (i = 0; i < clusterblks; ++i) {
123                 struct page *page, *found;
124
125                 if (READ_ONCE(compressed_pages[i]) != NULL)
126                         continue;
127
128                 page = found = find_get_page(mapping, start + i);
129                 if (found == NULL) {
130                         noio = false;
131                         if (!reserve_allocation)
132                                 continue;
133                         page = EROFS_UNALLOCATED_CACHED_PAGE;
134                 }
135
136                 if (NULL == cmpxchg(compressed_pages + i, NULL, page))
137                         continue;
138
139                 if (found != NULL)
140                         put_page(found);
141         }
142         return noio;
143 }
144
145 /* called by erofs_shrinker to get rid of all compressed_pages */
146 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
147                                        struct erofs_workgroup *egrp)
148 {
149         struct z_erofs_vle_workgroup *const grp =
150                 container_of(egrp, struct z_erofs_vle_workgroup, obj);
151         struct address_space *const mapping = sbi->managed_cache->i_mapping;
152         const int clusterpages = erofs_clusterpages(sbi);
153         int i;
154
155         /*
156          * refcount of the workgroup is now frozen as 1,
157          * therefore no need to worry about concurrent decompression users.
158          */
159         for (i = 0; i < clusterpages; ++i) {
160                 struct page *page = grp->compressed_pages[i];
161
162                 if (page == NULL || page->mapping != mapping)
163                         continue;
164
165                 /* block other users from reclaiming or migrating the page */
166                 if (!trylock_page(page))
167                         return -EBUSY;
168
169                 /* barrier is implied in the following 'unlock_page' */
170                 WRITE_ONCE(grp->compressed_pages[i], NULL);
171
172                 set_page_private(page, 0);
173                 ClearPagePrivate(page);
174
175                 unlock_page(page);
176                 put_page(page);
177         }
178         return 0;
179 }
180
181 int erofs_try_to_free_cached_page(struct address_space *mapping,
182                                   struct page *page)
183 {
184         struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
185         const unsigned int clusterpages = erofs_clusterpages(sbi);
186
187         struct z_erofs_vle_workgroup *grp;
188         int ret = 0;    /* 0 - busy */
189
190         /* prevent the workgroup from being freed */
191         rcu_read_lock();
192         grp = (void *)page_private(page);
193
194         if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
195                 unsigned int i;
196
197                 for (i = 0; i < clusterpages; ++i) {
198                         if (grp->compressed_pages[i] == page) {
199                                 WRITE_ONCE(grp->compressed_pages[i], NULL);
200                                 ret = 1;
201                                 break;
202                         }
203                 }
204                 erofs_workgroup_unfreeze(&grp->obj, 1);
205         }
206         rcu_read_unlock();
207
208         if (ret) {
209                 ClearPagePrivate(page);
210                 put_page(page);
211         }
212         return ret;
213 }
214 #endif
215
216 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
217 static inline bool try_to_reuse_as_compressed_page(
218         struct z_erofs_vle_work_builder *b,
219         struct page *page)
220 {
221         while (b->compressed_deficit) {
222                 --b->compressed_deficit;
223                 if (NULL == cmpxchg(b->compressed_pages++, NULL, page))
224                         return true;
225         }
226
227         return false;
228 }
229
230 /* callers must hold work->lock */
231 static int z_erofs_vle_work_add_page(
232         struct z_erofs_vle_work_builder *builder,
233         struct page *page,
234         enum z_erofs_page_type type)
235 {
236         int ret;
237         bool occupied;
238
239         /* give priority to the compressed data storage */
240         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
241                 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
242                 try_to_reuse_as_compressed_page(builder, page))
243                 return 0;
244
245         ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
246                 page, type, &occupied);
247         builder->work->vcnt += (unsigned)ret;
248
249         return ret ? 0 : -EAGAIN;
250 }
251
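/*
 * Try to claim the workgroup for the caller's owned chain:
 *  - type 1 (grp->next == NIL): the workgroup is claimed and becomes the
 *    new head of the owned chain (PRIMARY_FOLLOWED);
 *  - type 2 (grp->next == TAIL): the caller's chain is hooked up to the
 *    tail of an existing open chain (PRIMARY_HOOKED);
 *  - otherwise the workgroup cannot be claimed and PRIMARY is returned.
 */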
252 static enum z_erofs_vle_work_role
253 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
254                        z_erofs_vle_owned_workgrp_t *owned_head,
255                        bool *hosted)
256 {
257         DBG_BUGON(*hosted == true);
258
259         /* let's claim the following types of workgroups */
260 retry:
261         if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
262                 /* type 1, nil workgroup */
263                 if (Z_EROFS_VLE_WORKGRP_NIL != cmpxchg(&grp->next,
264                         Z_EROFS_VLE_WORKGRP_NIL, *owned_head))
265                         goto retry;
266
267                 *owned_head = grp;
268                 *hosted = true;
269                 /* lucky, I am the followee :) */
270                 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
271
272         } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
273                 /*
274                  * type 2, link to the end of an existing open chain,
275                  * be careful that its submission itself is governed
276                  * by the original owned chain.
277                  */
278                 if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
279                         Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
280                         goto retry;
281                 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
282                 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
283         }
284
285         return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
286 }
287
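/*
 * Look up an existing workgroup by its block index. On success the
 * primary work is returned with work->lock held and the workgroup is
 * claimed if possible; NULL means no workgroup has been registered yet.
 */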
288 static struct z_erofs_vle_work *
289 z_erofs_vle_work_lookup(struct super_block *sb,
290                         pgoff_t idx, unsigned pageofs,
291                         struct z_erofs_vle_workgroup **grp_ret,
292                         enum z_erofs_vle_work_role *role,
293                         z_erofs_vle_owned_workgrp_t *owned_head,
294                         bool *hosted)
295 {
296         bool tag, primary;
297         struct erofs_workgroup *egrp;
298         struct z_erofs_vle_workgroup *grp;
299         struct z_erofs_vle_work *work;
300
301         egrp = erofs_find_workgroup(sb, idx, &tag);
302         if (egrp == NULL) {
303                 *grp_ret = NULL;
304                 return NULL;
305         }
306
307         *grp_ret = grp = container_of(egrp,
308                 struct z_erofs_vle_workgroup, obj);
309
310         work = z_erofs_vle_grab_work(grp, pageofs);
311         /* if multiref is disabled, `primary' is always true */
312         primary = true;
313
314         if (work->pageofs != pageofs) {
315                 DBG_BUGON(1);
316                 erofs_workgroup_put(egrp);
317                 return ERR_PTR(-EIO);
318         }
319
320         /*
321          * lock must be taken first to avoid grp->next == NIL between
322          * claiming workgroup and adding pages:
323          *                        grp->next != NIL
324          *   grp->next = NIL
325          *   mutex_unlock_all
326          *                        mutex_lock(&work->lock)
327          *                        add all pages to pagevec
328          *
329          * [correct locking case 1]:
330          *   mutex_lock(grp->work[a])
331          *   ...
332          *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
333          *   ...                          *role = SECONDARY
334          *                                add all pages to pagevec
335          *                                ...
336          *                                mutex_unlock(grp->work[c])
337          *   mutex_lock(grp->work[c])
338          *   ...
339          *   grp->next = NIL
340          *   mutex_unlock_all
341          *
342          * [correct locking case 2]:
343          *   mutex_lock(grp->work[b])
344          *   ...
345          *   mutex_lock(grp->work[a])
346          *   ...
347          *   mutex_lock(grp->work[c])
348          *   ...
349          *   grp->next = NIL
350          *   mutex_unlock_all
351          *                                mutex_lock(grp->work[a])
352          *                                *role = PRIMARY_OWNER
353          *                                add all pages to pagevec
354          *                                ...
355          */
356         mutex_lock(&work->lock);
357
358         *hosted = false;
359         if (!primary)
360                 *role = Z_EROFS_VLE_WORK_SECONDARY;
361         else    /* claim the workgroup if possible */
362                 *role = try_to_claim_workgroup(grp, owned_head, hosted);
363         return work;
364 }
365
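/*
 * Allocate and register a new workgroup for the current physical cluster.
 * A new workgroup is always claimed as type 1 (PRIMARY_FOLLOWED) and its
 * primary work is returned locked; ERR_PTR(-EAGAIN) tells the caller to
 * retry the lookup (typically another thread registered it first).
 */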
366 static struct z_erofs_vle_work *
367 z_erofs_vle_work_register(struct super_block *sb,
368                           struct z_erofs_vle_workgroup **grp_ret,
369                           struct erofs_map_blocks *map,
370                           pgoff_t index, unsigned pageofs,
371                           enum z_erofs_vle_work_role *role,
372                           z_erofs_vle_owned_workgrp_t *owned_head,
373                           bool *hosted)
374 {
375         bool newgrp = false;
376         struct z_erofs_vle_workgroup *grp = *grp_ret;
377         struct z_erofs_vle_work *work;
378
379         /* if multiref is disabled, an existing grp should never be passed in here */
380         if (unlikely(grp)) {
381                 DBG_BUGON(1);
382                 return ERR_PTR(-EINVAL);
383         }
384
385         /* no available workgroup, let's allocate one */
386         grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
387         if (unlikely(grp == NULL))
388                 return ERR_PTR(-ENOMEM);
389
390         grp->obj.index = index;
391         grp->llen = map->m_llen;
392
393         z_erofs_vle_set_workgrp_fmt(grp,
394                 (map->m_flags & EROFS_MAP_ZIPPED) ?
395                         Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
396                         Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
397         atomic_set(&grp->obj.refcount, 1);
398
399         /* new workgrps have been claimed as type 1 */
400         WRITE_ONCE(grp->next, *owned_head);
401         /* primary and followed work for all new workgrps */
402         *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
403         /* it should be submitted by ourselves */
404         *hosted = true;
405
406         newgrp = true;
407         work = z_erofs_vle_grab_primary_work(grp);
408         work->pageofs = pageofs;
409
410         mutex_init(&work->lock);
411
412         if (newgrp) {
413                 int err = erofs_register_workgroup(sb, &grp->obj, 0);
414
415                 if (err) {
416                         kmem_cache_free(z_erofs_workgroup_cachep, grp);
417                         return ERR_PTR(-EAGAIN);
418                 }
419         }
420
421         *owned_head = *grp_ret = grp;
422
423         mutex_lock(&work->lock);
424         return work;
425 }
426
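/* lock-free update: raise grp->llen to llen unless it is already larger */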
427 static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
428                                          unsigned int llen)
429 {
430         while (1) {
431                 unsigned int orig_llen = grp->llen;
432
433                 if (orig_llen >= llen || orig_llen ==
434                         cmpxchg(&grp->llen, orig_llen, llen))
435                         break;
436         }
437 }
438
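/*
 * work roles are strictly ordered so that '>=' checks can be used:
 * SECONDARY < PRIMARY < PRIMARY_HOOKED < PRIMARY_FOLLOWED
 */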
439 #define builder_is_hooked(builder) \
440         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
441
442 #define builder_is_followed(builder) \
443         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
444
445 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
446                                        struct super_block *sb,
447                                        struct erofs_map_blocks *map,
448                                        z_erofs_vle_owned_workgrp_t *owned_head)
449 {
450         const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
451         const erofs_blk_t index = erofs_blknr(map->m_pa);
452         const unsigned pageofs = map->m_la & ~PAGE_MASK;
453         struct z_erofs_vle_workgroup *grp;
454         struct z_erofs_vle_work *work;
455
456         DBG_BUGON(builder->work != NULL);
457
458         /* must be Z_EROFS_VLE_WORKGRP_TAIL or a valid chained workgroup */
459         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
460         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
461
462         DBG_BUGON(erofs_blkoff(map->m_pa));
463
464 repeat:
465         work = z_erofs_vle_work_lookup(sb, index,
466                 pageofs, &grp, &builder->role, owned_head, &builder->hosted);
467         if (work != NULL) {
468                 __update_workgrp_llen(grp, map->m_llen);
469                 goto got_it;
470         }
471
472         work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
473                 &builder->role, owned_head, &builder->hosted);
474
475         if (unlikely(work == ERR_PTR(-EAGAIN)))
476                 goto repeat;
477
478         if (unlikely(IS_ERR(work)))
479                 return PTR_ERR(work);
480 got_it:
481         z_erofs_pagevec_ctor_init(&builder->vector,
482                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
483
484         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
485                 /* enable possible in-place decompression */
486                 builder->compressed_pages = grp->compressed_pages;
487                 builder->compressed_deficit = clusterpages;
488         } else {
489                 builder->compressed_pages = NULL;
490                 builder->compressed_deficit = 0;
491         }
492
493         builder->grp = grp;
494         builder->work = work;
495         return 0;
496 }
497
498 /*
499  * keep in mind that referenced workgroups will be freed
500  * only after an RCU grace period, so rcu_read_lock() can
501  * prevent a workgroup from being freed.
502  */
503 static void z_erofs_rcu_callback(struct rcu_head *head)
504 {
505         struct z_erofs_vle_work *work = container_of(head,
506                 struct z_erofs_vle_work, rcu);
507         struct z_erofs_vle_workgroup *grp =
508                 z_erofs_vle_work_workgroup(work, true);
509
510         kmem_cache_free(z_erofs_workgroup_cachep, grp);
511 }
512
513 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
514 {
515         struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
516                 struct z_erofs_vle_workgroup, obj);
517         struct z_erofs_vle_work *const work = &vgrp->work;
518
519         call_rcu(&work->rcu, z_erofs_rcu_callback);
520 }
521
522 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
523         struct z_erofs_vle_work *work __maybe_unused)
524 {
525         erofs_workgroup_put(&grp->obj);
526 }
527
528 void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
529 {
530         struct z_erofs_vle_workgroup *grp =
531                 z_erofs_vle_work_workgroup(work, true);
532
533         __z_erofs_vle_work_release(grp, work);
534 }
535
536 static inline bool
537 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
538 {
539         struct z_erofs_vle_work *work = builder->work;
540
541         if (work == NULL)
542                 return false;
543
544         z_erofs_pagevec_ctor_exit(&builder->vector, false);
545         mutex_unlock(&work->lock);
546
547         /*
548          * once all pending pages have been added, don't hold the work
549          * reference any longer if the current work isn't hosted by ourselves.
550          */
551         if (!builder->hosted)
552                 __z_erofs_vle_work_release(builder->grp, work);
553
554         builder->work = NULL;
555         builder->grp = NULL;
556         return true;
557 }
558
559 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
560                                                gfp_t gfp)
561 {
562         struct page *page = erofs_allocpage(pagepool, gfp);
563
564         if (unlikely(page == NULL))
565                 return NULL;
566
567         page->mapping = Z_EROFS_MAPPING_STAGING;
568         return page;
569 }
570
571 struct z_erofs_vle_frontend {
572         struct inode *const inode;
573
574         struct z_erofs_vle_work_builder builder;
575         struct erofs_map_blocks_iter m_iter;
576
577         z_erofs_vle_owned_workgrp_t owned_head;
578
579         bool initial;
580 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
581         erofs_off_t cachedzone_la;
582 #endif
583 };
584
585 #define VLE_FRONTEND_INIT(__i) { \
586         .inode = __i, \
587         .m_iter = { \
588                 { .m_llen = 0, .m_plen = 0 }, \
589                 .mpage = NULL \
590         }, \
591         .builder = VLE_WORK_BUILDER_INIT(), \
592         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
593         .initial = true, }
594
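/*
 * Read one file page: walk the logical extents covering the page from its
 * end back to its start, attach each part to the corresponding work
 * (creating or claiming workgroups as needed), and record the page as an
 * online page so it can be unlocked once decompression completes.
 */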
595 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
596                                 struct page *page,
597                                 struct list_head *page_pool)
598 {
599         struct super_block *const sb = fe->inode->i_sb;
600         struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
601         struct erofs_map_blocks_iter *const m = &fe->m_iter;
602         struct erofs_map_blocks *const map = &m->map;
603         struct z_erofs_vle_work_builder *const builder = &fe->builder;
604         const loff_t offset = page_offset(page);
605
606         bool tight = builder_is_hooked(builder);
607         struct z_erofs_vle_work *work = builder->work;
608
609 #ifdef EROFS_FS_HAS_MANAGED_CACHE
610         struct address_space *const mngda = sbi->managed_cache->i_mapping;
611         struct z_erofs_vle_workgroup *grp;
612         bool noio_outoforder;
613 #endif
614
615         enum z_erofs_page_type page_type;
616         unsigned cur, end, spiltted, index;
617         int err = 0;
618
619         /* register locked file pages as online pages in pack */
620         z_erofs_onlinepage_init(page);
621
622         spiltted = 0;
623         end = PAGE_SIZE;
624 repeat:
625         cur = end - 1;
626
627         /* lucky, within the range of the current map_blocks */
628         if (offset + cur >= map->m_la &&
629                 offset + cur < map->m_la + map->m_llen) {
630                 /* didn't get a valid unzip work previously (very rare) */
631                 if (!builder->work)
632                         goto restart_now;
633                 goto hitted;
634         }
635
636         /* go ahead with the next map_blocks */
637         debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
638
639         if (z_erofs_vle_work_iter_end(builder))
640                 fe->initial = false;
641
642         map->m_la = offset + cur;
643         map->m_llen = 0;
644         err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
645         if (unlikely(err))
646                 goto err_out;
647
648 restart_now:
649         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
650                 goto hitted;
651
652         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
653         DBG_BUGON(erofs_blkoff(map->m_pa));
654
655         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
656         if (unlikely(err))
657                 goto err_out;
658
659 #ifdef EROFS_FS_HAS_MANAGED_CACHE
660         grp = fe->builder.grp;
661
662         /* let's do out-of-order decompression for noio */
663         noio_outoforder = grab_managed_cache_pages(mngda,
664                 erofs_blknr(map->m_pa),
665                 grp->compressed_pages, erofs_blknr(map->m_plen),
666                 /* compressed page caching selection strategy */
667                 fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
668                         map->m_la < fe->cachedzone_la : 0));
669
670         if (noio_outoforder && builder_is_followed(builder))
671                 builder->role = Z_EROFS_VLE_WORK_PRIMARY;
672 #endif
673
674         tight &= builder_is_hooked(builder);
675         work = builder->work;
676 hitted:
677         cur = end - min_t(unsigned, offset + end - map->m_la, end);
678         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
679                 zero_user_segment(page, cur, end);
680                 goto next_part;
681         }
682
683         /* let's derive page type */
684         page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
685                 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
686                         (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
687                                 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
688
689         if (cur)
690                 tight &= builder_is_followed(builder);
691
692 retry:
693         err = z_erofs_vle_work_add_page(builder, page, page_type);
694         /* should allocate an additional staging page for pagevec */
695         if (err == -EAGAIN) {
696                 struct page *const newpage =
697                         __stagingpage_alloc(page_pool, GFP_NOFS);
698
699                 err = z_erofs_vle_work_add_page(builder,
700                         newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
701                 if (likely(!err))
702                         goto retry;
703         }
704
705         if (unlikely(err))
706                 goto err_out;
707
708         index = page->index - map->m_la / PAGE_SIZE;
709
710         /* FIXME! avoid the last redundant fixup & endio */
711         z_erofs_onlinepage_fixup(page, index, true);
712
713         /* bump up the number of spiltted parts of a page */
714         ++spiltted;
715         /* also update nr_pages */
716         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
717 next_part:
718         /* can be used for verification */
719         map->m_llen = offset + cur - map->m_la;
720
721         end = cur;
722         if (end > 0)
723                 goto repeat;
724
725 out:
726         /* FIXME! avoid the last redundant fixup & endio */
727         z_erofs_onlinepage_endio(page);
728
729         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
730                 __func__, page, spiltted, map->m_llen);
731         return err;
732
733         /* if some error occurred while processing this page */
734 err_out:
735         SetPageError(page);
736         goto out;
737 }
738
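/*
 * Adjust pending_bios by 'bios' (the number of submitted bios at
 * submission time, -1 from each bio completion); once it reaches zero,
 * either wake up the foreground waiter or queue the background
 * decompression work.
 */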
739 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
740 {
741         tagptr1_t t = tagptr_init(tagptr1_t, ptr);
742         struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
743         bool background = tagptr_unfold_tags(t);
744
745         if (!background) {
746                 unsigned long flags;
747
748                 spin_lock_irqsave(&io->u.wait.lock, flags);
749                 if (!atomic_add_return(bios, &io->pending_bios))
750                         wake_up_locked(&io->u.wait);
751                 spin_unlock_irqrestore(&io->u.wait.lock, flags);
752                 return;
753         }
754
755         if (!atomic_add_return(bios, &io->pending_bios))
756                 queue_work(z_erofs_workqueue, &io->u.work);
757 }
758
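/*
 * bio completion handler for compressed pages: mark managed-cache pages
 * uptodate (or PG_error on failure) and unlock them, then drop one count
 * from pending_bios to kick off decompression when all bios are done.
 */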
759 static inline void z_erofs_vle_read_endio(struct bio *bio)
760 {
761         const blk_status_t err = bio->bi_status;
762         unsigned i;
763         struct bio_vec *bvec;
764 #ifdef EROFS_FS_HAS_MANAGED_CACHE
765         struct address_space *mngda = NULL;
766 #endif
767
768         bio_for_each_segment_all(bvec, bio, i) {
769                 struct page *page = bvec->bv_page;
770                 bool cachemngd = false;
771
772                 DBG_BUGON(PageUptodate(page));
773                 DBG_BUGON(!page->mapping);
774
775 #ifdef EROFS_FS_HAS_MANAGED_CACHE
776                 if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
777                         struct inode *const inode = page->mapping->host;
778                         struct super_block *const sb = inode->i_sb;
779
780                         mngda = EROFS_SB(sb)->managed_cache->i_mapping;
781                 }
782
783                 /*
784                  * If mngda has not been obtained yet, it equals NULL;
785                  * however, page->mapping is never NULL if everything works properly.
786                  */
787                 cachemngd = (page->mapping == mngda);
788 #endif
789
790                 if (unlikely(err))
791                         SetPageError(page);
792                 else if (cachemngd)
793                         SetPageUptodate(page);
794
795                 if (cachemngd)
796                         unlock_page(page);
797         }
798
799         z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
800         bio_put(bio);
801 }
802
803 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
804 static DEFINE_MUTEX(z_pagemap_global_lock);
805
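/*
 * Decompress a single workgroup: collect all online pages recorded in the
 * work's pagevec, merge in compressed pages selected for in-place
 * decompression, and feed them to the plain-copy, per-cpu fast or vmap
 * fallback decompressors.
 */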
806 static int z_erofs_vle_unzip(struct super_block *sb,
807         struct z_erofs_vle_workgroup *grp,
808         struct list_head *page_pool)
809 {
810         struct erofs_sb_info *const sbi = EROFS_SB(sb);
811 #ifdef EROFS_FS_HAS_MANAGED_CACHE
812         struct address_space *const mngda = sbi->managed_cache->i_mapping;
813 #endif
814         const unsigned clusterpages = erofs_clusterpages(sbi);
815
816         struct z_erofs_pagevec_ctor ctor;
817         unsigned int nr_pages;
818         unsigned int sparsemem_pages = 0;
819         struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
820         struct page **pages, **compressed_pages, *page;
821         unsigned i, llen;
822
823         enum z_erofs_page_type page_type;
824         bool overlapped;
825         struct z_erofs_vle_work *work;
826         void *vout;
827         int err;
828
829         might_sleep();
830         work = z_erofs_vle_grab_primary_work(grp);
831         DBG_BUGON(!READ_ONCE(work->nr_pages));
832
833         mutex_lock(&work->lock);
834         nr_pages = work->nr_pages;
835
836         if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
837                 pages = pages_onstack;
838         else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
839                 mutex_trylock(&z_pagemap_global_lock))
840                 pages = z_pagemap_global;
841         else {
842 repeat:
843                 pages = kvmalloc_array(nr_pages,
844                         sizeof(struct page *), GFP_KERNEL);
845
846                 /* fallback to global pagemap for the lowmem scenario */
847                 if (unlikely(pages == NULL)) {
848                         if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
849                                 goto repeat;
850                         else {
851                                 mutex_lock(&z_pagemap_global_lock);
852                                 pages = z_pagemap_global;
853                         }
854                 }
855         }
856
857         for (i = 0; i < nr_pages; ++i)
858                 pages[i] = NULL;
859
860         z_erofs_pagevec_ctor_init(&ctor,
861                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
862
863         for (i = 0; i < work->vcnt; ++i) {
864                 unsigned pagenr;
865
866                 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
867
868                 /* all pages in pagevec ought to be valid */
869                 DBG_BUGON(page == NULL);
870                 DBG_BUGON(page->mapping == NULL);
871
872                 if (z_erofs_gather_if_stagingpage(page_pool, page))
873                         continue;
874
875                 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
876                         pagenr = 0;
877                 else
878                         pagenr = z_erofs_onlinepage_index(page);
879
880                 DBG_BUGON(pagenr >= nr_pages);
881                 DBG_BUGON(pages[pagenr]);
882
883                 pages[pagenr] = page;
884         }
885         sparsemem_pages = i;
886
887         z_erofs_pagevec_ctor_exit(&ctor, true);
888
889         overlapped = false;
890         compressed_pages = grp->compressed_pages;
891
892         err = 0;
893         for (i = 0; i < clusterpages; ++i) {
894                 unsigned pagenr;
895
896                 page = compressed_pages[i];
897
898                 /* all compressed pages ought to be valid */
899                 DBG_BUGON(page == NULL);
900                 DBG_BUGON(page->mapping == NULL);
901
902                 if (!z_erofs_is_stagingpage(page)) {
903 #ifdef EROFS_FS_HAS_MANAGED_CACHE
904                         if (page->mapping == mngda) {
905                                 if (unlikely(!PageUptodate(page)))
906                                         err = -EIO;
907                                 continue;
908                         }
909 #endif
910
911                         /*
912                          * only non-head pages can be selected
913                          * for in-place decompression
914                          */
915                         pagenr = z_erofs_onlinepage_index(page);
916
917                         DBG_BUGON(pagenr >= nr_pages);
918                         DBG_BUGON(pages[pagenr]);
919                         ++sparsemem_pages;
920                         pages[pagenr] = page;
921
922                         overlapped = true;
923                 }
924
925                 /* PG_error needs checking for in-place and staging pages */
926                 if (unlikely(PageError(page))) {
927                         DBG_BUGON(PageUptodate(page));
928                         err = -EIO;
929                 }
930         }
931
932         if (unlikely(err))
933                 goto out;
934
935         llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
936
937         if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
938                 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
939                         pages, nr_pages, work->pageofs);
940                 goto out;
941         }
942
943         if (llen > grp->llen)
944                 llen = grp->llen;
945
946         err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
947                                             pages, llen, work->pageofs);
948         if (err != -ENOTSUPP)
949                 goto out;
950
951         if (sparsemem_pages >= nr_pages)
952                 goto skip_allocpage;
953
954         for (i = 0; i < nr_pages; ++i) {
955                 if (pages[i] != NULL)
956                         continue;
957
958                 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
959         }
960
961 skip_allocpage:
962         vout = erofs_vmap(pages, nr_pages);
963         if (!vout) {
964                 err = -ENOMEM;
965                 goto out;
966         }
967
968         err = z_erofs_vle_unzip_vmap(compressed_pages,
969                 clusterpages, vout, llen, work->pageofs, overlapped);
970
971         erofs_vunmap(vout, nr_pages);
972
973 out:
974         /* must handle all compressed pages before ending pages */
975         for (i = 0; i < clusterpages; ++i) {
976                 page = compressed_pages[i];
977
978 #ifdef EROFS_FS_HAS_MANAGED_CACHE
979                 if (page->mapping == mngda)
980                         continue;
981 #endif
982                 /* recycle all individual staging pages */
983                 (void)z_erofs_gather_if_stagingpage(page_pool, page);
984
985                 WRITE_ONCE(compressed_pages[i], NULL);
986         }
987
988         for (i = 0; i < nr_pages; ++i) {
989                 page = pages[i];
990                 if (!page)
991                         continue;
992
993                 DBG_BUGON(page->mapping == NULL);
994
995                 /* recycle all individual staging pages */
996                 if (z_erofs_gather_if_stagingpage(page_pool, page))
997                         continue;
998
999                 if (unlikely(err < 0))
1000                         SetPageError(page);
1001
1002                 z_erofs_onlinepage_endio(page);
1003         }
1004
1005         if (pages == z_pagemap_global)
1006                 mutex_unlock(&z_pagemap_global_lock);
1007         else if (unlikely(pages != pages_onstack))
1008                 kvfree(pages);
1009
1010         work->nr_pages = 0;
1011         work->vcnt = 0;
1012
1013         /* all work locks MUST be taken before the following line */
1014
1015         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1016
1017         /* all work locks SHOULD be released right now */
1018         mutex_unlock(&work->lock);
1019
1020         z_erofs_vle_work_release(work);
1021         return err;
1022 }
1023
1024 static void z_erofs_vle_unzip_all(struct super_block *sb,
1025                                   struct z_erofs_vle_unzip_io *io,
1026                                   struct list_head *page_pool)
1027 {
1028         z_erofs_vle_owned_workgrp_t owned = io->head;
1029
1030         while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1031                 struct z_erofs_vle_workgroup *grp;
1032
1033                 /* it's impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1034                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1035
1036                 /* it's impossible that 'owned' equals NULL */
1037                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1038
1039                 grp = owned;
1040                 owned = READ_ONCE(grp->next);
1041
1042                 z_erofs_vle_unzip(sb, grp, page_pool);
1043         }
1044 }
1045
1046 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1047 {
1048         struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1049                 struct z_erofs_vle_unzip_io_sb, io.u.work);
1050         LIST_HEAD(page_pool);
1051
1052         DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1053         z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1054
1055         put_pages_list(&page_pool);
1056         kvfree(iosb);
1057 }
1058
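/*
 * Set up an I/O descriptor: foreground callers pass their on-stack 'io'
 * and will wait on io->u.wait, while background I/O allocates an extra
 * z_erofs_vle_unzip_io_sb whose work item runs z_erofs_vle_unzip_wq.
 */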
1059 static inline struct z_erofs_vle_unzip_io *
1060 prepare_io_handler(struct super_block *sb,
1061                    struct z_erofs_vle_unzip_io *io,
1062                    bool background)
1063 {
1064         struct z_erofs_vle_unzip_io_sb *iosb;
1065
1066         if (!background) {
1067                 /* waitqueue available for foreground io */
1068                 BUG_ON(io == NULL);
1069
1070                 init_waitqueue_head(&io->u.wait);
1071                 atomic_set(&io->pending_bios, 0);
1072                 goto out;
1073         }
1074
1075         if (io != NULL)
1076                 BUG();
1077         else {
1078                 /* allocate extra io descriptor for background io */
1079                 iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1080                         GFP_KERNEL | __GFP_NOFAIL);
1081                 BUG_ON(iosb == NULL);
1082
1083                 io = &iosb->io;
1084         }
1085
1086         iosb->sb = sb;
1087         INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1088 out:
1089         io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1090         return io;
1091 }
1092
1093 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1094 /* true - unlocked (noio), false - locked (need submit io) */
1095 static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
1096                                         struct page *page)
1097 {
1098         wait_on_page_locked(page);
1099         if (PagePrivate(page) && PageUptodate(page))
1100                 return true;
1101
1102         lock_page(page);
1103         ClearPageError(page);
1104
1105         if (unlikely(!PagePrivate(page))) {
1106                 set_page_private(page, (unsigned long)grp);
1107                 SetPagePrivate(page);
1108         }
1109         if (unlikely(PageUptodate(page))) {
1110                 unlock_page(page);
1111                 return true;
1112         }
1113         return false;
1114 }
1115
1116 #define __FSIO_1 1
1117 #else
1118 #define __FSIO_1 0
1119 #endif
1120
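/*
 * Walk the owned chain of workgroups, fill in any missing compressed
 * pages (from the managed cache or freshly allocated staging pages) and
 * submit read bios for them; workgroups whose pages are all uptodate are
 * moved to the no-I/O list for immediate decompression.
 */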
1121 static bool z_erofs_vle_submit_all(struct super_block *sb,
1122                                    z_erofs_vle_owned_workgrp_t owned_head,
1123                                    struct list_head *pagepool,
1124                                    struct z_erofs_vle_unzip_io *fg_io,
1125                                    bool force_fg)
1126 {
1127         struct erofs_sb_info *const sbi = EROFS_SB(sb);
1128         const unsigned clusterpages = erofs_clusterpages(sbi);
1129         const gfp_t gfp = GFP_NOFS;
1130 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1131         struct address_space *const mngda = sbi->managed_cache->i_mapping;
1132         struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
1133 #endif
1134         struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
1135         struct bio *bio;
1136         tagptr1_t bi_private;
1137         /* since bio will be NULL, no need to initialize last_index */
1138         pgoff_t uninitialized_var(last_index);
1139         bool force_submit = false;
1140         unsigned nr_bios;
1141
1142         if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1143                 return false;
1144
1145         /*
1146          * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io
1147          * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
1148          */
1149 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1150         ios[0] = prepare_io_handler(sb, fg_io + 0, false);
1151 #endif
1152
1153         if (force_fg) {
1154                 ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
1155                 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
1156         } else {
1157                 ios[__FSIO_1] = prepare_io_handler(sb, NULL, true);
1158                 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1);
1159         }
1160
1161         nr_bios = 0;
1162         force_submit = false;
1163         bio = NULL;
1164
1165         /* by default, all need io submission */
1166         ios[__FSIO_1]->head = owned_head;
1167
1168         do {
1169                 struct z_erofs_vle_workgroup *grp;
1170                 struct page **compressed_pages, *oldpage, *page;
1171                 pgoff_t first_index;
1172                 unsigned i = 0;
1173 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1174                 unsigned int noio = 0;
1175                 bool cachemngd;
1176 #endif
1177                 int err;
1178
1179                 /* it's impossible that 'owned_head' equals the following */
1180                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1181                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1182
1183                 grp = owned_head;
1184
1185                 /* close the main owned chain at first */
1186                 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1187                         Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1188
1189                 first_index = grp->obj.index;
1190                 compressed_pages = grp->compressed_pages;
1191
1192                 force_submit |= (first_index != last_index + 1);
1193 repeat:
1194                 /* fulfill all compressed pages */
1195                 oldpage = page = READ_ONCE(compressed_pages[i]);
1196
1197 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1198                 cachemngd = false;
1199
1200                 if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
1201                         cachemngd = true;
1202                         goto do_allocpage;
1203                 } else if (page != NULL) {
1204                         if (page->mapping != mngda)
1205                                 BUG_ON(PageUptodate(page));
1206                         else if (recover_managed_page(grp, page)) {
1207                                 /* page is uptodate, skip io submission */
1208                                 force_submit = true;
1209                                 ++noio;
1210                                 goto skippage;
1211                         }
1212                 } else {
1213 do_allocpage:
1214 #else
1215                 if (page != NULL)
1216                         BUG_ON(PageUptodate(page));
1217                 else {
1218 #endif
1219                         page = __stagingpage_alloc(pagepool, gfp);
1220
1221                         if (oldpage != cmpxchg(compressed_pages + i,
1222                                 oldpage, page)) {
1223                                 list_add(&page->lru, pagepool);
1224                                 goto repeat;
1225 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1226                         } else if (cachemngd && !add_to_page_cache_lru(page,
1227                                 mngda, first_index + i, gfp)) {
1228                                 set_page_private(page, (unsigned long)grp);
1229                                 SetPagePrivate(page);
1230 #endif
1231                         }
1232                 }
1233
1234                 if (bio != NULL && force_submit) {
1235 submit_bio_retry:
1236                         __submit_bio(bio, REQ_OP_READ, 0);
1237                         bio = NULL;
1238                 }
1239
1240                 if (bio == NULL) {
1241                         bio = prepare_bio(sb, first_index + i,
1242                                 BIO_MAX_PAGES, z_erofs_vle_read_endio);
1243                         bio->bi_private = tagptr_cast_ptr(bi_private);
1244
1245                         ++nr_bios;
1246                 }
1247
1248                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1249                 if (err < PAGE_SIZE)
1250                         goto submit_bio_retry;
1251
1252                 force_submit = false;
1253                 last_index = first_index + i;
1254 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1255 skippage:
1256 #endif
1257                 if (++i < clusterpages)
1258                         goto repeat;
1259
1260 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1261                 if (noio < clusterpages) {
1262                         lstgrp_io = grp;
1263                 } else {
1264                         z_erofs_vle_owned_workgrp_t iogrp_next =
1265                                 owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
1266                                 Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
1267                                 owned_head;
1268
1269                         if (lstgrp_io == NULL)
1270                                 ios[1]->head = iogrp_next;
1271                         else
1272                                 WRITE_ONCE(lstgrp_io->next, iogrp_next);
1273
1274                         if (lstgrp_noio == NULL)
1275                                 ios[0]->head = grp;
1276                         else
1277                                 WRITE_ONCE(lstgrp_noio->next, grp);
1278
1279                         lstgrp_noio = grp;
1280                 }
1281 #endif
1282         } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1283
1284         if (bio != NULL)
1285                 __submit_bio(bio, REQ_OP_READ, 0);
1286
1287 #ifndef EROFS_FS_HAS_MANAGED_CACHE
1288         BUG_ON(!nr_bios);
1289 #else
1290         if (lstgrp_noio != NULL)
1291                 WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1292
1293         if (!force_fg && !nr_bios) {
1294                 kvfree(container_of(ios[1],
1295                         struct z_erofs_vle_unzip_io_sb, io));
1296                 return true;
1297         }
1298 #endif
1299
1300         z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
1301         return true;
1302 }
1303
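/*
 * Submit all pending workgroups, decompress the ones that needed no I/O
 * right away and, for synchronous (foreground) reads, wait for the
 * submitted bios and decompress the rest in the caller's context.
 */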
1304 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1305                                      struct list_head *pagepool,
1306                                      bool force_fg)
1307 {
1308         struct super_block *sb = f->inode->i_sb;
1309         struct z_erofs_vle_unzip_io io[1 + __FSIO_1];
1310
1311         if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1312                 return;
1313
1314 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1315         z_erofs_vle_unzip_all(sb, &io[0], pagepool);
1316 #endif
1317         if (!force_fg)
1318                 return;
1319
1320         /* wait until all bios are completed */
1321         wait_event(io[__FSIO_1].u.wait,
1322                 !atomic_read(&io[__FSIO_1].pending_bios));
1323
1324         /* let's do synchronous decompression */
1325         z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool);
1326 }
1327
1328 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1329                                              struct page *page)
1330 {
1331         struct inode *const inode = page->mapping->host;
1332         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1333         int err;
1334         LIST_HEAD(pagepool);
1335
1336 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1337         f.cachedzone_la = page->index << PAGE_SHIFT;
1338 #endif
1339         err = z_erofs_do_read_page(&f, page, &pagepool);
1340         (void)z_erofs_vle_work_iter_end(&f.builder);
1341
1342         /* if some compressed clusters are ready, submit them anyway */
1343         z_erofs_submit_and_unzip(&f, &pagepool, true);
1344
1345         if (err)
1346                 errln("%s, failed to read, err [%d]", __func__, err);
1347
1348         if (f.m_iter.mpage != NULL)
1349                 put_page(f.m_iter.mpage);
1350
1351         /* clean up the remaining free pages */
1352         put_pages_list(&pagepool);
1353         return err;
1354 }
1355
1356 static inline int __z_erofs_vle_normalaccess_readpages(
1357         struct file *filp,
1358         struct address_space *mapping,
1359         struct list_head *pages, unsigned nr_pages, bool sync)
1360 {
1361         struct inode *const inode = mapping->host;
1362
1363         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1364         gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1365         struct page *head = NULL;
1366         LIST_HEAD(pagepool);
1367
1368 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1369         f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
1370 #endif
1371         for (; nr_pages; --nr_pages) {
1372                 struct page *page = lru_to_page(pages);
1373
1374                 prefetchw(&page->flags);
1375                 list_del(&page->lru);
1376
1377                 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1378                         list_add(&page->lru, &pagepool);
1379                         continue;
1380                 }
1381
1382                 set_page_private(page, (unsigned long)head);
1383                 head = page;
1384         }
1385
1386         while (head != NULL) {
1387                 struct page *page = head;
1388                 int err;
1389
1390                 /* traversal in reverse order */
1391                 head = (void *)page_private(page);
1392
1393                 err = z_erofs_do_read_page(&f, page, &pagepool);
1394                 if (err) {
1395                         struct erofs_vnode *vi = EROFS_V(inode);
1396
1397                         errln("%s, readahead error at page %lu of nid %llu",
1398                                 __func__, page->index, vi->nid);
1399                 }
1400
1401                 put_page(page);
1402         }
1403
1404         (void)z_erofs_vle_work_iter_end(&f.builder);
1405
1406         z_erofs_submit_and_unzip(&f, &pagepool, sync);
1407
1408         if (f.m_iter.mpage != NULL)
1409                 put_page(f.m_iter.mpage);
1410
1411         /* clean up the remaining free pages */
1412         put_pages_list(&pagepool);
1413         return 0;
1414 }
1415
1416 static int z_erofs_vle_normalaccess_readpages(
1417         struct file *filp,
1418         struct address_space *mapping,
1419         struct list_head *pages, unsigned nr_pages)
1420 {
1421         return __z_erofs_vle_normalaccess_readpages(filp,
1422                 mapping, pages, nr_pages,
1423                 nr_pages < 4 /* sync */);
1424 }
1425
1426 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1427         .readpage = z_erofs_vle_normalaccess_readpage,
1428         .readpages = z_erofs_vle_normalaccess_readpages,
1429 };
1430
1431 #define __vle_cluster_advise(x, bit, bits) \
1432         ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
1433
1434 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
1435         Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
1436
1437 enum {
1438         Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
1439         Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
1440         Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
1441         Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
1442         Z_EROFS_VLE_CLUSTER_TYPE_MAX
1443 };
1444
1445 #define vle_cluster_type(di)    \
1446         __vle_cluster_type((di)->di_advise)
1447
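/*
 * Return the logical offset covered by this index inside its cluster:
 * head/plain clusters start at di_clusterofs, while a non-head cluster
 * spans the whole clustersize since its head lies in an earlier cluster.
 */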
1448 static inline unsigned
1449 vle_compressed_index_clusterofs(unsigned clustersize,
1450         struct z_erofs_vle_decompressed_index *di)
1451 {
1452         debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
1453                 __func__, di, di->di_advise, vle_cluster_type(di),
1454                 di->di_clusterofs, di->di_u.blkaddr);
1455
1456         switch (vle_cluster_type(di)) {
1457         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1458                 break;
1459         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1460         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1461                 return di->di_clusterofs;
1462         default:
1463                 BUG_ON(1);
1464         }
1465         return clustersize;
1466 }
1467
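/*
 * The decompressed index array follows the on-disk inode, its xattrs and
 * the extent header; these helpers locate the block number and in-block
 * offset of the index entry for a given logical cluster.
 */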
1468 static inline erofs_blk_t
1469 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1470 {
1471         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1472         struct erofs_vnode *vi = EROFS_V(inode);
1473
1474         unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1475                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1476                 index * sizeof(struct z_erofs_vle_decompressed_index);
1477
1478         return erofs_blknr(iloc(sbi, vi->nid) + ofs);
1479 }
1480
1481 static inline unsigned int
1482 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1483 {
1484         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1485         struct erofs_vnode *vi = EROFS_V(inode);
1486
1487         unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1488                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1489                 index * sizeof(struct z_erofs_vle_decompressed_index);
1490
1491         return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
1492 }
1493
1494 /*
1495  * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
1496  * ---
1497  * VLE compression mode attempts to compress variable-sized logical data
1498  * into a physical cluster of a fixed size.
1499  * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
1500  */
static erofs_off_t vle_get_logical_extent_head(
	struct inode *inode,
	struct page **page_iter,
	void **kaddr_iter,
	unsigned lcn,	/* logical cluster number */
	erofs_blk_t *pcn,
	unsigned *flags)
{
	/* for extent meta */
	struct page *page = *page_iter;
	erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
	struct z_erofs_vle_decompressed_index *di;
	unsigned long long ofs;
	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
	const unsigned int clustersize = 1 << clusterbits;
	unsigned int delta0;

	if (page->index != blkaddr) {
		kunmap_atomic(*kaddr_iter);
		unlock_page(page);
		put_page(page);

		*page_iter = page = erofs_get_meta_page(inode->i_sb,
			blkaddr, false);
		*kaddr_iter = kmap_atomic(page);
	}

	di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
	switch (vle_cluster_type(di)) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		delta0 = le16_to_cpu(di->di_u.delta[0]);
		DBG_BUGON(!delta0);
		DBG_BUGON(lcn < delta0);

		ofs = vle_get_logical_extent_head(inode,
			page_iter, kaddr_iter,
			lcn - delta0, pcn, flags);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		*flags ^= EROFS_MAP_ZIPPED;
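		/* fallthrough */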
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		/* clustersize should be a power of two */
		ofs = ((unsigned long long)lcn << clusterbits) +
			(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
		*pcn = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		BUG_ON(1);
	}
	return ofs;
}

int z_erofs_map_blocks_iter(struct inode *inode,
	struct erofs_map_blocks *map,
	struct page **mpage_ret, int flags)
{
	/* logical extent (start, end) offset */
	unsigned long long ofs, end;
	struct z_erofs_vle_decompressed_index *di;
	erofs_blk_t e_blkaddr, pcn;
	unsigned lcn, logical_cluster_ofs, cluster_type;
	u32 ofs_rem;
	struct page *mpage = *mpage_ret;
	void *kaddr;
	bool initial;
	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
	const unsigned int clustersize = 1 << clusterbits;
	int err = 0;

	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
	initial = !map->m_llen;

	/* when trying to read beyond EOF, leave it unmapped */
	if (unlikely(map->m_la >= inode->i_size)) {
		BUG_ON(!initial);
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size - 1;
		map->m_flags = 0;
		goto out;
	}

	debugln("%s, m_la %llu m_llen %llu --- start", __func__,
		map->m_la, map->m_llen);

	ofs = map->m_la + map->m_llen;

	/* clustersize should be a power of two */
	lcn = ofs >> clusterbits;
	ofs_rem = ofs & (clustersize - 1);

	e_blkaddr = vle_extent_blkaddr(inode, lcn);

	if (mpage == NULL || mpage->index != e_blkaddr) {
		if (mpage != NULL)
			put_page(mpage);

		mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
		*mpage_ret = mpage;
	} else {
		lock_page(mpage);
		DBG_BUGON(!PageUptodate(mpage));
	}

	kaddr = kmap_atomic(mpage);
	di = kaddr + vle_extent_blkoff(inode, lcn);

	debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
		e_blkaddr, vle_extent_blkoff(inode, lcn));

	logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
	if (!initial) {
		/* [walking mode] 'map' has already been initialized */
		map->m_llen += logical_cluster_ofs;
		goto unmap_out;
	}

	/* by default, compressed */
	map->m_flags |= EROFS_MAP_ZIPPED;

	end = (u64)(lcn + 1) * clustersize;

	cluster_type = vle_cluster_type(di);

	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (ofs_rem >= logical_cluster_ofs)
			map->m_flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (ofs_rem == logical_cluster_ofs) {
			pcn = le32_to_cpu(di->di_u.blkaddr);
			goto exact_hitted;
		}

		if (ofs_rem > logical_cluster_ofs) {
			ofs = lcn * clustersize | logical_cluster_ofs;
			pcn = le32_to_cpu(di->di_u.blkaddr);
			break;
		}

		/* logical cluster number should be >= 1 */
		if (unlikely(!lcn)) {
			errln("invalid logical cluster 0 at nid %llu",
				EROFS_V(inode)->nid);
			err = -EIO;
			goto unmap_out;
		}
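		/*
		 * ofs_rem < logical_cluster_ofs: the requested offset lies in
		 * the tail of the previous extent, which spills into this
		 * cluster; step back one logical cluster and resolve its head
		 * exactly as in the NONHEAD case below.
		 */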
		end = (lcn-- * clustersize) | logical_cluster_ofs;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		ofs = vle_get_logical_extent_head(inode, mpage_ret,
			&kaddr, lcn, &pcn, &map->m_flags);
		mpage = *mpage_ret;
		break;
	default:
		errln("unknown cluster type %u at offset %llu of nid %llu",
			cluster_type, ofs, EROFS_V(inode)->nid);
		err = -EIO;
		goto unmap_out;
	}

	map->m_la = ofs;
exact_hitted:
	map->m_llen = end - ofs;
	map->m_plen = clustersize;
	map->m_pa = blknr_to_addr(pcn);
	map->m_flags |= EROFS_MAP_MAPPED;
unmap_out:
	kunmap_atomic(kaddr);
	unlock_page(mpage);
out:
	debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		__func__, map->m_la, map->m_pa,
		map->m_llen, map->m_plen, map->m_flags);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0);
	return err;
}
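
/*
 * Minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	struct erofs_map_blocks map = { .m_la = pos };
 *	struct page *mpage = NULL;
 *	int err = z_erofs_map_blocks_iter(inode, &map, &mpage, 0);
 *
 * On success with EROFS_MAP_MAPPED set, the logical range
 * [map.m_la, map.m_la + map.m_llen) is backed by the physical cluster
 * starting at map.m_pa (map.m_plen == clustersize).  The same &mpage should
 * be passed to subsequent calls so the cached meta page can be reused, and
 * the caller is expected to drop the final meta page reference with
 * put_page() once the iteration is finished.
 */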