1 // SPDX-License-Identifier: GPL-2.0
3 * linux/drivers/staging/erofs/unzip_vle.c
5 * Copyright (C) 2018 HUAWEI, Inc.
6 * http://www.huawei.com/
7 * Created by Gao Xiang <gaoxiang25@huawei.com>
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of the Linux
11 * distribution for more details.
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
16 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
17 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
19 void z_erofs_exit_zip_subsystem(void)
21 destroy_workqueue(z_erofs_workqueue);
22 kmem_cache_destroy(z_erofs_workgroup_cachep);
25 static inline int init_unzip_workqueue(void)
27 const unsigned onlinecpus = num_possible_cpus();
30 * we don't need too many threads; limiting the thread count
31 * could improve scheduling performance.
33 z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
34 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
35 onlinecpus + onlinecpus / 4);
37 return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
40 int z_erofs_init_zip_subsystem(void)
42 z_erofs_workgroup_cachep =
43 kmem_cache_create("erofs_compress",
44 Z_EROFS_WORKGROUP_SIZE, 0,
45 SLAB_RECLAIM_ACCOUNT, NULL);
47 if (z_erofs_workgroup_cachep != NULL) {
48 if (!init_unzip_workqueue())
51 kmem_cache_destroy(z_erofs_workgroup_cachep);
56 enum z_erofs_vle_work_role {
57 Z_EROFS_VLE_WORK_SECONDARY,
58 Z_EROFS_VLE_WORK_PRIMARY,
60 * The current work was the tail of an existing chain, and all previously
61 * processed chained works have decided to hook up to it.
62 * A new chain should be created for the remaining unprocessed works,
63 * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
64 * the next work cannot reuse the whole page in the following scenario:
65 * ________________________________________________________________
66 * | tail (partial) page | head (partial) page |
67 * | (belongs to the next work) | (belongs to the current work) |
68 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
70 Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
72 * The current work has been linked with the processed chained works,
73 * and could be also linked with the potential remaining works, which
74 * means if the processing page is the tail partial page of the work,
75 * the current work can safely use the whole page (since the next work
76 * is under control) for in-place decompression, as illustrated below:
77 * ________________________________________________________________
78 * | tail (partial) page | head (partial) page |
79 * | (of the current work) | (of the previous work) |
80 * | PRIMARY_FOLLOWED or | |
81 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
83 * [ (*) the above page can be used for the current work itself. ]
85 Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
89 struct z_erofs_vle_work_builder {
90 enum z_erofs_vle_work_role role;
92 * 'hosted = false' means that the current workgroup doesn't belong to
93 * the owned chained workgroups. In other words, it is none of our
94 * business to submit this workgroup.
98 struct z_erofs_vle_workgroup *grp;
99 struct z_erofs_vle_work *work;
100 struct z_erofs_pagevec_ctor vector;
102 /* pages used for reading the compressed data */
103 struct page **compressed_pages;
104 unsigned compressed_deficit;
107 #define VLE_WORK_BUILDER_INIT() \
108 { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
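/*
 * Typical builder usage, as driven by z_erofs_do_read_page() below:
 * initialize with VLE_WORK_BUILDER_INIT(), call
 * z_erofs_vle_work_iter_begin() for each new physical cluster,
 * z_erofs_vle_work_add_page() for every file page mapped into it,
 * and z_erofs_vle_work_iter_end() once the cluster is fully covered.
 */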
110 #ifdef EROFS_FS_HAS_MANAGED_CACHE
112 static bool grab_managed_cache_pages(struct address_space *mapping,
114 struct page **compressed_pages,
116 bool reserve_allocation)
121 /* TODO: optimize by introducing find_get_pages_range */
122 for (i = 0; i < clusterblks; ++i) {
123 struct page *page, *found;
125 if (READ_ONCE(compressed_pages[i]) != NULL)
128 page = found = find_get_page(mapping, start + i);
131 if (!reserve_allocation)
133 page = EROFS_UNALLOCATED_CACHED_PAGE;
136 if (NULL == cmpxchg(compressed_pages + i, NULL, page))
145 /* called by erofs_shrinker to get rid of all compressed_pages */
146 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
147 struct erofs_workgroup *egrp)
149 struct z_erofs_vle_workgroup *const grp =
150 container_of(egrp, struct z_erofs_vle_workgroup, obj);
151 struct address_space *const mapping = sbi->managed_cache->i_mapping;
152 const int clusterpages = erofs_clusterpages(sbi);
156 * the refcount of the workgroup is now frozen at 1,
157 * therefore no need to worry about available decompression users.
159 for (i = 0; i < clusterpages; ++i) {
160 struct page *page = grp->compressed_pages[i];
162 if (page == NULL || page->mapping != mapping)
165 /* block other users from reclaiming or migrating the page */
166 if (!trylock_page(page))
169 /* barrier is implied in the following 'unlock_page' */
170 WRITE_ONCE(grp->compressed_pages[i], NULL);
172 set_page_private(page, 0);
173 ClearPagePrivate(page);
181 int erofs_try_to_free_cached_page(struct address_space *mapping,
184 struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
185 const unsigned int clusterpages = erofs_clusterpages(sbi);
187 struct z_erofs_vle_workgroup *grp;
188 int ret = 0; /* 0 - busy */
190 /* prevent the workgroup from being freed */
192 grp = (void *)page_private(page);
194 if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
197 for (i = 0; i < clusterpages; ++i) {
198 if (grp->compressed_pages[i] == page) {
199 WRITE_ONCE(grp->compressed_pages[i], NULL);
204 erofs_workgroup_unfreeze(&grp->obj, 1);
209 ClearPagePrivate(page);
216 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
217 static inline bool try_to_reuse_as_compressed_page(
218 struct z_erofs_vle_work_builder *b,
221 while (b->compressed_deficit) {
222 --b->compressed_deficit;
223 if (NULL == cmpxchg(b->compressed_pages++, NULL, page))
230 /* callers must hold work->lock */
231 static int z_erofs_vle_work_add_page(
232 struct z_erofs_vle_work_builder *builder,
234 enum z_erofs_page_type type)
239 /* give priority to the compressed data storage */
240 if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
241 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
242 try_to_reuse_as_compressed_page(builder, page))
245 ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
246 page, type, &occupied);
247 builder->work->vcnt += (unsigned)ret;
249 return ret ? 0 : -EAGAIN;
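/*
 * Note that -EAGAIN here only means the inline pagevec is full;
 * as seen in z_erofs_do_read_page(), the caller then allocates an
 * additional staging page and enqueues it as
 * Z_EROFS_PAGE_TYPE_EXCLUSIVE before going on.
 */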
252 static enum z_erofs_vle_work_role
253 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
254 z_erofs_vle_owned_workgrp_t *owned_head,
257 DBG_BUGON(*hosted);
259 /* let's claim the following types of workgroup */
261 if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
262 /* type 1, nil workgroup */
263 if (Z_EROFS_VLE_WORKGRP_NIL != cmpxchg(&grp->next,
264 Z_EROFS_VLE_WORKGRP_NIL, *owned_head))
269 /* lucky, I am the followee :) */
270 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
272 } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
274 * type 2, link to the end of an existing open chain,
275 * be careful that its submission itself is governed
276 * by the original owned chain.
278 if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
279 Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
281 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
282 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
285 return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
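/*
 * The fallthrough case above means the workgroup has already been
 * claimed by another chain: the caller only gets a plain PRIMARY role
 * and, following the 'hosted' convention above, its submission is
 * none of our business.
 */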
288 static struct z_erofs_vle_work *
289 z_erofs_vle_work_lookup(struct super_block *sb,
290 pgoff_t idx, unsigned pageofs,
291 struct z_erofs_vle_workgroup **grp_ret,
292 enum z_erofs_vle_work_role *role,
293 z_erofs_vle_owned_workgrp_t *owned_head,
297 struct erofs_workgroup *egrp;
298 struct z_erofs_vle_workgroup *grp;
299 struct z_erofs_vle_work *work;
301 egrp = erofs_find_workgroup(sb, idx, &tag);
307 *grp_ret = grp = container_of(egrp,
308 struct z_erofs_vle_workgroup, obj);
310 work = z_erofs_vle_grab_work(grp, pageofs);
311 /* if multiref is disabled, `primary' is always true */
314 if (work->pageofs != pageofs) {
316 erofs_workgroup_put(egrp);
317 return ERR_PTR(-EIO);
321 * lock must be taken first to avoid grp->next == NIL between
322 * claiming workgroup and adding pages:
326 * mutex_lock(&work->lock)
327 * add all pages to pagevec
329 * [correct locking case 1]:
330 * mutex_lock(grp->work[a])
332 * mutex_lock(grp->work[b]) mutex_lock(grp->work[c])
333 * ... *role = SECONDARY
334 * add all pages to pagevec
336 * mutex_unlock(grp->work[c])
337 * mutex_lock(grp->work[c])
342 * [correct locking case 2]:
343 * mutex_lock(grp->work[b])
345 * mutex_lock(grp->work[a])
347 * mutex_lock(grp->work[c])
351 * mutex_lock(grp->work[a])
352 * *role = PRIMARY_OWNER
353 * add all pages to pagevec
356 mutex_lock(&work->lock);
360 *role = Z_EROFS_VLE_WORK_SECONDARY;
361 else /* claim the workgroup if possible */
362 *role = try_to_claim_workgroup(grp, owned_head, hosted);
366 static struct z_erofs_vle_work *
367 z_erofs_vle_work_register(struct super_block *sb,
368 struct z_erofs_vle_workgroup **grp_ret,
369 struct erofs_map_blocks *map,
370 pgoff_t index, unsigned pageofs,
371 enum z_erofs_vle_work_role *role,
372 z_erofs_vle_owned_workgrp_t *owned_head,
376 struct z_erofs_vle_workgroup *grp = *grp_ret;
377 struct z_erofs_vle_work *work;
379 /* if multiref is disabled, grp should never be NULL */
382 return ERR_PTR(-EINVAL);
385 /* no available workgroup, let's allocate one */
386 grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
387 if (unlikely(grp == NULL))
388 return ERR_PTR(-ENOMEM);
390 grp->obj.index = index;
391 grp->llen = map->m_llen;
393 z_erofs_vle_set_workgrp_fmt(grp,
394 (map->m_flags & EROFS_MAP_ZIPPED) ?
395 Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
396 Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
397 atomic_set(&grp->obj.refcount, 1);
399 /* new workgrps have been claimed as type 1 */
400 WRITE_ONCE(grp->next, *owned_head);
401 /* primary and followed work for all new workgrps */
402 *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
403 /* it should be submitted by ourselves */
407 work = z_erofs_vle_grab_primary_work(grp);
408 work->pageofs = pageofs;
410 mutex_init(&work->lock);
413 int err = erofs_register_workgroup(sb, &grp->obj, 0);
416 kmem_cache_free(z_erofs_workgroup_cachep, grp);
417 return ERR_PTR(-EAGAIN);
421 *owned_head = *grp_ret = grp;
423 mutex_lock(&work->lock);
427 static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
431 unsigned int orig_llen = grp->llen;
433 if (orig_llen >= llen || orig_llen ==
434 cmpxchg(&grp->llen, orig_llen, llen))
439 #define builder_is_hooked(builder) \
440 ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
442 #define builder_is_followed(builder) \
443 ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
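/*
 * Both helpers rely on the ordering of enum z_erofs_vle_work_role
 * (SECONDARY < PRIMARY < PRIMARY_HOOKED < PRIMARY_FOLLOWED), so a
 * plain ">=" comparison is enough to tell whether the tail partial
 * page of the current work may be reused as described above.
 */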
445 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
446 struct super_block *sb,
447 struct erofs_map_blocks *map,
448 z_erofs_vle_owned_workgrp_t *owned_head)
450 const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
451 const erofs_blk_t index = erofs_blknr(map->m_pa);
452 const unsigned pageofs = map->m_la & ~PAGE_MASK;
453 struct z_erofs_vle_workgroup *grp;
454 struct z_erofs_vle_work *work;
456 DBG_BUGON(builder->work != NULL);
458 /* must be Z_EROFS_VLE_WORKGRP_TAIL or the next chained work */
459 DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
460 DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
462 DBG_BUGON(erofs_blkoff(map->m_pa));
465 work = z_erofs_vle_work_lookup(sb, index,
466 pageofs, &grp, &builder->role, owned_head, &builder->hosted);
468 __update_workgrp_llen(grp, map->m_llen);
472 work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
473 &builder->role, owned_head, &builder->hosted);
475 if (unlikely(work == ERR_PTR(-EAGAIN)))
478 if (unlikely(IS_ERR(work)))
479 return PTR_ERR(work);
481 z_erofs_pagevec_ctor_init(&builder->vector,
482 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
484 if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
485 /* enable possibly in-place decompression */
486 builder->compressed_pages = grp->compressed_pages;
487 builder->compressed_deficit = clusterpages;
489 builder->compressed_pages = NULL;
490 builder->compressed_deficit = 0;
494 builder->work = work;
499 * keep in mind that referenced workgroups are only freed after
500 * an RCU grace period, so rcu_read_lock() can prevent a workgroup
501 * from being freed.
503 static void z_erofs_rcu_callback(struct rcu_head *head)
505 struct z_erofs_vle_work *work = container_of(head,
506 struct z_erofs_vle_work, rcu);
507 struct z_erofs_vle_workgroup *grp =
508 z_erofs_vle_work_workgroup(work, true);
510 kmem_cache_free(z_erofs_workgroup_cachep, grp);
513 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
515 struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
516 struct z_erofs_vle_workgroup, obj);
517 struct z_erofs_vle_work *const work = &vgrp->work;
519 call_rcu(&work->rcu, z_erofs_rcu_callback);
522 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
523 struct z_erofs_vle_work *work __maybe_unused)
525 erofs_workgroup_put(&grp->obj);
528 void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
530 struct z_erofs_vle_workgroup *grp =
531 z_erofs_vle_work_workgroup(work, true);
533 __z_erofs_vle_work_release(grp, work);
537 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
539 struct z_erofs_vle_work *work = builder->work;
544 z_erofs_pagevec_ctor_exit(&builder->vector, false);
545 mutex_unlock(&work->lock);
548 * once all pending pages are added, don't hold the work reference
549 * any longer if the current work isn't hosted by ourselves.
551 if (!builder->hosted)
552 __z_erofs_vle_work_release(builder->grp, work);
554 builder->work = NULL;
559 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
562 struct page *page = erofs_allocpage(pagepool, gfp);
564 if (unlikely(page == NULL))
567 page->mapping = Z_EROFS_MAPPING_STAGING;
571 struct z_erofs_vle_frontend {
572 struct inode *const inode;
574 struct z_erofs_vle_work_builder builder;
575 struct erofs_map_blocks_iter m_iter;
577 z_erofs_vle_owned_workgrp_t owned_head;
580 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
581 erofs_off_t cachedzone_la;
585 #define VLE_FRONTEND_INIT(__i) { \
588 { .m_llen = 0, .m_plen = 0 }, \
591 .builder = VLE_WORK_BUILDER_INIT(), \
592 .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
595 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
597 struct list_head *page_pool)
599 struct super_block *const sb = fe->inode->i_sb;
600 struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
601 struct erofs_map_blocks_iter *const m = &fe->m_iter;
602 struct erofs_map_blocks *const map = &m->map;
603 struct z_erofs_vle_work_builder *const builder = &fe->builder;
604 const loff_t offset = page_offset(page);
606 bool tight = builder_is_hooked(builder);
607 struct z_erofs_vle_work *work = builder->work;
609 #ifdef EROFS_FS_HAS_MANAGED_CACHE
610 struct address_space *const mngda = sbi->managed_cache->i_mapping;
611 struct z_erofs_vle_workgroup *grp;
612 bool noio_outoforder;
615 enum z_erofs_page_type page_type;
616 unsigned cur, end, spiltted, index;
619 /* register locked file pages as online pages in pack */
620 z_erofs_onlinepage_init(page);
627 /* lucky, within the range of the current map_blocks */
628 if (offset + cur >= map->m_la &&
629 offset + cur < map->m_la + map->m_llen) {
630 /* didn't get a valid unzip work previously (very rare) */
636 /* go on to the next map_blocks */
637 debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
639 if (z_erofs_vle_work_iter_end(builder))
642 map->m_la = offset + cur;
644 err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
649 if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
652 DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
653 DBG_BUGON(erofs_blkoff(map->m_pa));
655 err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
659 #ifdef EROFS_FS_HAS_MANAGED_CACHE
660 grp = fe->builder.grp;
662 /* let's do out-of-order decompression for noio */
663 noio_outoforder = grab_managed_cache_pages(mngda,
664 erofs_blknr(map->m_pa),
665 grp->compressed_pages, erofs_blknr(map->m_plen),
666 /* compressed page caching selection strategy */
667 fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
668 map->m_la < fe->cachedzone_la : 0));
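/*
 * The last argument ('reserve_allocation') is true when fe->initial is
 * set (the start of this read request) and, with
 * EROFS_FS_ZIP_CACHE_LVL >= 2, for positions in front of cachedzone_la;
 * grab_managed_cache_pages() then marks missing slots as
 * EROFS_UNALLOCATED_CACHED_PAGE so that real cache pages get allocated
 * for them at submission time.
 */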
670 if (noio_outoforder && builder_is_followed(builder))
671 builder->role = Z_EROFS_VLE_WORK_PRIMARY;
674 tight &= builder_is_hooked(builder);
675 work = builder->work;
677 cur = end - min_t(unsigned, offset + end - map->m_la, end);
678 if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
679 zero_user_segment(page, cur, end);
683 /* let's derive page type */
684 page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
685 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
686 (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
687 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
690 tight &= builder_is_followed(builder);
693 err = z_erofs_vle_work_add_page(builder, page, page_type);
694 /* should allocate an additional staging page for pagevec */
695 if (err == -EAGAIN) {
696 struct page *const newpage =
697 __stagingpage_alloc(page_pool, GFP_NOFS);
699 err = z_erofs_vle_work_add_page(builder,
700 newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
708 index = page->index - map->m_la / PAGE_SIZE;
710 /* FIXME! avoid the last redundant fixup & endio */
711 z_erofs_onlinepage_fixup(page, index, true);
713 /* bump up the number of split parts of a page */
715 /* also update nr_pages */
716 work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
718 /* can be used for verification */
719 map->m_llen = offset + cur - map->m_la;
726 /* FIXME! avoid the last redundant fixup & endio */
727 z_erofs_onlinepage_endio(page);
729 debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
730 __func__, page, spiltted, map->m_llen);
733 /* if some error occurred while processing this page */
739 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
741 tagptr1_t t = tagptr_init(tagptr1_t, ptr);
742 struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
743 bool background = tagptr_unfold_tags(t);
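/*
 * The single tag bit folded into bio->bi_private selects the
 * completion path: tag 0 wakes up a foreground reader sleeping on
 * io->u.wait, while tag 1 defers decompression to z_erofs_workqueue
 * (see how bi_private is built in z_erofs_vle_submit_all()).
 */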
748 spin_lock_irqsave(&io->u.wait.lock, flags);
749 if (!atomic_add_return(bios, &io->pending_bios))
750 wake_up_locked(&io->u.wait);
751 spin_unlock_irqrestore(&io->u.wait.lock, flags);
755 if (!atomic_add_return(bios, &io->pending_bios))
756 queue_work(z_erofs_workqueue, &io->u.work);
759 static inline void z_erofs_vle_read_endio(struct bio *bio)
761 const blk_status_t err = bio->bi_status;
763 struct bio_vec *bvec;
764 #ifdef EROFS_FS_HAS_MANAGED_CACHE
765 struct address_space *mngda = NULL;
768 bio_for_each_segment_all(bvec, bio, i) {
769 struct page *page = bvec->bv_page;
770 bool cachemngd = false;
772 DBG_BUGON(PageUptodate(page));
773 DBG_BUGON(!page->mapping);
775 #ifdef EROFS_FS_HAS_MANAGED_CACHE
776 if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
777 struct inode *const inode = page->mapping->host;
778 struct super_block *const sb = inode->i_sb;
780 mngda = EROFS_SB(sb)->managed_cache->i_mapping;
784 * If mngda has not been acquired yet, it equals NULL;
785 * however, page->mapping is never NULL if everything works properly.
787 cachemngd = (page->mapping == mngda);
793 SetPageUptodate(page);
799 z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
803 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
804 static DEFINE_MUTEX(z_pagemap_global_lock);
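/*
 * z_erofs_vle_unzip() below picks the cheapest pages[] array that fits
 * nr_pages: the on-stack array first, then this global one (guarded by
 * z_pagemap_global_lock and tried without blocking), and finally
 * kvmalloc_array(); if that allocation fails and nr_pages still fits,
 * it falls back to the global array with a blocking mutex_lock().
 */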
806 static int z_erofs_vle_unzip(struct super_block *sb,
807 struct z_erofs_vle_workgroup *grp,
808 struct list_head *page_pool)
810 struct erofs_sb_info *const sbi = EROFS_SB(sb);
811 #ifdef EROFS_FS_HAS_MANAGED_CACHE
812 struct address_space *const mngda = sbi->managed_cache->i_mapping;
814 const unsigned clusterpages = erofs_clusterpages(sbi);
816 struct z_erofs_pagevec_ctor ctor;
817 unsigned int nr_pages;
818 unsigned int sparsemem_pages = 0;
819 struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
820 struct page **pages, **compressed_pages, *page;
823 enum z_erofs_page_type page_type;
825 struct z_erofs_vle_work *work;
830 work = z_erofs_vle_grab_primary_work(grp);
831 DBG_BUGON(!READ_ONCE(work->nr_pages));
833 mutex_lock(&work->lock);
834 nr_pages = work->nr_pages;
836 if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
837 pages = pages_onstack;
838 else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
839 mutex_trylock(&z_pagemap_global_lock))
840 pages = z_pagemap_global;
843 pages = kvmalloc_array(nr_pages,
844 sizeof(struct page *), GFP_KERNEL);
846 /* fall back to the global pagemap for the lowmem scenario */
847 if (unlikely(pages == NULL)) {
848 if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
851 mutex_lock(&z_pagemap_global_lock);
852 pages = z_pagemap_global;
857 for (i = 0; i < nr_pages; ++i)
860 z_erofs_pagevec_ctor_init(&ctor,
861 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
863 for (i = 0; i < work->vcnt; ++i) {
866 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
868 /* all pages in pagevec ought to be valid */
869 DBG_BUGON(page == NULL);
870 DBG_BUGON(page->mapping == NULL);
872 if (z_erofs_gather_if_stagingpage(page_pool, page))
875 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
878 pagenr = z_erofs_onlinepage_index(page);
880 DBG_BUGON(pagenr >= nr_pages);
881 DBG_BUGON(pages[pagenr]);
883 pages[pagenr] = page;
887 z_erofs_pagevec_ctor_exit(&ctor, true);
890 compressed_pages = grp->compressed_pages;
893 for (i = 0; i < clusterpages; ++i) {
896 page = compressed_pages[i];
898 /* all compressed pages ought to be valid */
899 DBG_BUGON(page == NULL);
900 DBG_BUGON(page->mapping == NULL);
902 if (!z_erofs_is_stagingpage(page)) {
903 #ifdef EROFS_FS_HAS_MANAGED_CACHE
904 if (page->mapping == mngda) {
905 if (unlikely(!PageUptodate(page)))
912 * only non-head pages can be selected
913 * for in-place decompression
915 pagenr = z_erofs_onlinepage_index(page);
917 DBG_BUGON(pagenr >= nr_pages);
918 DBG_BUGON(pages[pagenr]);
920 pages[pagenr] = page;
925 /* PG_error needs checking for in-place and staging pages */
926 if (unlikely(PageError(page))) {
927 DBG_BUGON(PageUptodate(page));
935 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
937 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
938 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
939 pages, nr_pages, work->pageofs);
943 if (llen > grp->llen)
946 err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
947 pages, llen, work->pageofs);
948 if (err != -ENOTSUPP)
951 if (sparsemem_pages >= nr_pages)
954 for (i = 0; i < nr_pages; ++i) {
955 if (pages[i] != NULL)
958 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
962 vout = erofs_vmap(pages, nr_pages);
968 err = z_erofs_vle_unzip_vmap(compressed_pages,
969 clusterpages, vout, llen, work->pageofs, overlapped);
971 erofs_vunmap(vout, nr_pages);
974 /* must handle all compressed pages before ending pages */
975 for (i = 0; i < clusterpages; ++i) {
976 page = compressed_pages[i];
978 #ifdef EROFS_FS_HAS_MANAGED_CACHE
979 if (page->mapping == mngda)
982 /* recycle all individual staging pages */
983 (void)z_erofs_gather_if_stagingpage(page_pool, page);
985 WRITE_ONCE(compressed_pages[i], NULL);
988 for (i = 0; i < nr_pages; ++i) {
993 DBG_BUGON(page->mapping == NULL);
995 /* recycle all individual staging pages */
996 if (z_erofs_gather_if_stagingpage(page_pool, page))
999 if (unlikely(err < 0))
1002 z_erofs_onlinepage_endio(page);
1005 if (pages == z_pagemap_global)
1006 mutex_unlock(&z_pagemap_global_lock);
1007 else if (unlikely(pages != pages_onstack))
1013 /* all work locks MUST be taken before the following line */
1015 WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1017 /* all work locks SHOULD be released right now */
1018 mutex_unlock(&work->lock);
1020 z_erofs_vle_work_release(work);
1024 static void z_erofs_vle_unzip_all(struct super_block *sb,
1025 struct z_erofs_vle_unzip_io *io,
1026 struct list_head *page_pool)
1028 z_erofs_vle_owned_workgrp_t owned = io->head;
1030 while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1031 struct z_erofs_vle_workgroup *grp;
1033 /* it is impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1034 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1036 /* it is impossible that 'owned' equals NULL */
1037 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1040 owned = READ_ONCE(grp->next);
1042 z_erofs_vle_unzip(sb, grp, page_pool);
1046 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1048 struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1049 struct z_erofs_vle_unzip_io_sb, io.u.work);
1050 LIST_HEAD(page_pool);
1052 DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1053 z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1055 put_pages_list(&page_pool);
1059 static inline struct z_erofs_vle_unzip_io *
1060 prepare_io_handler(struct super_block *sb,
1061 struct z_erofs_vle_unzip_io *io,
1064 struct z_erofs_vle_unzip_io_sb *iosb;
1067 /* waitqueue available for foreground io */
1070 init_waitqueue_head(&io->u.wait);
1071 atomic_set(&io->pending_bios, 0);
1078 /* allocate an extra io descriptor for background io */
1079 iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1080 GFP_KERNEL | __GFP_NOFAIL);
1081 BUG_ON(iosb == NULL);
1087 INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1089 io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1093 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1094 /* true - unlocked (noio), false - locked (need submit io) */
1095 static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
1098 wait_on_page_locked(page);
1099 if (PagePrivate(page) && PageUptodate(page))
1103 ClearPageError(page);
1105 if (unlikely(!PagePrivate(page))) {
1106 set_page_private(page, (unsigned long)grp);
1107 SetPagePrivate(page);
1109 if (unlikely(PageUptodate(page))) {
1121 static bool z_erofs_vle_submit_all(struct super_block *sb,
1122 z_erofs_vle_owned_workgrp_t owned_head,
1123 struct list_head *pagepool,
1124 struct z_erofs_vle_unzip_io *fg_io,
1127 struct erofs_sb_info *const sbi = EROFS_SB(sb);
1128 const unsigned clusterpages = erofs_clusterpages(sbi);
1129 const gfp_t gfp = GFP_NOFS;
1130 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1131 struct address_space *const mngda = sbi->managed_cache->i_mapping;
1132 struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
1134 struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
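/*
 * ios[] layout: with EROFS_FS_HAS_MANAGED_CACHE, ios[0] collects
 * workgroups whose compressed pages are all up to date (no I/O
 * needed), while ios[__FSIO_1] owns everything that still needs bio
 * submission; without the managed cache only the latter exists.
 */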
1136 tagptr1_t bi_private;
1137 /* since bio will be NULL, no need to initialize last_index */
1138 pgoff_t uninitialized_var(last_index);
1139 bool force_submit = false;
1142 if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1146 * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io
1147 * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
1149 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1150 ios[0] = prepare_io_handler(sb, fg_io + 0, false);
1154 ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
1155 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
1157 ios[__FSIO_1] = prepare_io_handler(sb, NULL, true);
1158 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1);
1162 force_submit = false;
1165 /* by default, all need io submission */
1166 ios[__FSIO_1]->head = owned_head;
1169 struct z_erofs_vle_workgroup *grp;
1170 struct page **compressed_pages, *oldpage, *page;
1171 pgoff_t first_index;
1173 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1174 unsigned int noio = 0;
1179 /* it is impossible that 'owned_head' equals either of the following */
1180 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1181 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1185 /* close the main owned chain first */
1186 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1187 Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1189 first_index = grp->obj.index;
1190 compressed_pages = grp->compressed_pages;
1192 force_submit |= (first_index != last_index + 1);
1194 /* fill in all compressed page slots */
1195 oldpage = page = READ_ONCE(compressed_pages[i]);
1197 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1200 if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
1203 } else if (page != NULL) {
1204 if (page->mapping != mngda)
1205 BUG_ON(PageUptodate(page));
1206 else if (recover_managed_page(grp, page)) {
1207 /* page is uptodate, skip io submission */
1208 force_submit = true;
1216 BUG_ON(PageUptodate(page));
1219 page = __stagingpage_alloc(pagepool, gfp);
1221 if (oldpage != cmpxchg(compressed_pages + i,
1223 list_add(&page->lru, pagepool);
1225 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1226 } else if (cachemngd && !add_to_page_cache_lru(page,
1227 mngda, first_index + i, gfp)) {
1228 set_page_private(page, (unsigned long)grp);
1229 SetPagePrivate(page);
1234 if (bio != NULL && force_submit) {
1236 __submit_bio(bio, REQ_OP_READ, 0);
1241 bio = prepare_bio(sb, first_index + i,
1242 BIO_MAX_PAGES, z_erofs_vle_read_endio);
1243 bio->bi_private = tagptr_cast_ptr(bi_private);
1248 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1249 if (err < PAGE_SIZE)
1250 goto submit_bio_retry;
1252 force_submit = false;
1253 last_index = first_index + i;
1254 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1257 if (++i < clusterpages)
1260 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1261 if (noio < clusterpages) {
1264 z_erofs_vle_owned_workgrp_t iogrp_next =
1265 owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
1266 Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
1269 if (lstgrp_io == NULL)
1270 ios[1]->head = iogrp_next;
1272 WRITE_ONCE(lstgrp_io->next, iogrp_next);
1274 if (lstgrp_noio == NULL)
1277 WRITE_ONCE(lstgrp_noio->next, grp);
1282 } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1285 __submit_bio(bio, REQ_OP_READ, 0);
1287 #ifndef EROFS_FS_HAS_MANAGED_CACHE
1290 if (lstgrp_noio != NULL)
1291 WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1293 if (!force_fg && !nr_bios) {
1294 kvfree(container_of(ios[1],
1295 struct z_erofs_vle_unzip_io_sb, io));
1300 z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
1304 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1305 struct list_head *pagepool,
1308 struct super_block *sb = f->inode->i_sb;
1309 struct z_erofs_vle_unzip_io io[1 + __FSIO_1];
1311 if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1314 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1315 z_erofs_vle_unzip_all(sb, &io[0], pagepool);
1320 /* wait until all bios are completed */
1321 wait_event(io[__FSIO_1].u.wait,
1322 !atomic_read(&io[__FSIO_1].pending_bios));
1324 /* let's do synchronous decompression */
1325 z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool);
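/*
 * In the force_fg (synchronous) case decompression runs right here in
 * the reader's context once every bio has completed; otherwise it is
 * kicked off to z_erofs_workqueue by z_erofs_vle_unzip_kickoff() when
 * the last bio ends.
 */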
1328 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1331 struct inode *const inode = page->mapping->host;
1332 struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1334 LIST_HEAD(pagepool);
1336 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1337 f.cachedzone_la = page->index << PAGE_SHIFT;
1339 err = z_erofs_do_read_page(&f, page, &pagepool);
1340 (void)z_erofs_vle_work_iter_end(&f.builder);
1342 /* if some compressed clusters are ready, submit them anyway */
1343 z_erofs_submit_and_unzip(&f, &pagepool, true);
1346 errln("%s, failed to read, err [%d]", __func__, err);
1348 if (f.m_iter.mpage != NULL)
1349 put_page(f.m_iter.mpage);
1351 /* clean up the remaining free pages */
1352 put_pages_list(&pagepool);
1356 static inline int __z_erofs_vle_normalaccess_readpages(
1358 struct address_space *mapping,
1359 struct list_head *pages, unsigned nr_pages, bool sync)
1361 struct inode *const inode = mapping->host;
1363 struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1364 gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1365 struct page *head = NULL;
1366 LIST_HEAD(pagepool);
1368 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1369 f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
1371 for (; nr_pages; --nr_pages) {
1372 struct page *page = lru_to_page(pages);
1374 prefetchw(&page->flags);
1375 list_del(&page->lru);
1377 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1378 list_add(&page->lru, &pagepool);
1382 set_page_private(page, (unsigned long)head);
1386 while (head != NULL) {
1387 struct page *page = head;
1390 /* traversal in reverse order */
1391 head = (void *)page_private(page);
1393 err = z_erofs_do_read_page(&f, page, &pagepool);
1395 struct erofs_vnode *vi = EROFS_V(inode);
1397 errln("%s, readahead error at page %lu of nid %llu",
1398 __func__, page->index, vi->nid);
1404 (void)z_erofs_vle_work_iter_end(&f.builder);
1406 z_erofs_submit_and_unzip(&f, &pagepool, sync);
1408 if (f.m_iter.mpage != NULL)
1409 put_page(f.m_iter.mpage);
1411 /* clean up the remaining free pages */
1412 put_pages_list(&pagepool);
1416 static int z_erofs_vle_normalaccess_readpages(
1418 struct address_space *mapping,
1419 struct list_head *pages, unsigned nr_pages)
1421 return __z_erofs_vle_normalaccess_readpages(filp,
1422 mapping, pages, nr_pages,
1423 nr_pages < 4 /* sync */);
1426 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1427 .readpage = z_erofs_vle_normalaccess_readpage,
1428 .readpages = z_erofs_vle_normalaccess_readpages,
1431 #define __vle_cluster_advise(x, bit, bits) \
1432 ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
1434 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
1435 Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
1438 Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
1439 Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
1440 Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
1441 Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
1442 Z_EROFS_VLE_CLUSTER_TYPE_MAX
1445 #define vle_cluster_type(di) \
1446 __vle_cluster_type((di)->di_advise)
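/*
 * Illustration (assuming Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT == 0 and
 * Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS == 2, purely for the example): a
 * little-endian di_advise of 0x0002 decodes to
 * Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD, since the type is simply
 * (le16_to_cpu(di_advise) >> BIT) & ((1 << BITS) - 1).
 */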
1448 static inline unsigned
1449 vle_compressed_index_clusterofs(unsigned clustersize,
1450 struct z_erofs_vle_decompressed_index *di)
1452 debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
1453 __func__, di, di->di_advise, vle_cluster_type(di),
1454 di->di_clusterofs, di->di_u.blkaddr);
1456 switch (vle_cluster_type(di)) {
1457 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1459 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1460 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1461 return di->di_clusterofs;
1468 static inline erofs_blk_t
1469 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1471 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1472 struct erofs_vnode *vi = EROFS_V(inode);
1474 unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1475 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1476 index * sizeof(struct z_erofs_vle_decompressed_index);
1478 return erofs_blknr(iloc(sbi, vi->nid) + ofs);
1481 static inline unsigned int
1482 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1484 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1485 struct erofs_vnode *vi = EROFS_V(inode);
1487 unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1488 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1489 index * sizeof(struct z_erofs_vle_decompressed_index);
1491 return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
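/*
 * Both helpers above locate the on-disk z_erofs_vle_decompressed_index
 * of a given logical cluster: the index array starts right after the
 * (extent-aligned) inode base plus xattrs and the extent header, and
 * the resulting byte position is split into a metadata block number
 * (vle_extent_blkaddr) and an offset within that block
 * (vle_extent_blkoff).
 */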
1495 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
1497 * VLE compression mode attempts to compress a variable amount of logical
1498 * data into a physical cluster of fixed size.
1499 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
1501 static erofs_off_t vle_get_logical_extent_head(
1502 struct inode *inode,
1503 struct page **page_iter,
1505 unsigned lcn, /* logical cluster number */
1509 /* for extent meta */
1510 struct page *page = *page_iter;
1511 erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
1512 struct z_erofs_vle_decompressed_index *di;
1513 unsigned long long ofs;
1514 const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
1515 const unsigned int clustersize = 1 << clusterbits;
1516 unsigned int delta0;
1518 if (page->index != blkaddr) {
1519 kunmap_atomic(*kaddr_iter);
1523 *page_iter = page = erofs_get_meta_page(inode->i_sb,
1525 *kaddr_iter = kmap_atomic(page);
1528 di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
1529 switch (vle_cluster_type(di)) {
1530 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1531 delta0 = le16_to_cpu(di->di_u.delta[0]);
1533 DBG_BUGON(lcn < delta0);
1535 ofs = vle_get_logical_extent_head(inode,
1536 page_iter, kaddr_iter,
1537 lcn - delta0, pcn, flags);
1539 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1540 *flags ^= EROFS_MAP_ZIPPED;
1541 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1542 /* clustersize should be a power of two */
1543 ofs = ((unsigned long long)lcn << clusterbits) +
1544 (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
1545 *pcn = le32_to_cpu(di->di_u.blkaddr);
1553 int z_erofs_map_blocks_iter(struct inode *inode,
1554 struct erofs_map_blocks *map,
1555 struct page **mpage_ret, int flags)
1557 /* logical extent (start, end) offset */
1558 unsigned long long ofs, end;
1559 struct z_erofs_vle_decompressed_index *di;
1560 erofs_blk_t e_blkaddr, pcn;
1561 unsigned lcn, logical_cluster_ofs, cluster_type;
1563 struct page *mpage = *mpage_ret;
1566 const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
1567 const unsigned int clustersize = 1 << clusterbits;
1570 /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
1571 initial = !map->m_llen;
1573 /* when trying to read beyond EOF, leave it unmapped */
1574 if (unlikely(map->m_la >= inode->i_size)) {
1576 map->m_llen = map->m_la + 1 - inode->i_size;
1577 map->m_la = inode->i_size - 1;
1582 debugln("%s, m_la %llu m_llen %llu --- start", __func__,
1583 map->m_la, map->m_llen);
1585 ofs = map->m_la + map->m_llen;
1587 /* clustersize should be a power of two */
1588 lcn = ofs >> clusterbits;
1589 ofs_rem = ofs & (clustersize - 1);
1591 e_blkaddr = vle_extent_blkaddr(inode, lcn);
1593 if (mpage == NULL || mpage->index != e_blkaddr) {
1597 mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
1601 DBG_BUGON(!PageUptodate(mpage));
1604 kaddr = kmap_atomic(mpage);
1605 di = kaddr + vle_extent_blkoff(inode, lcn);
1607 debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
1608 e_blkaddr, vle_extent_blkoff(inode, lcn));
1610 logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
1612 /* [walking mode] 'map' has already been initialized */
1613 map->m_llen += logical_cluster_ofs;
1617 /* by default, compressed */
1618 map->m_flags |= EROFS_MAP_ZIPPED;
1620 end = (u64)(lcn + 1) * clustersize;
1622 cluster_type = vle_cluster_type(di);
1624 switch (cluster_type) {
1625 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1626 if (ofs_rem >= logical_cluster_ofs)
1627 map->m_flags ^= EROFS_MAP_ZIPPED;
1629 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1630 if (ofs_rem == logical_cluster_ofs) {
1631 pcn = le32_to_cpu(di->di_u.blkaddr);
1635 if (ofs_rem > logical_cluster_ofs) {
1636 ofs = lcn * clustersize | logical_cluster_ofs;
1637 pcn = le32_to_cpu(di->di_u.blkaddr);
1641 /* logical cluster number should be >= 1 */
1642 if (unlikely(!lcn)) {
1643 errln("invalid logical cluster 0 at nid %llu",
1644 EROFS_V(inode)->nid);
1648 end = (lcn-- * clustersize) | logical_cluster_ofs;
1650 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1651 /* get the corresponding first chunk */
1652 ofs = vle_get_logical_extent_head(inode, mpage_ret,
1653 &kaddr, lcn, &pcn, &map->m_flags);
1657 errln("unknown cluster type %u at offset %llu of nid %llu",
1658 cluster_type, ofs, EROFS_V(inode)->nid);
1665 map->m_llen = end - ofs;
1666 map->m_plen = clustersize;
1667 map->m_pa = blknr_to_addr(pcn);
1668 map->m_flags |= EROFS_MAP_MAPPED;
1670 kunmap_atomic(kaddr);
1673 debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
1674 __func__, map->m_la, map->m_pa,
1675 map->m_llen, map->m_plen, map->m_flags);
1677 /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */