// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"
#include <linux/prefetch.h>

static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	BUG_ON(z_erofs_workqueue == NULL);
	BUG_ON(z_erofs_workgroup_cachep == NULL);

	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(z_erofs_workgroup_cachep);
}

static inline int init_unzip_workqueue(void)
{
	const unsigned onlinecpus = num_possible_cpus();

	/*
	 * we don't need too many threads, limiting threads
	 * could improve scheduling performance.
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
		WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
		onlinecpus + onlinecpus / 4);

	return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
}

int z_erofs_init_zip_subsystem(void)
{
	z_erofs_workgroup_cachep =
		kmem_cache_create("erofs_compress",
				  Z_EROFS_WORKGROUP_SIZE, 0,
				  SLAB_RECLAIM_ACCOUNT, NULL);

	if (z_erofs_workgroup_cachep != NULL) {
		if (!init_unzip_workqueue())
			return 0;

		kmem_cache_destroy(z_erofs_workgroup_cachep);
	}
	return -ENOMEM;
}

enum z_erofs_vle_work_role {
	Z_EROFS_VLE_WORK_SECONDARY,
	Z_EROFS_VLE_WORK_PRIMARY,

	/*
	 * The current work has at least been linked with the following
	 * processed chained works, which means if the processing page
	 * is the tail partial page of the work, the current work can
	 * safely use the whole page, as illustrated below:
	 *  +--------------+-------------------------------------------+
	 *  |  tail page   |      head page (of the previous work)     |
	 *  +--------------+-------------------------------------------+
	 *    /\  which belongs to the current work
	 * [  (*) this page can be used for the current work itself.  ]
	 */
	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
};

struct z_erofs_vle_work_builder {
	enum z_erofs_vle_work_role role;
	/*
	 * 'hosted = false' means that the current workgroup doesn't belong to
	 * the owned chained workgroups. In other words, it is none of our
	 * business to submit this workgroup.
	 */
	bool hosted;

	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;
	struct z_erofs_pagevec_ctor vector;

	/* pages used for reading the compressed data */
	struct page **compressed_pages;
	unsigned compressed_deficit;
};

#define VLE_WORK_BUILDER_INIT()	\
	{ .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
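
/*
 * Illustrative usage sketch (not verbatim from the read path below, but it
 * only uses helpers defined later in this file): a caller initializes a
 * builder, binds it to the workgroup covering the current extent, queues the
 * file pages, and finally ends the iteration:
 *
 *	struct z_erofs_vle_work_builder builder = VLE_WORK_BUILDER_INIT();
 *
 *	err = z_erofs_vle_work_iter_begin(&builder, sb, map, &owned_head);
 *	if (!err)
 *		err = z_erofs_vle_work_add_page(&builder, page, page_type);
 *	...
 *	z_erofs_vle_work_iter_end(&builder);
 */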

#ifdef EROFS_FS_HAS_MANAGED_CACHE
static bool grab_managed_cache_pages(struct address_space *mapping,
				     erofs_blk_t start,
				     struct page **compressed_pages,
				     int clusterblks,
				     bool reserve_allocation)
{
	unsigned int i;

	/* TODO: optimize by introducing find_get_pages_range */
	for (i = 0; i < clusterblks; ++i) {
		struct page *page, *found;

		if (READ_ONCE(compressed_pages[i]) != NULL)
			continue;

		page = found = find_get_page(mapping, start + i);
		if (found == NULL) {
			if (!reserve_allocation)
				continue;
			page = EROFS_UNALLOCATED_CACHED_PAGE;
		}

		if (NULL == cmpxchg(compressed_pages + i, NULL, page))
			continue;

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp)
{
	struct z_erofs_vle_workgroup *const grp =
		container_of(egrp, struct z_erofs_vle_workgroup, obj);
	struct address_space *const mapping = sbi->managed_cache->i_mapping;
	const int clusterpages = erofs_clusterpages(sbi);
	int i;

	/*
	 * the refcount of the workgroup is now frozen to 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = grp->compressed_pages[i];

		if (page == NULL || page->mapping != mapping)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(grp->compressed_pages[i], NULL);

		set_page_private(page, 0);
		ClearPagePrivate(page);

int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_vle_workgroup *grp;
	int ret = 0;	/* 0 - busy */

	/* prevent the workgroup from being freed */
	grp = (void *)page_private(page);

	if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (grp->compressed_pages[i] == page)
				WRITE_ONCE(grp->compressed_pages[i], NULL);
		}
		erofs_workgroup_unfreeze(&grp->obj, 1);
	}

	ClearPagePrivate(page);

/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static inline bool try_to_reuse_as_compressed_page(
	struct z_erofs_vle_work_builder *b,
	struct page *page)
{
	while (b->compressed_deficit) {
		--b->compressed_deficit;
		if (NULL == cmpxchg(b->compressed_pages++, NULL, page))
			return true;
	}
	return false;
}

/* callers must hold work->lock */
static int z_erofs_vle_work_add_page(
	struct z_erofs_vle_work_builder *builder,
	struct page *page,
	enum z_erofs_page_type type)
{
	int ret;
	bool occupied;

	/* give priority to the compressed data storage */
	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    try_to_reuse_as_compressed_page(builder, page))
		return 0;

	ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
		page, type, &occupied);
	builder->work->vcnt += (unsigned)ret;

	return ret ? 0 : -EAGAIN;
}

static inline bool try_to_claim_workgroup(
	struct z_erofs_vle_workgroup *grp,
	z_erofs_vle_owned_workgrp_t *owned_head,
	bool *hosted)
{
	DBG_BUGON(*hosted == true);

	/* let's claim the following types of workgroup */
retry:
	if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
		/* type 1, nil workgroup */
		if (Z_EROFS_VLE_WORKGRP_NIL != cmpxchg(&grp->next,
			Z_EROFS_VLE_WORKGRP_NIL, *owned_head))
			goto retry;

		*owned_head = grp;
		*hosted = true;
	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
			Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
			goto retry;

		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
	} else
		return false;	/* :( better luck next time */

	return true;	/* lucky, I am the followee :) */
}
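
/*
 * A rough picture of the two claimable cases handled above (a sketch of the
 * usual owned-chain layout; 'owned_head' is the chain this thread will submit
 * by itself):
 *
 *   type 1: grp->next == NIL   -->  grp->next = *owned_head,
 *           grp becomes the new head of our owned chain ('hosted' = true).
 *
 *   type 2: grp->next == TAIL  -->  grp->next = *owned_head,
 *           grp is appended to another open chain; its submission is still
 *           governed by that chain, so *owned_head is reset to TAIL and
 *           'hosted' stays false.
 *
 * Any other value of grp->next means the workgroup already belongs to a
 * closed chain and cannot be claimed here.
 */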

static struct z_erofs_vle_work *
z_erofs_vle_work_lookup(struct super_block *sb,
			pgoff_t idx, unsigned pageofs,
			struct z_erofs_vle_workgroup **grp_ret,
			enum z_erofs_vle_work_role *role,
			z_erofs_vle_owned_workgrp_t *owned_head,
			bool *hosted)
{
	bool tag, primary;
	struct erofs_workgroup *egrp;
	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;

	egrp = erofs_find_workgroup(sb, idx, &tag);
	if (egrp == NULL) {
		*grp_ret = NULL;
		return NULL;
	}

	*grp_ret = grp = container_of(egrp,
		struct z_erofs_vle_workgroup, obj);

#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
	work = z_erofs_vle_grab_work(grp, pageofs);
	primary = true;
#endif

	DBG_BUGON(work->pageofs != pageofs);

	/*
	 * lock must be taken first to avoid grp->next == NIL between
	 * claiming the workgroup and adding pages:
	 *
	 * both correct locking orders come down to the same rule -- take
	 * mutex_lock(&work->lock) first, then decide the role (SECONDARY,
	 * PRIMARY or PRIMARY_FOLLOWED) and add all pages to the pagevec
	 * while still holding the lock, so that a thread locking the same
	 * work later always observes a fully claimed workgroup.
	 */
	mutex_lock(&work->lock);

	if (!primary)
		*role = Z_EROFS_VLE_WORK_SECONDARY;
	/* claim the workgroup if possible */
	else if (try_to_claim_workgroup(grp, owned_head, hosted))
		*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
	else
		*role = Z_EROFS_VLE_WORK_PRIMARY;

	return work;
}

static struct z_erofs_vle_work *
z_erofs_vle_work_register(struct super_block *sb,
			  struct z_erofs_vle_workgroup **grp_ret,
			  struct erofs_map_blocks *map,
			  pgoff_t index, unsigned pageofs,
			  enum z_erofs_vle_work_role *role,
			  z_erofs_vle_owned_workgrp_t *owned_head,
			  bool *hosted)
{
	struct z_erofs_vle_workgroup *grp = *grp_ret;
	struct z_erofs_vle_work *work;

#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
	BUG_ON(grp != NULL);
#endif

	/* no available workgroup, let's allocate one */
	grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
	if (unlikely(grp == NULL))
		return ERR_PTR(-ENOMEM);

	grp->obj.index = index;
	grp->llen = map->m_llen;

	z_erofs_vle_set_workgrp_fmt(grp,
		(map->m_flags & EROFS_MAP_ZIPPED) ?
			Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
			Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
	atomic_set(&grp->obj.refcount, 1);

	/* new workgrps have been claimed as type 1 */
	WRITE_ONCE(grp->next, *owned_head);
	/* primary and followed work for all new workgrps */
	*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
	/* it should be submitted by ourselves */
	*hosted = true;

#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
	/* currently unimplemented */
	BUG();
#else
	work = z_erofs_vle_grab_primary_work(grp);
#endif
	work->pageofs = pageofs;

	mutex_init(&work->lock);

	int err = erofs_register_workgroup(sb, &grp->obj, 0);

	if (err) {
		kmem_cache_free(z_erofs_workgroup_cachep, grp);
		return ERR_PTR(-EAGAIN);
	}

	*owned_head = *grp_ret = grp;

	mutex_lock(&work->lock);
	return work;
}

static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
					 unsigned int llen)
{
	unsigned int orig_llen = grp->llen;

	if (orig_llen >= llen || orig_llen ==
		cmpxchg(&grp->llen, orig_llen, llen))
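
/*
 * A cmpxchg() used this way implements a lock-free "store maximum" on
 * grp->llen: the comparison is retried until either the stored length is
 * already large enough or this thread's value wins the race.  A minimal
 * userspace sketch of the same pattern (illustrative only, not kernel code):
 *
 *	static void store_max(atomic_uint *v, unsigned int llen)
 *	{
 *		unsigned int cur = atomic_load(v);
 *
 *		while (cur < llen &&
 *		       !atomic_compare_exchange_weak(v, &cur, llen))
 *			;	// 'cur' is refreshed on each failure
 *	}
 */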

#define builder_is_followed(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
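
/*
 * Note: the ">=" role comparisons here and in the helpers above rely on the
 * declaration order of enum z_erofs_vle_work_role (SECONDARY < PRIMARY <
 * PRIMARY_FOLLOWED), so a larger value always implies a "stronger" ownership
 * of the work.
 */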

static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
				       struct super_block *sb,
				       struct erofs_map_blocks *map,
				       z_erofs_vle_owned_workgrp_t *owned_head)
{
	const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
	const erofs_blk_t index = erofs_blknr(map->m_pa);
	const unsigned pageofs = map->m_la & ~PAGE_MASK;
	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;

	DBG_BUGON(builder->work != NULL);

	/* must be Z_EROFS_VLE_WORKGRP_TAIL or a chained workgroup */
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	DBG_BUGON(erofs_blkoff(map->m_pa));

repeat:
	work = z_erofs_vle_work_lookup(sb, index,
		pageofs, &grp, &builder->role, owned_head, &builder->hosted);
	if (work != NULL) {
		__update_workgrp_llen(grp, map->m_llen);
		goto got_it;
	}

	work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
		&builder->role, owned_head, &builder->hosted);

	if (unlikely(work == ERR_PTR(-EAGAIN)))
		goto repeat;

	if (unlikely(IS_ERR(work)))
		return PTR_ERR(work);
got_it:
	z_erofs_pagevec_ctor_init(&builder->vector,
		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);

	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
		/* enable possibly in-place decompression */
		builder->compressed_pages = grp->compressed_pages;
		builder->compressed_deficit = clusterpages;
	} else {
		builder->compressed_pages = NULL;
		builder->compressed_deficit = 0;
	}

	builder->grp = grp;
	builder->work = work;
	return 0;
}

/*
 * keep in mind that referenced workgroups are only freed after an RCU grace
 * period, so rcu_read_lock() can prevent a workgroup from being freed.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_vle_work *work = container_of(head,
		struct z_erofs_vle_work, rcu);
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	kmem_cache_free(z_erofs_workgroup_cachep, grp);
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
		struct z_erofs_vle_workgroup, obj);
	struct z_erofs_vle_work *const work = &vgrp->work;

	call_rcu(&work->rcu, z_erofs_rcu_callback);
}

static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
	struct z_erofs_vle_work *work __maybe_unused)
{
	erofs_workgroup_put(&grp->obj);
}

void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
{
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	__z_erofs_vle_work_release(grp, work);
}

static inline bool
z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
{
	struct z_erofs_vle_work *work = builder->work;

	if (work == NULL)
		return false;

	z_erofs_pagevec_ctor_exit(&builder->vector, false);
	mutex_unlock(&work->lock);

	/*
	 * if all pending pages are added, don't hold the work reference
	 * any longer if the current work isn't hosted by ourselves.
	 */
	if (!builder->hosted)
		__z_erofs_vle_work_release(builder->grp, work);

	builder->work = NULL;
	return true;
}

static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp);

	if (unlikely(page == NULL))
		return NULL;

	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}

struct z_erofs_vle_frontend {
	struct inode *const inode;

	struct z_erofs_vle_work_builder builder;
	struct erofs_map_blocks_iter m_iter;

	z_erofs_vle_owned_workgrp_t owned_head;

	bool initial;
#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
	erofs_off_t cachedzone_la;
#endif
};

#define VLE_FRONTEND_INIT(__i) { \
	.inode = __i, \
	.m_iter = { \
		{ .m_llen = 0, .m_plen = 0 }, \
	}, \
	.builder = VLE_WORK_BUILDER_INIT(), \
	.owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
	.initial = true, }

static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
				struct page *page,
				struct list_head *page_pool)
{
	struct super_block *const sb = fe->inode->i_sb;
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
	struct erofs_map_blocks_iter *const m = &fe->m_iter;
	struct erofs_map_blocks *const map = &m->map;
	struct z_erofs_vle_work_builder *const builder = &fe->builder;
	const loff_t offset = page_offset(page);

	bool tight = builder_is_followed(builder);
	struct z_erofs_vle_work *work = builder->work;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *const mngda = sbi->managed_cache->i_mapping;
	struct z_erofs_vle_workgroup *grp;
	bool noio_outoforder;
#endif

	enum z_erofs_page_type page_type;
	unsigned cur, end, spiltted, index;
	int err;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen)

	/* move on to the next map_blocks */
	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (!z_erofs_vle_work_iter_end(builder))

	map->m_la = offset + cur;

	err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);

	/* deal with hole (FIXME! broken now) */
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))

	DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
	BUG_ON(erofs_blkoff(map->m_pa));

	err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	grp = fe->builder.grp;

	/* let's do out-of-order decompression for noio */
	noio_outoforder = grab_managed_cache_pages(mngda,
		erofs_blknr(map->m_pa),
		grp->compressed_pages, erofs_blknr(map->m_plen),
		/* compressed page caching selection strategy */
		fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
			map->m_la < fe->cachedzone_la : 0));

	if (noio_outoforder && builder_is_followed(builder))
		builder->role = Z_EROFS_VLE_WORK_PRIMARY;
#endif

	tight &= builder_is_followed(builder);
	work = builder->work;

	cur = end - min_t(unsigned, offset + end - map->m_la, end);
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
		zero_user_segment(page, cur, end);
	}

	/* let's derive page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	err = z_erofs_vle_work_add_page(builder, page, page_type);
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(page_pool, GFP_NOFS);

		err = z_erofs_vle_work_add_page(builder,
			newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
	}

	index = page->index - map->m_la / PAGE_SIZE;

	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_fixup(page, index, true);

	/* also update nr_pages and increase queued_pages */
	work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);

	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_endio(page);

	debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		__func__, page, spiltted, map->m_llen);

	/* TODO: the missing error handling cases */

static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
{
	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
	bool background = tagptr_unfold_tags(t);

	if (atomic_add_return(bios, &io->pending_bios))
		return;

	if (background)
		queue_work(z_erofs_workqueue, &io->u.work);
	else
		wake_up(&io->u.wait);
}
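
/*
 * 'ptr' carries both the io descriptor and a one-bit "background" flag packed
 * into a tagptr1_t.  A short sketch of how such a tagged pointer is built and
 * consumed with the tagptr helpers already used in this file:
 *
 *	tagptr1_t t = tagptr_fold(tagptr1_t, io, 1);	 // tag 1 = background
 *	...
 *	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
 *	bool background = tagptr_unfold_tags(t);
 */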

static inline void z_erofs_vle_read_endio(struct bio *bio)
{
	const blk_status_t err = bio->bi_status;
	unsigned i;
	struct bio_vec *bvec;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *mngda = NULL;
#endif

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		bool cachemngd = false;

		DBG_BUGON(PageUptodate(page));
		BUG_ON(page->mapping == NULL);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
			struct inode *const inode = page->mapping->host;
			struct super_block *const sb = inode->i_sb;

			mngda = EROFS_SB(sb)->managed_cache->i_mapping;
		}

		/*
		 * If mngda has not been acquired yet, it equals NULL;
		 * however, page->mapping is never NULL if the code is
		 * working properly.
		 */
		cachemngd = (page->mapping == mngda);
#endif

		SetPageUptodate(page);
	}

	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);

static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);

static int z_erofs_vle_unzip(struct super_block *sb,
			     struct z_erofs_vle_workgroup *grp,
			     struct list_head *page_pool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *const mngda = sbi->managed_cache->i_mapping;
#endif
	const unsigned clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_pagevec_ctor ctor;
	unsigned nr_pages;
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
	unsigned sparsemem_pages = 0;
#endif
	struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;
	unsigned i, llen, pagenr;

	enum z_erofs_page_type page_type;
	bool overlapped;
	struct z_erofs_vle_work *work;
	void *vout;
	int err;

#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
	work = z_erofs_vle_grab_primary_work(grp);
#endif
	BUG_ON(!READ_ONCE(work->nr_pages));

	mutex_lock(&work->lock);
	nr_pages = work->nr_pages;

	if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
		pages = pages_onstack;
	else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
		 mutex_trylock(&z_pagemap_global_lock))
		pages = z_pagemap_global;
	else
		pages = kvmalloc_array(nr_pages,
			sizeof(struct page *), GFP_KERNEL);

	/* fallback to global pagemap for the lowmem scenario */
	if (unlikely(pages == NULL)) {
		if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)

		mutex_lock(&z_pagemap_global_lock);
		pages = z_pagemap_global;
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	z_erofs_pagevec_ctor_init(&ctor,
		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);

	for (i = 0; i < work->vcnt; ++i) {
		page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(page == NULL);
		DBG_BUGON(page->mapping == NULL);

		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		BUG_ON(pagenr >= nr_pages);

#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
		BUG_ON(pages[pagenr] != NULL);
#endif
		pages[pagenr] = page;
	}

	z_erofs_pagevec_ctor_exit(&ctor, true);

	compressed_pages = grp->compressed_pages;

	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(page == NULL);
		DBG_BUGON(page->mapping == NULL);

		if (z_erofs_is_stagingpage(page))
			continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
		else if (page->mapping == mngda) {
			BUG_ON(PageLocked(page));
			BUG_ON(!PageUptodate(page));
			continue;
		}
#endif

		/* only a non-head page can be reused as a compressed page */
		pagenr = z_erofs_onlinepage_index(page);

		BUG_ON(pagenr >= nr_pages);
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
		BUG_ON(pages[pagenr] != NULL);
#endif
		pages[pagenr] = page;
	}

	llen = (nr_pages << PAGE_SHIFT) - work->pageofs;

	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
		/* FIXME! this should be fixed in the future */
		BUG_ON(grp->llen != llen);

		err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
			pages, nr_pages, work->pageofs);
	}

	if (llen > grp->llen)
		llen = grp->llen;

	err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
		clusterpages, pages, llen, work->pageofs,
		z_erofs_onlinepage_endio);
	if (err != -ENOTSUPP)

#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
	if (sparsemem_pages >= nr_pages) {
		BUG_ON(sparsemem_pages > nr_pages);
	}
#endif

	for (i = 0; i < nr_pages; ++i) {
		if (pages[i] != NULL)
			continue;

		pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
	}

#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
	vout = erofs_vmap(pages, nr_pages);

	err = z_erofs_vle_unzip_vmap(compressed_pages,
		clusterpages, vout, llen, work->pageofs, overlapped);

	erofs_vunmap(vout, nr_pages);
#endif

	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		DBG_BUGON(page->mapping == NULL);

		/* recycle all individual staging pages */
		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (unlikely(err < 0))
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (page->mapping == mngda)
			continue;
#endif
		/* recycle all individual staging pages */
		(void)z_erofs_gather_if_stagingpage(page_pool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (unlikely(pages != pages_onstack))
		kvfree(pages);

	/* all work locks MUST be taken before the following line */

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);

	/* all work locks SHOULD be released right now */
	mutex_unlock(&work->lock);

	z_erofs_vle_work_release(work);

static void z_erofs_vle_unzip_all(struct super_block *sb,
				  struct z_erofs_vle_unzip_io *io,
				  struct list_head *page_pool)
{
	z_erofs_vle_owned_workgrp_t owned = io->head;

	while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
		struct z_erofs_vle_workgroup *grp;

		/* impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);

		/* impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_NIL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);

		grp = owned;
		owned = READ_ONCE(grp->next);

		z_erofs_vle_unzip(sb, grp, page_pool);
	}
}

static void z_erofs_vle_unzip_wq(struct work_struct *work)
{
	struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
		struct z_erofs_vle_unzip_io_sb, io.u.work);
	LIST_HEAD(page_pool);

	BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);

	put_pages_list(&page_pool);
	kvfree(iosb);
}

static inline struct z_erofs_vle_unzip_io *
prepare_io_handler(struct super_block *sb,
		   struct z_erofs_vle_unzip_io *io,
		   bool background)
{
	struct z_erofs_vle_unzip_io_sb *iosb;

	if (!background) {
		/* waitqueue available for foreground io */
		init_waitqueue_head(&io->u.wait);
		atomic_set(&io->pending_bios, 0);
		goto out;
	}

	/* allocate extra io descriptor for background io */
	iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
		GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(iosb == NULL);

	io = &iosb->io;
	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
out:
	io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
	return io;
}

#ifdef EROFS_FS_HAS_MANAGED_CACHE
/* true - unlocked (noio), false - locked (need submit io) */
static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
					struct page *page)
{
	wait_on_page_locked(page);
	if (PagePrivate(page) && PageUptodate(page))
		return true;

	lock_page(page);
	if (unlikely(!PagePrivate(page))) {
		set_page_private(page, (unsigned long)grp);
		SetPagePrivate(page);
	}

	if (unlikely(PageUptodate(page))) {

static bool z_erofs_vle_submit_all(struct super_block *sb,
				   z_erofs_vle_owned_workgrp_t owned_head,
				   struct list_head *pagepool,
				   struct z_erofs_vle_unzip_io *fg_io,
				   bool force_fg)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned clusterpages = erofs_clusterpages(sbi);
	const gfp_t gfp = GFP_NOFS;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *const mngda = sbi->managed_cache->i_mapping;
	struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
#endif
	struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
	struct bio *bio = NULL;
	tagptr1_t bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned nr_bios = 0;

	if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
		return false;

	/*
	 * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io
	 * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
	 */
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	ios[0] = prepare_io_handler(sb, fg_io + 0, false);
#endif

	if (force_fg) {
		ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
		bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
	} else {
		ios[__FSIO_1] = prepare_io_handler(sb, NULL, true);
		bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1);
	}

	force_submit = false;

	/* by default, all need io submission */
	ios[__FSIO_1]->head = owned_head;

	do {
		struct z_erofs_vle_workgroup *grp;
		struct page **compressed_pages, *oldpage, *page;
		pgoff_t first_index;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
		unsigned int noio = 0;
#endif

		/* 'owned_head' should never equal either of the following */
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);

		grp = owned_head;

		/* close the main owned chain first */
		owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
			Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

		first_index = grp->obj.index;
		compressed_pages = grp->compressed_pages;

		force_submit |= (first_index != last_index + 1);

		/* fulfill all compressed pages */
		oldpage = page = READ_ONCE(compressed_pages[i]);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (page == EROFS_UNALLOCATED_CACHED_PAGE) {

		} else if (page != NULL) {
			if (page->mapping != mngda)
				BUG_ON(PageUptodate(page));
			else if (recover_managed_page(grp, page)) {
				/* page is uptodate, skip io submission */
				force_submit = true;
			}
		}
#endif
		BUG_ON(PageUptodate(page));

		page = __stagingpage_alloc(pagepool, gfp);

		if (oldpage != cmpxchg(compressed_pages + i,
				       oldpage, page)) {
			list_add(&page->lru, pagepool);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
		} else if (cachemngd && !add_to_page_cache_lru(page,
			mngda, first_index + i, gfp)) {
			set_page_private(page, (unsigned long)grp);
			SetPagePrivate(page);
#endif
		}

submit_bio_retry:
		if (bio != NULL && force_submit) {
			__submit_bio(bio, REQ_OP_READ, 0);
			bio = NULL;
		}

		if (bio == NULL) {
			bio = prepare_bio(sb, first_index + i,
				BIO_MAX_PAGES, z_erofs_vle_read_endio);
			bio->bi_private = tagptr_cast_ptr(bi_private);

			++nr_bios;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (err < PAGE_SIZE)
			goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
#ifdef EROFS_FS_HAS_MANAGED_CACHE

		if (++i < clusterpages)

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (noio < clusterpages) {

			z_erofs_vle_owned_workgrp_t iogrp_next =
				owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
				Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
				owned_head;

			if (lstgrp_io == NULL)
				ios[1]->head = iogrp_next;
			else
				WRITE_ONCE(lstgrp_io->next, iogrp_next);

			if (lstgrp_noio == NULL)
				ios[0]->head = grp;
			else
				WRITE_ONCE(lstgrp_noio->next, grp);

	} while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);

	if (bio != NULL)
		__submit_bio(bio, REQ_OP_READ, 0);

#ifndef EROFS_FS_HAS_MANAGED_CACHE
	return true;
#else
	if (lstgrp_noio != NULL)
		WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	if (!force_fg && !nr_bios) {
		kvfree(container_of(ios[1],
			struct z_erofs_vle_unzip_io_sb, io));
		return true;
	}

	z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
	return true;
#endif
}

static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
				     struct list_head *pagepool,
				     bool force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct z_erofs_vle_unzip_io io[1 + __FSIO_1];

	if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
		return;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	z_erofs_vle_unzip_all(sb, &io[0], pagepool);
#endif
	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_event(io[__FSIO_1].u.wait,
		   !atomic_read(&io[__FSIO_1].pending_bios));

	/* let's do synchronous decompression */
	z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool);
}

static int z_erofs_vle_normalaccess_readpage(struct file *file,
					     struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
	f.cachedzone_la = page->index << PAGE_SHIFT;
#endif
	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_vle_work_iter_end(&f.builder);

	if (err)
		errln("%s, failed to read, err [%d]", __func__, err);

	z_erofs_submit_and_unzip(&f, &pagepool, true);

	if (f.m_iter.mpage != NULL)
		put_page(f.m_iter.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}

static inline int __z_erofs_vle_normalaccess_readpages(
	struct file *filp,
	struct address_space *mapping,
	struct list_head *pages, unsigned nr_pages, bool sync)
{
	struct inode *const inode = mapping->host;

	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	struct page *head = NULL;
	LIST_HEAD(pagepool);

#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
	f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
#endif
	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);
			continue;
		}

		BUG_ON(PagePrivate(page));
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head != NULL) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err) {
			struct erofs_vnode *vi = EROFS_V(inode);

			errln("%s, readahead error at page %lu of nid %llu",
				__func__, page->index, vi->nid);
		}
	}

	(void)z_erofs_vle_work_iter_end(&f.builder);

	z_erofs_submit_and_unzip(&f, &pagepool, sync);

	if (f.m_iter.mpage != NULL)
		put_page(f.m_iter.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return 0;
}

static int z_erofs_vle_normalaccess_readpages(
	struct file *filp,
	struct address_space *mapping,
	struct list_head *pages, unsigned nr_pages)
{
	return __z_erofs_vle_normalaccess_readpages(filp,
		mapping, pages, nr_pages,
		nr_pages < 4 /* sync */);
}

const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};

#define __vle_cluster_advise(x, bit, bits) \
	((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))

#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
	Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)

enum {
	Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
	Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
	Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
	Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
	Z_EROFS_VLE_CLUSTER_TYPE_MAX
};

#define vle_cluster_type(di)	\
	__vle_cluster_type((di)->di_advise)
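
/*
 * Worked example (illustrative values; the real bit position and width come
 * from the EROFS on-disk format headers): assuming
 * Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT == 0 and
 * Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS == 2, the two lowest bits of the
 * little-endian di_advise field select the cluster type:
 *
 *	__le16 advise = cpu_to_le16(0x0005);
 *	type = (le16_to_cpu(advise) >> 0) & ((1 << 2) - 1);
 *	// type == 1, i.e. Z_EROFS_VLE_CLUSTER_TYPE_HEAD
 */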

static inline unsigned
vle_compressed_index_clusterofs(unsigned clustersize,
				struct z_erofs_vle_decompressed_index *di)
{
	debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
		__func__, di, di->di_advise, vle_cluster_type(di),
		di->di_clusterofs, di->di_u.blkaddr);

	switch (vle_cluster_type(di)) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		return clustersize;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		return di->di_clusterofs;

static inline erofs_blk_t
vle_extent_blkaddr(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blknr(iloc(sbi, vi->nid) + ofs);
}

static inline unsigned int
vle_extent_blkoff(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
}
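
/*
 * Both helpers above locate the on-disk z_erofs_vle_decompressed_index of a
 * given logical cluster 'index': the byte offset within the inode metadata is
 *
 *	ofs = Z_EROFS_VLE_EXTENT_ALIGN(inode_isize + xattr_isize)
 *	      + sizeof(struct erofs_extent_header)
 *	      + index * sizeof(struct z_erofs_vle_decompressed_index);
 *
 * and iloc(sbi, nid) + ofs is then split into a block number
 * (vle_extent_blkaddr) and an offset inside that block (vle_extent_blkoff).
 */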

/*
 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
 *
 * VLE compression mode attempts to compress a variable amount of logical
 * data into a fixed-size physical cluster.
 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
 */
static erofs_off_t vle_get_logical_extent_head(
	struct inode *inode,
	struct page **page_iter,
	void **kaddr_iter,
	unsigned lcn,	/* logical cluster number */
	erofs_blk_t *pcn,
	unsigned *flags)
{
	/* for extent meta */
	struct page *page = *page_iter;
	erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
	struct z_erofs_vle_decompressed_index *di;
	unsigned long long ofs;
	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
	const unsigned int clustersize = 1 << clusterbits;

	if (page->index != blkaddr) {
		kunmap_atomic(*kaddr_iter);

		*page_iter = page = erofs_get_meta_page(inode->i_sb,
			blkaddr, false);
		*kaddr_iter = kmap_atomic(page);
	}

	di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
	switch (vle_cluster_type(di)) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		BUG_ON(!di->di_u.delta[0]);
		BUG_ON(lcn < di->di_u.delta[0]);

		ofs = vle_get_logical_extent_head(inode,
			page_iter, kaddr_iter,
			lcn - di->di_u.delta[0], pcn, flags);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		*flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		/* clustersize should be a power of two */
		ofs = ((unsigned long long)lcn << clusterbits) +
			(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
		*pcn = le32_to_cpu(di->di_u.blkaddr);
		break;
	}
	return ofs;
}

int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    struct page **mpage_ret, int flags)
{
	/* logical extent (start, end) offset */
	unsigned long long ofs, end;
	unsigned long long ofs_rem;
	struct z_erofs_vle_decompressed_index *di;
	erofs_blk_t e_blkaddr, pcn;
	unsigned lcn, logical_cluster_ofs, cluster_type;
	struct page *mpage = *mpage_ret;
	void *kaddr;
	bool initial;
	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
	const unsigned int clustersize = 1 << clusterbits;

	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
	initial = !map->m_llen;

	/* when trying to read beyond EOF, leave it unmapped */
	if (unlikely(map->m_la >= inode->i_size)) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size - 1;

	debugln("%s, m_la %llu m_llen %llu --- start", __func__,
		map->m_la, map->m_llen);

	ofs = map->m_la + map->m_llen;

	/* clustersize should be power of two */
	lcn = ofs >> clusterbits;
	ofs_rem = ofs & (clustersize - 1);

	e_blkaddr = vle_extent_blkaddr(inode, lcn);

	if (mpage == NULL || mpage->index != e_blkaddr) {

		mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);

		DBG_BUGON(!PageUptodate(mpage));

	kaddr = kmap_atomic(mpage);
	di = kaddr + vle_extent_blkoff(inode, lcn);

	debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
		e_blkaddr, vle_extent_blkoff(inode, lcn));

	logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);

	/* [walking mode] 'map' has been already initialized */
	map->m_llen += logical_cluster_ofs;

	/* by default, compressed */
	map->m_flags |= EROFS_MAP_ZIPPED;

	end = (u64)(lcn + 1) * clustersize;

	cluster_type = vle_cluster_type(di);

	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (ofs_rem >= logical_cluster_ofs)
			map->m_flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (ofs_rem == logical_cluster_ofs) {
			pcn = le32_to_cpu(di->di_u.blkaddr);

		if (ofs_rem > logical_cluster_ofs) {
			ofs = lcn * clustersize | logical_cluster_ofs;
			pcn = le32_to_cpu(di->di_u.blkaddr);

		/* logical cluster number should be >= 1 */
		if (unlikely(!lcn)) {
			errln("invalid logical cluster 0 at nid %llu",
				EROFS_V(inode)->nid);

		end = (lcn-- * clustersize) | logical_cluster_ofs;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		ofs = vle_get_logical_extent_head(inode, mpage_ret,
			&kaddr, lcn, &pcn, &map->m_flags);
		break;
	default:
		errln("unknown cluster type %u at offset %llu of nid %llu",
			cluster_type, ofs, EROFS_V(inode)->nid);

	map->m_llen = end - ofs;
	map->m_plen = clustersize;
	map->m_pa = blknr_to_addr(pcn);
	map->m_flags |= EROFS_MAP_MAPPED;

	kunmap_atomic(kaddr);

	debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		__func__, map->m_la, map->m_pa,
		map->m_llen, map->m_plen, map->m_flags);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */