erofs: clean up cached I/O strategies
author	Gao Xiang <hsiangkao@linux.alibaba.com>
Tue, 6 Dec 2022 06:03:52 +0000 (14:03 +0800)
committer	Gao Xiang <hsiangkao@linux.alibaba.com>
Wed, 7 Dec 2022 02:56:20 +0000 (10:56 +0800)
After commit 4c7e42552b3a ("erofs: remove useless cache strategy of
DELAYEDALLOC"), only one cached I/O allocation strategy is supported:

  When cached I/O is preferred, pages are allocated without entering
  direct reclaim.  If the allocation fails, it falls back to inplace I/O.

Let's get rid of z_erofs_cache_alloctype.  No logical changes.
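
For illustration only (not part of this patch), a minimal sketch of that
remaining strategy; the helper name and exact gfp mask here are assumptions
for the example, not the code in z_erofs_bind_cache():

  #include <linux/gfp.h>
  #include <linux/mm_types.h>

  /*
   * Hypothetical helper: opportunistically allocate one cached page.
   * Direct reclaim is masked off so a failed allocation stays cheap;
   * a NULL return simply means "reuse the compressed page inplace".
   */
  static struct page *try_alloc_cached_page(gfp_t mapping_gfp)
  {
          gfp_t gfp = (mapping_gfp & ~__GFP_DIRECT_RECLAIM) |
                      __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

          return alloc_page(gfp); /* NULL: fall back to inplace I/O */
  }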

Reviewed-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20221206060352.152830-1-xiang@kernel.org
fs/erofs/zdata.c

index b792d424d774cd37ce9646f0473b2dae5a1e23e7..b66c16473273f723c064966c0f8f1f2ca9bd5a77 100644
@@ -175,16 +175,6 @@ static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
        DBG_BUGON(1);
 }
 
-/* how to allocate cached pages for a pcluster */
-enum z_erofs_cache_alloctype {
-       DONTALLOC,      /* don't allocate any cached pages */
-       /*
-        * try to use cached I/O if page allocation succeeds or fallback
-        * to in-place I/O instead to avoid any direct reclaim.
-        */
-       TRYALLOC,
-};
-
 /*
  * tagged pointer with 1-bit tag for all compressed pages
  * tag 0 - the page is just found with an extra page reference
@@ -292,12 +282,29 @@ struct z_erofs_decompress_frontend {
        .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
        .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
 
+static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
+{
+       unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
+
+       if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
+               return false;
+
+       if (fe->backmost)
+               return true;
+
+       if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
+           fe->map.m_la < fe->headoffset)
+               return true;
+
+       return false;
+}
+
 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
-                              enum z_erofs_cache_alloctype type,
                               struct page **pagepool)
 {
        struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
        struct z_erofs_pcluster *pcl = fe->pcl;
+       bool shouldalloc = z_erofs_should_alloc_cache(fe);
        bool standalone = true;
        /*
         * optimistic allocation without direct reclaim since inplace I/O
@@ -326,18 +333,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
                } else {
                        /* I/O is needed, no possible to decompress directly */
                        standalone = false;
-                       switch (type) {
-                       case TRYALLOC:
-                               newpage = erofs_allocpage(pagepool, gfp);
-                               if (!newpage)
-                                       continue;
-                               set_page_private(newpage,
-                                                Z_EROFS_PREALLOCATED_PAGE);
-                               t = tag_compressed_page_justfound(newpage);
-                               break;
-                       default:        /* DONTALLOC */
+                       if (!shouldalloc)
                                continue;
-                       }
+
+                       /*
+                        * try to use cached I/O if page allocation
+                        * succeeds or fallback to in-place I/O instead
+                        * to avoid any direct reclaim.
+                        */
+                       newpage = erofs_allocpage(pagepool, gfp);
+                       if (!newpage)
+                               continue;
+                       set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
+                       t = tag_compressed_page_justfound(newpage);
                }
 
                if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
@@ -637,20 +645,6 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
        return true;
 }
 
-static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
-                                      unsigned int cachestrategy,
-                                      erofs_off_t la)
-{
-       if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
-               return false;
-
-       if (fe->backmost)
-               return true;
-
-       return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
-               la < fe->headoffset;
-}
-
 static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
                                 struct page *page, unsigned int pageofs,
                                 unsigned int len)
@@ -687,12 +681,9 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                                struct page *page, struct page **pagepool)
 {
        struct inode *const inode = fe->inode;
-       struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct erofs_map_blocks *const map = &fe->map;
        const loff_t offset = page_offset(page);
        bool tight = true, exclusive;
-
-       enum z_erofs_cache_alloctype cache_strategy;
        unsigned int cur, end, spiltted;
        int err = 0;
 
@@ -746,13 +737,7 @@ repeat:
                fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
        } else {
                /* bind cache first when cached decompression is preferred */
-               if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
-                                              map->m_la))
-                       cache_strategy = TRYALLOC;
-               else
-                       cache_strategy = DONTALLOC;
-
-               z_erofs_bind_cache(fe, cache_strategy, pagepool);
+               z_erofs_bind_cache(fe, pagepool);
        }
 hitted:
        /*