staging: erofs: compressed_pages should not be accessed again after freed
Author:     Gao Xiang <gaoxiang25@huawei.com>
AuthorDate: Wed, 27 Feb 2019 05:33:30 +0000 (13:33 +0800)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 27 Feb 2019 14:41:57 +0000 (15:41 +0100)
This patch resolves the following page use-after-free issue:
z_erofs_vle_unzip:
    ...
    for (i = 0; i < nr_pages; ++i) {
        ...
        z_erofs_onlinepage_endio(page);  (1)
    }

    for (i = 0; i < clusterpages; ++i) {
        page = compressed_pages[i];

        if (page->mapping == mngda)      (2)
            continue;
        /* recycle all individual staging pages */
        (void)z_erofs_gather_if_stagingpage(page_pool, page); (3)
        WRITE_ONCE(compressed_pages[i], NULL);
    }
    ...

After (1) is executed, the page is freed and could then be reused. If
compressed_pages[] is scanned after that, it could mistakenly fall
into (2) or (3) and finally end up in a mess.
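
The fix is to recycle all compressed pages while their
compressed_pages[] slots are still valid, and only then end the
decompressed pages. In condensed form (same identifiers as in the
excerpt above; error handling and the staging-page recycling of
pages[] omitted):

    /* first, recycle compressed pages while they are still owned */
    for (i = 0; i < clusterpages; ++i) {
        page = compressed_pages[i];

        if (page->mapping == mngda)
            continue;
        /* recycle all individual staging pages */
        (void)z_erofs_gather_if_stagingpage(page_pool, page);
        WRITE_ONCE(compressed_pages[i], NULL);
    }

    /* only now is it safe to end (and possibly free) the pages */
    for (i = 0; i < nr_pages; ++i) {
        page = pages[i];
        if (!page)
            continue;
        z_erofs_onlinepage_endio(page);
    }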

This patch aims to solve the above issue with as few changes as
possible in order to make the fix easier to backport: all compressed
pages are now handled before any decompressed page is ended, and the
endio callback is dropped from z_erofs_vle_unzip_fast_percpu() so
that its caller follows the same order.

Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
Cc: <stable@vger.kernel.org> # 4.19+
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/erofs/unzip_vle.c
drivers/staging/erofs/unzip_vle.h
drivers/staging/erofs/unzip_vle_lz4.c

diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index a127d8d..416dde4 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -986,11 +986,10 @@ repeat:
        if (llen > grp->llen)
                llen = grp->llen;
 
-       err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
-               clusterpages, pages, llen, work->pageofs,
-               z_erofs_onlinepage_endio);
+       err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+                                           pages, llen, work->pageofs);
        if (err != -ENOTSUPP)
-               goto out_percpu;
+               goto out;
 
        if (sparsemem_pages >= nr_pages)
                goto skip_allocpage;
@@ -1011,8 +1010,25 @@ skip_allocpage:
        erofs_vunmap(vout, nr_pages);
 
 out:
+       /* must handle all compressed pages before ending pages */
+       for (i = 0; i < clusterpages; ++i) {
+               page = compressed_pages[i];
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+               if (page->mapping == MNGD_MAPPING(sbi))
+                       continue;
+#endif
+               /* recycle all individual staging pages */
+               (void)z_erofs_gather_if_stagingpage(page_pool, page);
+
+               WRITE_ONCE(compressed_pages[i], NULL);
+       }
+
        for (i = 0; i < nr_pages; ++i) {
                page = pages[i];
+               if (!page)
+                       continue;
+
                DBG_BUGON(!page->mapping);
 
                /* recycle all individual staging pages */
@@ -1025,20 +1041,6 @@ out:
                z_erofs_onlinepage_endio(page);
        }
 
-out_percpu:
-       for (i = 0; i < clusterpages; ++i) {
-               page = compressed_pages[i];
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-               if (page->mapping == MNGD_MAPPING(sbi))
-                       continue;
-#endif
-               /* recycle all individual staging pages */
-               (void)z_erofs_gather_if_stagingpage(page_pool, page);
-
-               WRITE_ONCE(compressed_pages[i], NULL);
-       }
-
        if (pages == z_pagemap_global)
                mutex_unlock(&z_pagemap_global_lock);
        else if (unlikely(pages != pages_onstack))
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 9dafa8d..517e5ce 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -218,8 +218,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
 int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
                                  unsigned int clusterpages,
                                  struct page **pages, unsigned int outlen,
-                                 unsigned short pageofs,
-                                 void (*endio)(struct page *));
+                                 unsigned short pageofs);
 int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
                           unsigned int clusterpages,
                           void *vaddr, unsigned int llen,
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 8e8d705..48b263a 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
                                  unsigned int clusterpages,
                                  struct page **pages,
                                  unsigned int outlen,
-                                 unsigned short pageofs,
-                                 void (*endio)(struct page *))
+                                 unsigned short pageofs)
 {
        void *vin, *vout;
        unsigned int nr_pages, i, j;
@@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
        ret = z_erofs_unzip_lz4(vin, vout + pageofs,
                                clusterpages * PAGE_SIZE, outlen);
 
-       if (ret >= 0) {
-               outlen = ret;
-               ret = 0;
-       }
+       if (ret < 0)
+               goto out;
+       ret = 0;
 
        for (i = 0; i < nr_pages; ++i) {
                j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
 
                if (pages[i]) {
-                       if (ret < 0) {
-                               SetPageError(pages[i]);
-                       } else if (clusterpages == 1 &&
-                                  pages[i] == compressed_pages[0]) {
+                       if (clusterpages == 1 &&
+                           pages[i] == compressed_pages[0]) {
                                memcpy(vin + pageofs, vout + pageofs, j);
                        } else {
                                void *dst = kmap_atomic(pages[i]);
@@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
                                memcpy(dst + pageofs, vout + pageofs, j);
                                kunmap_atomic(dst);
                        }
-                       endio(pages[i]);
                }
                vout += PAGE_SIZE;
                outlen -= j;
                pageofs = 0;
        }
+
+out:
        preempt_enable();
 
        if (clusterpages == 1)