misc: xilinx-sdfec: improve get_user_pages_fast() error handling
author: John Hubbard <jhubbard@nvidia.com>
Wed, 27 May 2020 01:26:26 +0000 (18:26 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 24 Jun 2020 15:50:35 +0000 (17:50 +0200)
[ Upstream commit 57343d51613227373759f5b0f2eede257fd4b82e ]

This fixes the case of get_user_pages_fast() returning a -errno.
The result needs to be stored in a signed integer. And for safe
signed/unsigned comparisons, it's best to keep everything signed.
And get_user_pages_fast() also expects a signed value for number
of pages to pin.

Therefore, change most relevant variables, from u32 to int. Leave
"n" unsigned, for convenience in checking for overflow. And provide
a WARN_ON_ONCE() and early return, if overflow occurs.

Also, as long as we're tidying up: rename the page array from page,
to pages, in order to match the conventions used in most other call
sites.

Fixes: 20ec628e8007e ("misc: xilinx_sdfec: Add ability to configure LDPC")
Cc: Derek Kiernan <derek.kiernan@xilinx.com>
Cc: Dragan Cvetic <dragan.cvetic@xilinx.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Michal Simek <michal.simek@xilinx.com>
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Link: https://lore.kernel.org/r/20200527012628.1100649-2-jhubbard@nvidia.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/misc/xilinx_sdfec.c

index 48ba7e0..d4c14b6 100644 (file)
@@ -602,10 +602,10 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
                              const u32 depth)
 {
        u32 reg = 0;
-       u32 res;
-       u32 n, i;
+       int res, i, nr_pages;
+       u32 n;
        u32 *addr = NULL;
-       struct page *page[MAX_NUM_PAGES];
+       struct page *pages[MAX_NUM_PAGES];
 
        /*
         * Writes that go beyond the length of
@@ -622,15 +622,22 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
        if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
                n += 1;
 
-       res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
-       if (res < n) {
-               for (i = 0; i < res; i++)
-                       put_page(page[i]);
+       if (WARN_ON_ONCE(n > INT_MAX))
+               return -EINVAL;
+
+       nr_pages = n;
+
+       res = get_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
+       if (res < nr_pages) {
+               if (res > 0) {
+                       for (i = 0; i < res; i++)
+                               put_page(pages[i]);
+               }
                return -EINVAL;
        }
 
-       for (i = 0; i < n; i++) {
-               addr = kmap(page[i]);
+       for (i = 0; i < nr_pages; i++) {
+               addr = kmap(pages[i]);
                do {
                        xsdfec_regwrite(xsdfec,
                                        base_addr + ((offset + reg) *
@@ -639,7 +646,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
                        reg++;
                } while ((reg < len) &&
                         ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
-               put_page(page[i]);
+               put_page(pages[i]);
        }
        return reg;
 }