mm, hwpoison: make __page_handle_poison() return int
Author:     Naoya Horiguchi <naoya.horiguchi@nec.com>
AuthorDate: Thu, 14 Jul 2022 04:24:18 +0000 (13:24 +0900)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Tue, 9 Aug 2022 01:06:44 +0000 (18:06 -0700)
__page_handle_poison() currently returns a bool indicating whether
take_page_off_buddy() succeeded.  We will want to distinguish the
additional case of "dissolve succeeded but taking the page off the buddy
allocator failed" by the return value, so change the return type to int.
No functional change.
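
As a rough, self-contained sketch of the new convention (a user-space
mock-up; fake_page_handle_poison() and its parameters are illustrative
only, not kernel code):

	#include <stdio.h>

	/*
	 * Mimics the new __page_handle_poison() contract:
	 *   1:   dissolved (if needed) and taken off buddy,
	 *   0:   dissolved (if needed) but not taken off buddy,
	 *   < 0: failed to dissolve.
	 */
	static int fake_page_handle_poison(int dissolve_ret, int takeoff_ret)
	{
		if (dissolve_ret < 0)
			return dissolve_ret;	/* dissolve failed */
		return takeoff_ret;		/* 1 = taken off buddy, 0 = not */
	}

	int main(void)
	{
		/* Callers that previously tested the bool now test "> 0" or "<= 0". */
		if (fake_page_handle_poison(0, 1) > 0)
			printf("isolated: can mark MF_RECOVERED\n");
		if (fake_page_handle_poison(0, 0) <= 0)
			printf("not isolated (dissolve itself may still have succeeded)\n");
		return 0;
	}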

Link: https://lkml.kernel.org/r/20220714042420.1847125-7-naoya.horiguchi@linux.dev
Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Liu Shixin <liushixin2@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a748f5d..f43639f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -74,7 +74,13 @@ atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
 static bool hw_memory_failure __read_mostly = false;
 
-static bool __page_handle_poison(struct page *page)
+/*
+ * Return values:
+ *   1:   the page is dissolved (if needed) and taken off from buddy,
+ *   0:   the page is dissolved (if needed) and not taken off from buddy,
+ *   < 0: failed to dissolve.
+ */
+static int __page_handle_poison(struct page *page)
 {
        int ret;
 
@@ -84,7 +90,7 @@ static bool __page_handle_poison(struct page *page)
                ret = take_page_off_buddy(page);
        zone_pcp_enable(page_zone(page));
 
-       return ret > 0;
+       return ret;
 }
 
 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
@@ -94,7 +100,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
                 * Doing this check for free pages is also fine since dissolve_free_huge_page
                 * returns 0 for non-hugetlb pages as well.
                 */
-               if (!__page_handle_poison(page))
+               if (__page_handle_poison(page) <= 0)
                        /*
                         * We could fail to take off the target page from buddy
                         * for example due to racy page allocation, but that's
@@ -1086,7 +1092,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
                 * subpages.
                 */
                put_page(hpage);
-               if (__page_handle_poison(p)) {
+               if (__page_handle_poison(p) > 0) {
                        page_ref_inc(p);
                        res = MF_RECOVERED;
                }
@@ -1867,7 +1873,7 @@ retry:
        if (res == 0) {
                unlock_page(head);
                res = MF_FAILED;
-               if (__page_handle_poison(p)) {
+               if (__page_handle_poison(p) > 0) {
                        page_ref_inc(p);
                        res = MF_RECOVERED;
                }