mm, hwpoison: make unpoison aware of raw error info in hwpoisoned hugepage
authorNaoya Horiguchi <naoya.horiguchi@nec.com>
Thu, 14 Jul 2022 04:24:16 +0000 (13:24 +0900)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 9 Aug 2022 01:06:44 +0000 (18:06 -0700)
Raw error info list needs to be removed when hwpoisoned hugetlb is
unpoisoned.  And unpoison handler needs to know how many errors there are
in the target hugepage.  So add them.

Hugepages with HPageVmemmapOptimized(hpage) or HPageRawHwpUnreliable(hpage)
set sometimes can't be unpoisoned, so skip them.

Link: https://lkml.kernel.org/r/20220714042420.1847125-5-naoya.horiguchi@linux.dev
Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reported-by: kernel test robot <lkp@intel.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Liu Shixin <liushixin2@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swapops.h
mm/memory-failure.c

index bb7afd0..a3d435b 100644 (file)
@@ -490,6 +490,11 @@ static inline void num_poisoned_pages_dec(void)
        atomic_long_dec(&num_poisoned_pages);
 }
 
+static inline void num_poisoned_pages_sub(long i)
+{
+       atomic_long_sub(i, &num_poisoned_pages);
+}
+
 #else
 
 static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -505,6 +510,10 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
 static inline void num_poisoned_pages_inc(void)
 {
 }
+
+static inline void num_poisoned_pages_sub(long i)
+{
+}
 #endif
 
 static inline int non_swap_entry(swp_entry_t entry)
index 61668ce..e30dbec 100644 (file)
@@ -1678,19 +1678,23 @@ static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
        return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON);
 }
 
-static void __free_raw_hwp_pages(struct page *hpage)
+static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag)
 {
        struct llist_head *head;
        struct llist_node *t, *tnode;
+       unsigned long count = 0;
 
        head = raw_hwp_list_head(hpage);
        llist_for_each_safe(tnode, t, head->first) {
                struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
 
-               SetPageHWPoison(p->page);
+               if (move_flag)
+                       SetPageHWPoison(p->page);
                kfree(p);
+               count++;
        }
        llist_del_all(head);
+       return count;
 }
 
 static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
@@ -1733,17 +1737,36 @@ static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
                 * Once HPageRawHwpUnreliable is set, raw_hwp_page is not
                 * used any more, so free it.
                 */
-               __free_raw_hwp_pages(hpage);
+               __free_raw_hwp_pages(hpage, false);
        }
        return ret;
 }
 
+static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
+{
+       /*
+        * HPageVmemmapOptimized hugepages can't be freed because struct
+        * pages for tail pages are required but they don't exist.
+        */
+       if (move_flag && HPageVmemmapOptimized(hpage))
+               return 0;
+
+       /*
+        * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
+        * definition.
+        */
+       if (HPageRawHwpUnreliable(hpage))
+               return 0;
+
+       return __free_raw_hwp_pages(hpage, move_flag);
+}
+
 void hugetlb_clear_page_hwpoison(struct page *hpage)
 {
        if (HPageRawHwpUnreliable(hpage))
                return;
        ClearPageHWPoison(hpage);
-       __free_raw_hwp_pages(hpage);
+       free_raw_hwp_pages(hpage, true);
 }
 
 /*
@@ -1887,6 +1910,10 @@ static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *
        return 0;
 }
 
+static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
+{
+       return 0;
+}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
@@ -2292,6 +2319,7 @@ int unpoison_memory(unsigned long pfn)
        struct page *p;
        int ret = -EBUSY;
        int freeit = 0;
+       unsigned long count = 1;
        static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
                                        DEFAULT_RATELIMIT_BURST);
 
@@ -2339,6 +2367,13 @@ int unpoison_memory(unsigned long pfn)
 
        ret = get_hwpoison_page(p, MF_UNPOISON);
        if (!ret) {
+               if (PageHuge(p)) {
+                       count = free_raw_hwp_pages(page, false);
+                       if (count == 0) {
+                               ret = -EBUSY;
+                               goto unlock_mutex;
+                       }
+               }
                ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
        } else if (ret < 0) {
                if (ret == -EHWPOISON) {
@@ -2347,6 +2382,13 @@ int unpoison_memory(unsigned long pfn)
                        unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
                                         pfn, &unpoison_rs);
        } else {
+               if (PageHuge(p)) {
+                       count = free_raw_hwp_pages(page, false);
+                       if (count == 0) {
+                               ret = -EBUSY;
+                               goto unlock_mutex;
+                       }
+               }
                freeit = !!TestClearPageHWPoison(p);
 
                put_page(page);
@@ -2359,7 +2401,7 @@ int unpoison_memory(unsigned long pfn)
 unlock_mutex:
        mutex_unlock(&mf_mutex);
        if (!ret || freeit) {
-               num_poisoned_pages_dec();
+               num_poisoned_pages_sub(count);
                unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
                                 page_to_pfn(p), &unpoison_rs);
        }