X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=mm%2Fmemory-failure.c;h=455093f73a70c4eae5444f1ce5d46c6b326caa8f;hb=a2b2b301187809acd064211515ae65a5f7ca1d0c;hp=4d6e43c88489a0ef1755656a7512577902374496;hpb=ff6e6ded54725cd01623b9a1a86b74a523198733;p=platform%2Fkernel%2Flinux-starfive.git

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4d6e43c..455093f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -595,10 +595,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 /*
  * Collect processes when the error hit an anonymous page.
  */
-static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-				int force_early)
+static void collect_procs_anon(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early)
 {
-	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
@@ -633,12 +632,12 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 /*
  * Collect processes when the error hit a file mapped page.
  */
-static void collect_procs_file(struct page *page, struct list_head *to_kill,
-				int force_early)
+static void collect_procs_file(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 	pgoff_t pgoff;
 
 	i_mmap_lock_read(mapping);
@@ -704,17 +703,17 @@ static void collect_procs_fsdax(struct page *page,
 /*
  * Collect the processes who have the corrupted page mapped to kill.
  */
-static void collect_procs(struct page *page, struct list_head *tokill,
-				int force_early)
+static void collect_procs(struct folio *folio, struct page *page,
+		struct list_head *tokill, int force_early)
 {
-	if (!page->mapping)
+	if (!folio->mapping)
 		return;
 	if (unlikely(PageKsm(page)))
 		collect_procs_ksm(page, tokill, force_early);
 	else if (PageAnon(page))
-		collect_procs_anon(page, tokill, force_early);
+		collect_procs_anon(folio, page, tokill, force_early);
 	else
-		collect_procs_file(page, tokill, force_early);
+		collect_procs_file(folio, page, tokill, force_early);
 }
 
 struct hwpoison_walk {
@@ -1571,7 +1570,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
-	if (!page_mapped(hpage))
+	if (!page_mapped(p))
		return true;
 
	if (PageSwapCache(p)) {
@@ -1602,7 +1601,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 */
-	collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
+	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
 
	if (PageHuge(hpage) && !PageAnon(hpage)) {
		/*
@@ -1622,10 +1621,10 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
		try_to_unmap(folio, ttu);
	}
 
-	unmap_success = !page_mapped(hpage);
+	unmap_success = !page_mapped(p);
	if (!unmap_success)
		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
-		       pfn, page_mapcount(hpage));
+		       pfn, page_mapcount(p));
 
	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
@@ -1705,7 +1704,7 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
		 * mapping being torn down is communicated in siginfo, see
		 * kill_proc()
		 */
-		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
+		loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
 
		unmap_mapping_range(mapping, start, size, 0);
	}
@@ -1713,32 +1712,35 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
 }
 
+/*
+ * Only dev_pagemap pages get here, such as fsdax when the filesystem
+ * either does not claim or fails to claim a hwpoison event, or devdax.
+ * The fsdax pages are initialized per base page, and the devdax pages
+ * could be initialized either as base pages, or as compound pages with
+ * vmemmap optimization enabled. Devdax is simplistic in its dealing
+ * with hwpoison: if a subpage of a compound page is poisoned, simply
+ * marking the compound head page is by far sufficient.
+ */
 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
		struct dev_pagemap *pgmap)
 {
-	struct page *page = pfn_to_page(pfn);
+	struct folio *folio = pfn_folio(pfn);
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	int rc = 0;
 
	/*
-	 * Pages instantiated by device-dax (not filesystem-dax)
-	 * may be compound pages.
-	 */
-	page = compound_head(page);
-
-	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
-	cookie = dax_lock_page(page);
+	cookie = dax_lock_folio(folio);
	if (!cookie)
		return -EBUSY;
 
-	if (hwpoison_filter(page)) {
+	if (hwpoison_filter(&folio->page)) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}
@@ -1760,7 +1762,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
-	SetPageHWPoison(page);
+	SetPageHWPoison(&folio->page);
 
	/*
	 * Unlike System-RAM there is no possibility to swap in a
@@ -1769,11 +1771,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
-	collect_procs(page, &to_kill, true);
+	collect_procs(folio, &folio->page, &to_kill, true);
 
-	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
+	unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
 unlock:
-	dax_unlock_page(page, cookie);
+	dax_unlock_folio(folio, cookie);
	return rc;
 }
 
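
Notes on the hunks above: they track the upstream folio conversion of memory-failure. collect_procs() and its anon/file helpers now take both the folio and the precise poisoned page, so hwpoison_user_mappings() tests page_mapped() and page_mapcount() on the exact subpage p rather than on the head page, and mf_generic_kill_procs() works in terms of the folio throughout (pfn_folio(), dax_lock_folio()/dax_unlock_folio() in place of the page variants), which also removes the explicit compound_head() step for device-dax pages.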
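The unmap_and_kill() hunk is a separate 32-bit truncation fix: pgoff_t is an unsigned long, which is 32 bits wide on 32-bit kernels, so index << PAGE_SHIFT wraps for file offsets of 4 GiB and above before the result ever reaches loff_t; on 64-bit kernels pgoff_t is already 64 bits wide, so only 32-bit configurations hit the truncation. Below is a minimal userspace sketch of the effect, not kernel code; the fixed-width typedefs and the PAGE_SHIFT value are stand-ins assumed for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

	int main(void)
	{
		/* uint32_t models pgoff_t on a 32-bit kernel; int64_t models loff_t. */
		uint32_t index = 0x200000;	/* page index 8 GiB into the file */
		int64_t size = 1 << PAGE_SHIFT;

		/* Before the fix: the shift happens in 32-bit arithmetic and wraps to 0. */
		int64_t bad = (index << PAGE_SHIFT) & ~(size - 1);

		/* After the fix: the index is widened to 64 bits before shifting. */
		int64_t good = ((int64_t)index << PAGE_SHIFT) & ~(size - 1);

		/* Prints bad=0 good=8589934592 */
		printf("bad=%lld good=%lld\n", (long long)bad, (long long)good);
		return 0;
	}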