mm/page_vma_mapped: reformat map_pte() with less indentation
author     Hugh Dickins <hughd@google.com>
           Fri, 9 Jun 2023 01:14:12 +0000 (18:14 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 19 Jun 2023 23:19:13 +0000 (16:19 -0700)
No functional change here, but adjust the format of map_pte() so that the
following commit will be easier to read: separate out the PVMW_SYNC case
first, and remove two levels of indentation from the ZONE_DEVICE case.

Link: https://lkml.kernel.org/r/bf723f59-e3fc-6839-1cc3-c0631ee248bc@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 007dc74..947dc74 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -15,38 +15,41 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 
 static bool map_pte(struct page_vma_mapped_walk *pvmw)
 {
-       pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
-       if (!(pvmw->flags & PVMW_SYNC)) {
-               if (pvmw->flags & PVMW_MIGRATION) {
-                       if (!is_swap_pte(*pvmw->pte))
-                               return false;
-               } else {
-                       /*
-                        * We get here when we are trying to unmap a private
-                        * device page from the process address space. Such
-                        * page is not CPU accessible and thus is mapped as
-                        * a special swap entry, nonetheless it still does
-                        * count as a valid regular mapping for the page (and
-                        * is accounted as such in page maps count).
-                        *
-                        * So handle this special case as if it was a normal
-                        * page mapping ie lock CPU page table and returns
-                        * true.
-                        *
-                        * For more details on device private memory see HMM
-                        * (include/linux/hmm.h or mm/hmm.c).
-                        */
-                       if (is_swap_pte(*pvmw->pte)) {
-                               swp_entry_t entry;
+       if (pvmw->flags & PVMW_SYNC) {
+               /* Use the stricter lookup */
+               pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
+                                               pvmw->address, &pvmw->ptl);
+               return true;
+       }
 
-                               /* Handle un-addressable ZONE_DEVICE memory */
-                               entry = pte_to_swp_entry(*pvmw->pte);
-                               if (!is_device_private_entry(entry) &&
-                                   !is_device_exclusive_entry(entry))
-                                       return false;
-                       } else if (!pte_present(*pvmw->pte))
-                               return false;
-               }
+       pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
+       if (pvmw->flags & PVMW_MIGRATION) {
+               if (!is_swap_pte(*pvmw->pte))
+                       return false;
+       } else if (is_swap_pte(*pvmw->pte)) {
+               swp_entry_t entry;
+               /*
+                * Handle un-addressable ZONE_DEVICE memory.
+                *
+                * We get here when we are trying to unmap a private
+                * device page from the process address space. Such
+                * page is not CPU accessible and thus is mapped as
+                * a special swap entry, nonetheless it still does
+                * count as a valid regular mapping for the page
+                * (and is accounted as such in page maps count).
+                *
+                * So handle this special case as if it was a normal
+                * page mapping ie lock CPU page table and return true.
+                *
+                * For more details on device private memory see HMM
+                * (include/linux/hmm.h or mm/hmm.c).
+                */
+               entry = pte_to_swp_entry(*pvmw->pte);
+               if (!is_device_private_entry(entry) &&
+                   !is_device_exclusive_entry(entry))
+                       return false;
+       } else if (!pte_present(*pvmw->pte)) {
+               return false;
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);