mm: numa: Trap pmd hinting faults only if we would otherwise trap PTE faults
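Two intertwined changes land in this diff: per-page "last fault" tracking
moves from a bare node id to a combined node/pid value (all_same_node
becomes all_same_nidpid), and the PMD-level batching of hinting faults is
gated on change_pte_range() having actually updated some PTEs (this_pages).
As a rough userspace model of what the reworked PTE scan computes, here is
a hedged sketch; struct page_stub and its fields are stand-ins for real
struct page state, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for what page_nidpid_last() decodes to. */
struct page_stub {
	int last_nid;	/* node that last took a hinting fault on the page */
	int last_pid;	/* low pid bits of the task that took it */
};

/*
 * Model of the scan in change_pte_range(): true iff every page in the
 * range was last faulted by the same task on the same node, i.e. the
 * range looks private to one task and can be batched as one PMD fault.
 */
static bool all_same_nidpid(const struct page_stub *pages, int n)
{
	int last_nid = -1, last_pid = -1;
	int i;

	for (i = 0; i < n; i++) {
		if (last_nid == -1)
			last_nid = pages[i].last_nid;
		if (last_pid == -1)
			last_pid = pages[i].last_pid;
		if (last_nid != pages[i].last_nid ||
		    last_pid != pages[i].last_pid)
			return false;
	}
	return true;
}

int main(void)
{
	struct page_stub private_map[] = { {0, 42}, {0, 42}, {0, 42} };
	struct page_stub shared_map[]  = { {0, 42}, {0, 43}, {0, 42} };

	printf("private mapping batched: %d\n", all_same_nidpid(private_map, 3));
	printf("shared mapping batched:  %d\n", all_same_nidpid(shared_map, 3));
	return 0;
}

When the last faulter differs in either field the sketch returns false,
which in the patch below suppresses change_pmd_protnuma(); this is how
shared ranges stop being batched even though the old mapcount test on
non-shared pages is removed.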
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 94722a4..5aae390 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,14 +37,15 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable, int prot_numa, bool *ret_all_same_node)
+               int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;
-       bool all_same_node = true;
+       bool all_same_nidpid = true;
        int last_nid = -1;
+       int last_pid = -1;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
@@ -63,15 +64,20 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                                page = vm_normal_page(vma, addr, oldpte);
                                if (page) {
-                                       int this_nid = page_to_nid(page);
+                                       int nidpid = page_nidpid_last(page);
+                                       int this_nid = nidpid_to_nid(nidpid);
+                                       int this_pid = nidpid_to_pid(nidpid);
+
                                        if (last_nid == -1)
                                                last_nid = this_nid;
-                                       if (last_nid != this_nid)
-                                               all_same_node = false;
+                                       if (last_pid == -1)
+                                               last_pid = this_pid;
+                                       if (last_nid != this_nid ||
+                                           last_pid != this_pid) {
+                                               all_same_nidpid = false;
+                                       }
 
-                                       /* only check non-shared pages */
-                                       if (!pte_numa(oldpte) &&
-                                           page_mapcount(page) == 1) {
+                                       if (!pte_numa(oldpte)) {
                                                ptent = pte_mknuma(ptent);
                                                updated = true;
                                        }
@@ -101,14 +107,15 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                make_migration_entry_read(&entry);
                                set_pte_at(mm, addr, pte,
                                        swp_entry_to_pte(entry));
+
+                               pages++;
                        }
-                       pages++;
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 
-       *ret_all_same_node = all_same_node;
+       *ret_all_same_nidpid = all_same_nidpid;
        return pages;
 }
 
@@ -135,25 +142,34 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
        pmd_t *pmd;
        unsigned long next;
        unsigned long pages = 0;
-       bool all_same_node;
+       bool all_same_nidpid;
 
        pmd = pmd_offset(pud, addr);
        do {
+               unsigned long this_pages;
+
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma, addr, pmd);
-                       else if (change_huge_pmd(vma, pmd, addr, newprot,
-                                                prot_numa)) {
-                               pages += HPAGE_PMD_NR;
-                               continue;
+                       else {
+                               int nr_ptes = change_huge_pmd(vma, pmd, addr,
+                                               newprot, prot_numa);
+
+                               if (nr_ptes) {
+                                       if (nr_ptes == HPAGE_PMD_NR)
+                                               pages++;
+
+                                       continue;
+                               }
                        }
                        /* fall through */
                }
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               pages += change_pte_range(vma, pmd, addr, next, newprot,
-                                dirty_accountable, prot_numa, &all_same_node);
+               this_pages = change_pte_range(vma, pmd, addr, next, newprot,
+                                dirty_accountable, prot_numa, &all_same_nidpid);
+               pages += this_pages;
 
                /*
                 * If we are changing protections for NUMA hinting faults then
@@ -161,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                 * node. This allows a regular PMD to be handled as one fault
                 * and effectively batches the taking of the PTL
                 */
-               if (prot_numa && all_same_node)
+               if (prot_numa && this_pages && all_same_nidpid)
                        change_pmd_protnuma(vma->vm_mm, addr, pmd);
        } while (pmd++, addr = next, addr != end);
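For reference, the node id and pid are packed into one per-page value so
both can be stored and compared cheaply. The field widths and helper
bodies below are illustrative guesses in plain C, not the kernel's exact
definitions (the real ones sit behind page_nidpid_last() and friends):

#include <stdio.h>

/* Illustrative width; the kernel sizes this from available page flag bits. */
#define PID_BITS	8
#define PID_MASK	((1 << PID_BITS) - 1)

/* Pack a node id and the low pid bits into one "nidpid" value. */
static int nidpid_pack(int nid, int pid)
{
	return (nid << PID_BITS) | (pid & PID_MASK);
}

static int nidpid_to_nid(int nidpid)
{
	return nidpid >> PID_BITS;
}

static int nidpid_to_pid(int nidpid)
{
	return nidpid & PID_MASK;
}

int main(void)
{
	/* Two tasks faulting on the same node: the nid matches but the
	 * pid does not, so a scan like the one sketched above would
	 * clear all_same_nidpid and leave the PMD unbatched. */
	int a = nidpid_pack(1, 1234);
	int b = nidpid_pack(1, 5678);

	printf("same nid: %d, same pid: %d\n",
	       nidpid_to_nid(a) == nidpid_to_nid(b),
	       nidpid_to_pid(a) == nidpid_to_pid(b));
	return 0;
}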