sched/numa: Set preferred NUMA node based on number of private faults
diff --git a/mm/migrate.c b/mm/migrate.c
index b7ded7e..025d1e3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               if (unlikely(balloon_page_movable(page)))
+               if (unlikely(isolated_balloon_page(page)))
                        balloon_page_putback(page);
                else
                        putback_lru_page(page);
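
Every page on this list was isolated before it was queued, so the dispatch must
test for the isolated state: balloon_page_movable() asks whether a page could
still be isolated and may answer false for a balloon page that is already off
the balloon list, wrongly routing it to putback_lru_page(). A minimal sketch of
the corrected dispatch (helper names as in include/linux/balloon_compaction.h
of this era; illustrative, not a drop-in):

	static void putback_one_movable_page(struct page *page)
	{
		if (unlikely(isolated_balloon_page(page)))
			balloon_page_putback(page);	/* back to the balloon list */
		else
			putback_lru_page(page);		/* back to its LRU list */
	}
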
@@ -311,7 +311,7 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-static int migrate_page_move_mapping(struct address_space *mapping,
+int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode)
 {
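
Dropping static exposes the mapping/refcount transfer for reuse outside
mm/migrate.c, so address_space implementations no longer need to open-code the
radix-tree replacement. A hedged sketch of a ->migratepage callback built on it
(the filesystem and function names are hypothetical, and a matching declaration
is assumed to be added to include/linux/migrate.h):

	static int myfs_migratepage(struct address_space *mapping,
				    struct page *newpage, struct page *page,
				    enum migrate_mode mode)
	{
		int rc;

		/* Swap the pages in the mapping and transfer the refcounts. */
		rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
		if (rc != MIGRATEPAGE_SUCCESS)
			return rc;

		/* ... move any filesystem-private state across here ... */

		migrate_page_copy(newpage, page);	/* data, flags, dirty state */
		return MIGRATEPAGE_SUCCESS;
	}
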
@@ -1498,7 +1498,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                          __GFP_NOWARN) &
                                         ~GFP_IOFS, 0);
        if (newpage)
-               page_nid_xchg_last(newpage, page_nid_last(page));
+               page_nidpid_xchg_last(newpage, page_nidpid_last(page));
 
        return newpage;
 }
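
The page_nid_*() to page_nidpid_*() rename is the heart of the commit: the
field kept in page->flags now records not only the node of the last NUMA
hinting fault but also the low bits of the faulting task's pid, so the next
fault can classify itself as private (the same task touching the page again)
or shared (a different task) when choosing a preferred node. Copying the value
to the freshly allocated destination page preserves that history across the
migration. A sketch of the packing, with assumed widths and names (the
kernel's real helpers keep these bits packed into page->flags):

	#define SKETCH_PID_BITS		8
	#define SKETCH_PID_MASK		((1 << SKETCH_PID_BITS) - 1)

	static inline int nidpid_pack(int nid, int pid)
	{
		return (nid << SKETCH_PID_BITS) | (pid & SKETCH_PID_MASK);
	}

	static inline int nidpid_to_nid(int nidpid)
	{
		return nidpid >> SKETCH_PID_BITS;
	}

	/* A fault is "private" when the stored pid bits match the faulter. */
	static inline bool nidpid_fault_is_private(int nidpid, int pid)
	{
		return (nidpid & SKETCH_PID_MASK) == (pid & SKETCH_PID_MASK);
	}
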
@@ -1599,7 +1599,8 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
  * node. Caller is expected to have an elevated reference count on
  * the page that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, int node)
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+                          int node)
 {
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated;
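
The new vma parameter feeds the filter below, so every caller now has to pass
the VMA the hinting fault occurred against. A simplified sketch of an updated
call site in a NUMA hinting fault handler (shape assumed from do_numa_page):

	/* Hypothetical, simplified call site: */
	migrated = migrate_misplaced_page(page, vma, target_nid);
	if (migrated)
		page_nid = target_nid;	/* account the fault to the new node */
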
@@ -1607,10 +1608,11 @@ int migrate_misplaced_page(struct page *page, int node)
        LIST_HEAD(migratepages);
 
        /*
-        * Don't migrate pages that are mapped in multiple processes.
-        * TODO: Handle false sharing detection instead of this hammer
+        * Don't migrate file pages that are mapped in multiple processes
+        * with execute permissions as they are probably shared libraries.
         */
-       if (page_mapcount(page) != 1)
+       if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
+           (vma->vm_flags & VM_EXEC))
                goto out;
 
        /*
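
The old blanket mapcount test refused to migrate any shared page. The
replacement narrows that to pages which are simultaneously shared, file-backed
and mapped executable, i.e. almost certainly shared library text, where
following one faulting task around the machine would thrash every other user.
The same test, restated as a standalone predicate (hypothetical helper, for
readability only):

	static bool is_shared_exec_file_page(struct page *page,
					     struct vm_area_struct *vma)
	{
		return page_mapcount(page) != 1 &&	/* mapped by more than one process */
		       page_is_file_cache(page) &&	/* file-backed, not anonymous */
		       (vma->vm_flags & VM_EXEC);	/* mapped with execute permission */
	}
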
@@ -1661,13 +1663,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        int page_lru = page_is_file_cache(page);
 
        /*
-        * Don't migrate pages that are mapped in multiple processes.
-        * TODO: Handle false sharing detection instead of this hammer
-        */
-       if (page_mapcount(page) != 1)
-               goto out_dropref;
-
-       /*
         * Rate-limit the amount of data that is being migrated to a node.
         * Optimal placement is no good if the memory bus is saturated and
         * all the time is being spent migrating!
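
In the transhuge variant the mapcount check is dropped outright rather than
replaced: transparent huge pages are anonymous-only at this point in the
kernel's history, so the shared-library case the new file-backed filter
targets cannot arise here. The rate limit the surviving comment describes can
be pictured as a windowed counter; a sketch (the real
numamigrate_update_ratelimit() keeps its state in pg_data_t, and the names and
window length here are assumptions):

	static bool ratelimit_exceeded(unsigned long *window_end,
				       unsigned long *nr_this_window,
				       unsigned long nr_pages,
				       unsigned long limit_pages)
	{
		/* Start a fresh accounting window when the old one expires. */
		if (time_after(jiffies, *window_end)) {
			*window_end = jiffies + msecs_to_jiffies(100);
			*nr_this_window = 0;
		}
		*nr_this_window += nr_pages;
		/* true: the node is already saturated, skip this migration */
		return *nr_this_window > limit_pages;
	}
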
@@ -1680,7 +1675,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        if (!new_page)
                goto out_fail;
 
-       page_nid_xchg_last(new_page, page_nid_last(page));
+       page_nidpid_xchg_last(new_page, page_nidpid_last(page));
 
        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated) {
@@ -1713,12 +1708,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                unlock_page(new_page);
                put_page(new_page);             /* Free it */
 
-               unlock_page(page);
+               /* Retake the caller's reference and put the page back on the LRU */
+               get_page(page);
                putback_lru_page(page);
-
-               count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-               isolated = 0;
-               goto out;
+               mod_zone_page_state(page_zone(page),
+                        NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+               goto out_fail;
        }
 
        /*
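
The failure path has to be careful with references: numamigrate_isolate_page()
traded the caller's reference for the isolation reference when it succeeded,
putback_lru_page() consumes a reference of its own, and the shared out_dropref
exit still does put_page(). Retaking a reference first keeps the count
balanced; spelled out (an illustrative annotation of the lines above, as I
read the surrounding code):

	get_page(page);		/* +1 so out_dropref's put_page() stays balanced */
	putback_lru_page(page);	/* -1: consumes the isolation reference */
	mod_zone_page_state(page_zone(page),
			    NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
	/* fall through to out_fail/out_dropref: count the failure, drop the
	 * retaken reference, and let the fault retry against the old page. */
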
@@ -1735,9 +1730,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        entry = pmd_mkhuge(entry);
 
-       page_add_new_anon_rmap(new_page, vma, haddr);
-
+       pmdp_clear_flush(vma, haddr, pmd);
        set_pmd_at(mm, haddr, pmd, entry);
+       page_add_new_anon_rmap(new_page, vma, haddr);
        update_mmu_cache_pmd(vma, address, &entry);
        page_remove_rmap(page);
        /*
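
The remap sequence is also reordered. As I read it: the old NUMA-hinting entry
is cleared and flushed first, so no CPU can keep using a stale translation
once the new mapping goes live, and only after set_pmd_at() makes new_page
reachable is it added to the anon rmap, closing the old window in which rmap
walkers could find new_page before any page table entry pointed at it.
Annotated restatement of the new order:

	pmdp_clear_flush(vma, haddr, pmd);	/* old entry gone from page table and TLBs */
	set_pmd_at(mm, haddr, pmd, entry);	/* publish the huge mapping of new_page */
	page_add_new_anon_rmap(new_page, vma, haddr);	/* rmap only once it is mapped */
	update_mmu_cache_pmd(vma, address, &entry);
	page_remove_rmap(page);			/* the old page loses its reverse map */
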
@@ -1756,7 +1751,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
        count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
 
-out:
        mod_zone_page_state(page_zone(page),
                        NR_ISOLATED_ANON + page_lru,
                        -HPAGE_PMD_NR);
@@ -1765,6 +1759,10 @@ out:
 out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 out_dropref:
+       entry = pmd_mknonnuma(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       update_mmu_cache_pmd(vma, address, &entry);
+
        unlock_page(page);
        put_page(page);
        return 0;
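
The new out_dropref fixup matters because a failed migration previously left
the NUMA hinting entry in place, so the task would take the same hinting fault
again with nothing new to do; rewriting the pmd as an ordinary present entry
ends that fault loop. The base-page fault path needs the analogous cleanup
with the pte helpers; a sketch (variable names assumed, for illustration):

	pte = pte_mknonnuma(pte);		/* clear the NUMA hinting bit */
	set_pte_at(mm, addr, ptep, pte);	/* reinstall a plain present pte */
	update_mmu_cache(vma, addr, ptep);
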