diff --git a/mm/migrate.c b/mm/migrate.c
index b494fdb..3acac4a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -148,8 +148,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (pte_swp_soft_dirty(*ptep))
                pte = pte_mksoft_dirty(pte);
+
+       /* Recheck VMA as permissions can change since migration started */
        if (is_write_migration_entry(entry))
-               pte = pte_mkwrite(pte);
+               pte = maybe_mkwrite(pte, vma);
+
 #ifdef CONFIG_HUGETLB_PAGE
        if (PageHuge(new)) {
                pte = pte_mkhuge(pte);
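
Note: the one-line change above closes a race. pte_mkwrite() marked the
pte writable unconditionally, so a concurrent mprotect() could drop
VM_WRITE from the vma after migration started and still end up with a
writable pte. maybe_mkwrite() rechecks the vma first; for reference, the
era's helper is roughly the following (paraphrased, not quoted from this
tree):

    static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
    {
            /* Grant write access only if the vma still permits it */
            if (likely(vma->vm_flags & VM_WRITE))
                    pte = pte_mkwrite(pte);
            return pte;
    }
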
@@ -178,6 +181,37 @@ out:
 }
 
 /*
+ * Congratulations to trinity for discovering this bug.
+ * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
+ * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
+ * replace the specified range by file ptes throughout (maybe populated after).
+ * If page migration finds a page within that range, while it's still located
+ * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
+ * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
+ * But if the migrating page is in a part of the vma outside the range to be
+ * remapped, then it will not be cleared, and remove_migration_ptes() needs to
+ * deal with it.  Fortunately, this part of the vma is of course still linear,
+ * so we just need to use linear location on the nonlinear list.
+ */
+static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
+               struct address_space *mapping, void *arg)
+{
+       struct vm_area_struct *vma;
+       /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       unsigned long addr;
+
+       list_for_each_entry(vma,
+               &mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+               addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+               if (addr >= vma->vm_start && addr < vma->vm_end)
+                       remove_migration_pte(page, vma, addr, arg);
+       }
+       return SWAP_AGAIN;
+}
+
+/*
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
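
Note: the address recovery in remove_linear_migration_ptes_from_nonlinear()
is plain linear arithmetic. With hypothetical numbers (invented purely for
illustration; 4KiB pages, so PAGE_SHIFT == 12):

    unsigned long vm_start = 0x400000;  /* vma->vm_start */
    pgoff_t vm_pgoff = 0x10;            /* file offset (in pages) of vm_start */
    pgoff_t pgoff = 0x12;               /* migrating page's index in the file */

    unsigned long addr = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);
    /* addr == 0x402000: where the pte must sit if this part of the vma
     * is still linear; the vm_start/vm_end range check then filters out
     * pages whose linear address would fall outside this vma. */
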
@@ -186,6 +220,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = old,
+               .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
        };
 
        rmap_walk(new, &rwc);
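
Note: registering .file_nonlinear is what routes the walk to the new
handler. As a simplified sketch of the dispatch in this era's
rmap_walk_file() (paraphrased from memory, locking and the done() hook
elided; not verbatim kernel source):

    vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
            unsigned long address = vma_address(page, vma);
            ret = rwc->rmap_one(page, vma, address, rwc->arg);
            if (ret != SWAP_AGAIN)
                    goto done;
    }
    /* Nonlinear vmas live on i_mmap_nonlinear, not in the interval
     * tree, so they need the dedicated callback: */
    if (rwc->file_nonlinear && !list_empty(&mapping->i_mmap_nonlinear))
            ret = rwc->file_nonlinear(page, mapping, rwc->arg);
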
@@ -906,8 +941,9 @@ out:
  * Obtain the lock on page, remove all ptes and migrate the page
  * to the newly allocated page in newpage.
  */
-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-                       struct page *page, int force, enum migrate_mode mode)
+static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
+                       unsigned long private, struct page *page, int force,
+                       enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -951,11 +987,18 @@ out:
                                page_is_file_cache(page));
                putback_lru_page(page);
        }
+
        /*
-        * Move the new page to the LRU. If migration was not successful
-        * then this will free the page.
+        * If migration was not successful and there's a freeing callback, use
+        * it.  Otherwise, putback_lru_page() will drop the reference grabbed
+        * during isolation.
         */
-       putback_lru_page(newpage);
+       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+               ClearPageSwapBacked(newpage);
+               put_new_page(newpage, private);
+       } else
+               putback_lru_page(newpage);
+
        if (result) {
                if (rc)
                        *result = rc;
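
Note: put_new_page has type free_page_t, a typedef introduced alongside
this change (void return, taking the page and the same private value
handed to get_new_page). The motivating user is memory compaction, which
wants failed migration targets back on its private free list instead of
released through putback_lru_page(); ClearPageSwapBacked() undoes state
the aborted migration attempt may have left on the target before handing
it back. A hypothetical callback of the right shape (names invented):

    /* Matches free_page_t; purely illustrative. */
    static void example_put_new_page(struct page *page, unsigned long private)
    {
            struct list_head *freelist = (struct list_head *)private;

            /* Keep the unused target for reuse rather than freeing it */
            list_add(&page->lru, freelist);
    }
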
@@ -984,8 +1027,9 @@ out:
  * will wait in the page fault for migration to complete.
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
-                               unsigned long private, struct page *hpage,
-                               int force, enum migrate_mode mode)
+                               free_page_t put_new_page, unsigned long private,
+                               struct page *hpage, int force,
+                               enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -1024,20 +1068,30 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (!page_mapped(hpage))
                rc = move_to_new_page(new_hpage, hpage, 1, mode);
 
-       if (rc)
+       if (rc != MIGRATEPAGE_SUCCESS)
                remove_migration_ptes(hpage, hpage);
 
        if (anon_vma)
                put_anon_vma(anon_vma);
 
-       if (!rc)
+       if (rc == MIGRATEPAGE_SUCCESS)
                hugetlb_cgroup_migrate(hpage, new_hpage);
 
        unlock_page(hpage);
 out:
        if (rc != -EAGAIN)
                putback_active_hugepage(hpage);
-       put_page(new_hpage);
+
+       /*
+        * If migration was not successful and there's a freeing callback, use
+        * it.  Otherwise, put_page() will drop the reference grabbed during
+        * isolation.
+        */
+       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+               put_new_page(new_hpage, private);
+       else
+               put_page(new_hpage);
+
        if (result) {
                if (rc)
                        *result = rc;
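
Note: two details in this hunk. First, the switch from if (rc) and
if (!rc) to explicit comparisons is behavior-preserving in kernels of
this vintage, where (paraphrasing include/linux/migrate.h):

    #define MIGRATEPAGE_SUCCESS 0
    /* Failures come back as negative errnos (-EAGAIN, -ENOMEM, ...),
     * so rc != 0 and rc != MIGRATEPAGE_SUCCESS coincide today; the
     * explicit form documents that rc is a status, not a plain error. */

Second, the huge page path pairs put_new_page() with put_page() rather
than putback_lru_page(), and skips ClearPageSwapBacked(): hugetlb pages
are never on the LRU, so a plain reference drop is the correct fallback.
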
@@ -1054,6 +1108,8 @@ out:
  * @from:              The list of pages to be migrated.
  * @get_new_page:      The function used to allocate free pages to be used
  *                     as the target of the page migration.
+ * @put_new_page:      The function used to free target pages if migration
+ *                     fails, or NULL if no special handling is necessary.
  * @private:           Private data to be passed on to get_new_page()
  * @mode:              The migration mode that specifies the constraints for
  *                     page migration, if any.
@@ -1067,7 +1123,8 @@ out:
  * Returns the number of pages that were not migrated, or an error code.
  */
 int migrate_pages(struct list_head *from, new_page_t get_new_page,
-               unsigned long private, enum migrate_mode mode, int reason)
+               free_page_t put_new_page, unsigned long private,
+               enum migrate_mode mode, int reason)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -1089,10 +1146,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 
                        if (PageHuge(page))
                                rc = unmap_and_move_huge_page(get_new_page,
-                                               private, page, pass > 2, mode);
+                                               put_new_page, private, page,
+                                               pass > 2, mode);
                        else
-                               rc = unmap_and_move(get_new_page, private,
-                                               page, pass > 2, mode);
+                               rc = unmap_and_move(get_new_page, put_new_page,
+                                               private, page, pass > 2, mode);
 
                        switch(rc) {
                        case -ENOMEM:
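
Note: with the extra parameter, every migrate_pages() caller must be
updated; callers that need no special cleanup simply pass NULL, as the
two remaining hunks show, and keep the old putback/put_page behavior.
A hypothetical caller that does want its targets returned (reusing the
example_put_new_page sketched above, plus an assumed new_page_t
allocator; both names are invented):

    LIST_HEAD(freelist);        /* unused targets come back here */

    nr_failed = migrate_pages(&pagelist, example_get_new_page,
                              example_put_new_page,
                              (unsigned long)&freelist,
                              MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
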
@@ -1241,7 +1299,7 @@ set_status:
 
        err = 0;
        if (!list_empty(&pagelist)) {
-               err = migrate_pages(&pagelist, new_page_node,
+               err = migrate_pages(&pagelist, new_page_node, NULL,
                                (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
@@ -1697,7 +1755,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 
        list_add(&page->lru, &migratepages);
        nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
-                                    node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
+                                    NULL, node, MIGRATE_ASYNC,
+                                    MR_NUMA_MISPLACED);
        if (nr_remaining) {
                if (!list_empty(&migratepages)) {
                        list_del(&page->lru);