diff --git a/mm/memory.c b/mm/memory.c
index be6247c..d037e84 100644
@@ -1301,6 +1301,17 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
        return ret;
 }
 
+/* Whether we should zap all COWed (private) pages too */
+static inline bool should_zap_cows(struct zap_details *details)
+{
+       /* By default, zap all pages */
+       if (!details)
+               return true;
+
+       /* Or, we zap COWed pages only if the caller wants to */
+       return !details->check_mapping;
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
@@ -1396,16 +1407,18 @@ again:
                        continue;
                }
 
-               /* If details->check_mapping, we leave swap entries. */
-               if (unlikely(details))
-                       continue;
-
-               if (!non_swap_entry(entry))
+               if (!non_swap_entry(entry)) {
+                       /* Genuine swap entry, hence a private anon page */
+                       if (!should_zap_cows(details))
+                               continue;
                        rss[MM_SWAPENTS]--;
-               else if (is_migration_entry(entry)) {
+               } else if (is_migration_entry(entry)) {
                        struct page *page;
 
                        page = pfn_swap_entry_to_page(entry);
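+                       /*
+                        * Honour the mapping filter: only zap pages that
+                        * belong to the address_space the caller asked for.
+                        */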
+                       if (details && details->check_mapping &&
+                           details->check_mapping != page_rmapping(page))
+                               continue;
                        rss[mm_counter(page)]--;
                }
                if (unlikely(!free_swap_and_cache(entry)))
@@ -3861,14 +3874,18 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                return ret;
 
        if (unlikely(PageHWPoison(vmf->page))) {
+               struct page *page = vmf->page;
                vm_fault_t poisonret = VM_FAULT_HWPOISON;
                if (ret & VM_FAULT_LOCKED) {
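+                       /*
+                        * Unmap the hwpoisoned page first: invalidate_inode_page()
+                        * only drops clean, unmapped pages from the page cache.
+                        */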
+                       if (page_mapped(page))
+                               unmap_mapping_pages(page_mapping(page),
+                                                   page->index, 1, false);
                        /* Retry if a clean page was removed from the cache. */
-                       if (invalidate_inode_page(vmf->page))
-                               poisonret = 0;
-                       unlock_page(vmf->page);
+                       if (invalidate_inode_page(page))
+                               poisonret = VM_FAULT_NOPAGE;
+                       unlock_page(page);
                }
-               put_page(vmf->page);
+               put_page(page);
                vmf->page = NULL;
                return poisonret;
        }
@@ -4054,9 +4071,12 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
                }
        }
 
-       /* See comment in handle_pte_fault() */
+       /*
+        * See comment in handle_pte_fault() for how this scenario happens, we
+        * need to return NOPAGE so that we drop this page.
+        */
        if (pmd_devmap_trans_unstable(vmf->pmd))
-               return 0;
+               return VM_FAULT_NOPAGE;
 
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                      vmf->address, &vmf->ptl);
@@ -4073,7 +4093,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 }
 
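+/* Fault around a single 4KiB page by default (the upstream default is 64KiB). */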
 static unsigned long fault_around_bytes __read_mostly =
-       rounddown_pow_of_two(65536);
+       rounddown_pow_of_two(4096);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -4474,6 +4494,19 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
        defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
        /* No support for anonymous transparent PUD pages yet */
        if (vma_is_anonymous(vmf->vma))
+               return VM_FAULT_FALLBACK;
+       if (vmf->vma->vm_ops->huge_fault)
+               return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+       return VM_FAULT_FALLBACK;
+}
+
+static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&                    \
+       defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+       /* No support for anonymous transparent PUD pages yet */
+       if (vma_is_anonymous(vmf->vma))
                goto split;
        if (vmf->vma->vm_ops->huge_fault) {
                vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
@@ -4484,19 +4517,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
 split:
        /* COW or write-notify not handled on PUD level: split pud.*/
        __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-       return VM_FAULT_FALLBACK;
-}
-
-static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       /* No support for anonymous transparent PUD pages yet */
-       if (vma_is_anonymous(vmf->vma))
-               return VM_FAULT_FALLBACK;
-       if (vmf->vma->vm_ops->huge_fault)
-               return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
        return VM_FAULT_FALLBACK;
 }
 
@@ -5270,7 +5291,7 @@ void __might_fault(const char *file, int line)
                return;
        if (pagefault_disabled())
                return;
-       __might_sleep(file, line, 0);
+       __might_sleep(file, line);
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
        if (current->mm)
                might_lock_read(&current->mm->mmap_lock);
@@ -5450,6 +5471,8 @@ long copy_huge_page_from_user(struct page *dst_page,
                if (rc)
                        break;
 
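+               /*
+                * The copy above went through the kernel mapping; flush the
+                * dcache so the data is visible via the user mapping too.
+                */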
+               flush_dcache_page(subpage);
+
                cond_resched();
        }
        return ret_val;