diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 61aa9ae..7d36dd9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -600,7 +600,8 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 
        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
        if (flags & (MPOL_MF_MOVE_ALL) ||
-           (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
+           (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
+            !hugetlb_pmd_shared(pte))) {
                if (isolate_hugetlb(page, qp->pagelist) &&
                        (flags & MPOL_MF_STRICT))
                        /*
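
The condition added above can be read as a standalone predicate. The sketch below is illustrative only and not part of the patch (the helper name is made up); it merely restates the check using the same kernel helpers, page_mapcount() and hugetlb_pmd_shared(), that appear in the hunk:

	/*
	 * Illustrative restatement of the migration check in
	 * queue_pages_hugetlb(): isolate the hugetlb page when
	 * MPOL_MF_MOVE_ALL is set, or when MPOL_MF_MOVE is set and the
	 * page is mapped exactly once through a PMD that is not shared
	 * with another process.
	 */
	static bool should_isolate_hugetlb(struct page *page, pte_t *pte,
					   unsigned long flags)
	{
		if (flags & MPOL_MF_MOVE_ALL)
			return true;
		return (flags & MPOL_MF_MOVE) && page_mapcount(page) == 1 &&
		       !hugetlb_pmd_shared(pte);
	}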
@@ -783,70 +784,58 @@ static int vma_replace_policy(struct vm_area_struct *vma,
        return err;
 }
 
-/* Step 2: apply policy to a range and do splits. */
-static int mbind_range(struct mm_struct *mm, unsigned long start,
-                      unsigned long end, struct mempolicy *new_pol)
+/* Split or merge the VMA (if required) and apply the new policy */
+static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
+               struct vm_area_struct **prev, unsigned long start,
+               unsigned long end, struct mempolicy *new_pol)
 {
-       MA_STATE(mas, &mm->mm_mt, start, start);
-       struct vm_area_struct *prev;
-       struct vm_area_struct *vma;
-       int err = 0;
+       struct vm_area_struct *merged;
+       unsigned long vmstart, vmend;
        pgoff_t pgoff;
+       int err;
 
-       prev = mas_prev(&mas, 0);
-       if (unlikely(!prev))
-               mas_set(&mas, start);
+       vmend = min(end, vma->vm_end);
+       if (start > vma->vm_start) {
+               *prev = vma;
+               vmstart = start;
+       } else {
+               vmstart = vma->vm_start;
+       }
 
-       vma = mas_find(&mas, end - 1);
-       if (WARN_ON(!vma))
+       if (mpol_equal(vma_policy(vma), new_pol)) {
+               *prev = vma;
                return 0;
+       }
 
-       if (start > vma->vm_start)
-               prev = vma;
-
-       for (; vma; vma = mas_next(&mas, end - 1)) {
-               unsigned long vmstart = max(start, vma->vm_start);
-               unsigned long vmend = min(end, vma->vm_end);
-
-               if (mpol_equal(vma_policy(vma), new_pol))
-                       goto next;
-
-               pgoff = vma->vm_pgoff +
-                       ((vmstart - vma->vm_start) >> PAGE_SHIFT);
-               prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-                                vma->anon_vma, vma->vm_file, pgoff,
-                                new_pol, vma->vm_userfaultfd_ctx,
-                                anon_vma_name(vma));
-               if (prev) {
-                       /* vma_merge() invalidated the mas */
-                       mas_pause(&mas);
-                       vma = prev;
-                       goto replace;
-               }
-               if (vma->vm_start != vmstart) {
-                       err = split_vma(vma->vm_mm, vma, vmstart, 1);
-                       if (err)
-                               goto out;
-                       /* split_vma() invalidated the mas */
-                       mas_pause(&mas);
-               }
-               if (vma->vm_end != vmend) {
-                       err = split_vma(vma->vm_mm, vma, vmend, 0);
-                       if (err)
-                               goto out;
-                       /* split_vma() invalidated the mas */
-                       mas_pause(&mas);
-               }
-replace:
-               err = vma_replace_policy(vma, new_pol);
+       pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+       merged = vma_merge(vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
+                          vma->anon_vma, vma->vm_file, pgoff, new_pol,
+                          vma->vm_userfaultfd_ctx, anon_vma_name(vma));
+       if (merged) {
+               *prev = merged;
+               /* vma_merge() invalidated the mas */
+               mas_pause(&vmi->mas);
+               return vma_replace_policy(merged, new_pol);
+       }
+
+       if (vma->vm_start != vmstart) {
+               err = split_vma(vma->vm_mm, vma, vmstart, 1);
                if (err)
-                       goto out;
-next:
-               prev = vma;
+                       return err;
+               /* split_vma() invalidated the mas */
+               mas_pause(&vmi->mas);
        }
 
-out:
-       return err;
+       if (vma->vm_end != vmend) {
+               err = split_vma(vma->vm_mm, vma, vmend, 0);
+               if (err)
+                       return err;
+               /* split_vma() invalidated the mas */
+               mas_pause(&vmi->mas);
+       }
+
+       *prev = vma;
+       return vma_replace_policy(vma, new_pol);
 }
 
 /* Set the process memory policy */
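
The rewritten mbind_range() now operates on a single VMA and hands back, through *prev, the VMA that the next iteration should merge against. Callers are expected to drive it from a VMA-iterator loop; the sketch below simply mirrors the loop added to do_mbind() in the next hunk (mm, start, end and new_pol are assumed to be in scope):

	struct vm_area_struct *vma, *prev;
	struct vma_iterator vmi;
	int err = 0;

	vma_iter_init(&vmi, mm, start);
	prev = vma_prev(&vmi);
	for_each_vma_range(vmi, vma, end) {
		err = mbind_range(&vmi, vma, &prev, start, end, new_pol);
		if (err)
			break;
	}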
@@ -1258,6 +1247,8 @@ static long do_mbind(unsigned long start, unsigned long len,
                     nodemask_t *nmask, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma, *prev;
+       struct vma_iterator vmi;
        struct mempolicy *new;
        unsigned long end;
        int err;
@@ -1327,7 +1318,13 @@ static long do_mbind(unsigned long start, unsigned long len,
                goto up_out;
        }
 
-       err = mbind_range(mm, start, end, new);
+       vma_iter_init(&vmi, mm, start);
+       prev = vma_prev(&vmi);
+       for_each_vma_range(vmi, vma, end) {
+               err = mbind_range(&vmi, vma, &prev, start, end, new);
+               if (err)
+                       break;
+       }
 
        if (!err) {
                int nr_failed = 0;
@@ -1488,10 +1485,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
                unsigned long, home_node, unsigned long, flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct mempolicy *new;
-       unsigned long vmstart;
-       unsigned long vmend;
        unsigned long end;
        int err = -ENOENT;
        VMA_ITERATOR(vmi, mm, start);
@@ -1520,9 +1515,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
        if (end == start)
                return 0;
        mmap_write_lock(mm);
+       prev = vma_prev(&vmi);
        for_each_vma_range(vmi, vma, end) {
-               vmstart = max(start, vma->vm_start);
-               vmend   = min(end, vma->vm_end);
                new = mpol_dup(vma_policy(vma));
                if (IS_ERR(new)) {
                        err = PTR_ERR(new);
@@ -1540,12 +1534,13 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
                 * the home node for vmas we already updated before.
                 */
                if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
+                       mpol_put(new);
                        err = -EOPNOTSUPP;
                        break;
                }
 
                new->home_node = home_node;
-               err = mbind_range(mm, vmstart, vmend, new);
+               err = mbind_range(&vmi, vma, &prev, start, end, new);
                mpol_put(new);
                if (err)
                        break;