diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0b7656e..67d0c17 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg)
        return 0;
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src)
-{
-       int i;
-       struct hstate *h = page_hstate(src);
-       struct page *dst_base = dst;
-       struct page *src_base = src;
-
-       for (i = 0; i < pages_per_huge_page(h); ) {
-               cond_resched();
-               copy_highpage(dst, src);
-
-               i++;
-               dst = mem_map_next(dst, dst_base, i);
-               src = mem_map_next(src, src_base, i);
-       }
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-       int i;
-       struct hstate *h = page_hstate(src);
-
-       if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-               copy_gigantic_page(dst, src);
-               return;
-       }
-
-       might_sleep();
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               cond_resched();
-               copy_highpage(dst + i, src + i);
-       }
-}
-
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
        int nid = page_to_nid(page);
@@ -574,7 +540,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
                goto err;
 
 retry_cpuset:
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask(h), &mpol, &nodemask);
 
@@ -596,7 +562,7 @@ retry_cpuset:
        }
 
        mpol_cond_put(mpol);
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return page;
 
@@ -618,7 +584,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
-       VM_BUG_ON(hugetlb_cgroup_from_page(page));
+       VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
@@ -724,18 +690,26 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
  */
 int PageHuge(struct page *page)
 {
-       compound_page_dtor *dtor;
-
        if (!PageCompound(page))
                return 0;
 
        page = compound_head(page);
-       dtor = get_compound_page_dtor(page);
-
-       return dtor == free_huge_page;
+       return get_compound_page_dtor(page) == free_huge_page;
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
+/*
+ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
+ * normal or transparent huge pages.
+ */
+int PageHeadHuge(struct page *page_head)
+{
+       if (!PageHead(page_head))
+               return 0;
+
+       return get_compound_page_dtor(page_head) == free_huge_page;
+}
+
 pgoff_t __basepage_index(struct page *page)
 {
        struct page *page_head = compound_head(page);
@@ -1115,7 +1089,7 @@ retry:
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
-               VM_BUG_ON(page_count(page));
+               VM_BUG_ON_PAGE(page_count(page), page);
                enqueue_huge_page(h, page);
        }
 free:
@@ -1160,6 +1134,7 @@ static void return_unused_surplus_pages(struct hstate *h,
        while (nr_pages--) {
                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
                        break;
+               cond_resched_lock(&hugetlb_lock);
        }
 }
 
@@ -1297,9 +1272,9 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
                void *addr;
 
-               addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
-                               huge_page_size(h), huge_page_size(h), 0);
-
+               addr = memblock_virt_alloc_try_nid_nopanic(
+                               huge_page_size(h), huge_page_size(h),
+                               0, BOOTMEM_ALLOC_ACCESSIBLE, node);
                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
@@ -1339,8 +1314,8 @@ static void __init gather_bootmem_prealloc(void)
 
 #ifdef CONFIG_HIGHMEM
                page = pfn_to_page(m->phys >> PAGE_SHIFT);
-               free_bootmem_late((unsigned long)m,
-                                 sizeof(struct huge_bootmem_page));
+               memblock_free_late(__pa(m),
+                                  sizeof(struct huge_bootmem_page));
 #else
                page = virt_to_page(m);
 #endif
@@ -1535,6 +1510,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
        while (min_count < persistent_huge_pages(h)) {
                if (!free_pool_huge_page(h, nodes_allowed, 0))
                        break;
+               cond_resched_lock(&hugetlb_lock);
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, 1))
@@ -2095,6 +2071,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
        unsigned long tmp;
        int ret;
 
+       if (!hugepages_supported())
+               return -ENOTSUPP;
+
        tmp = h->max_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
@@ -2148,6 +2127,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        unsigned long tmp;
        int ret;
 
+       if (!hugepages_supported())
+               return -ENOTSUPP;
+
        tmp = h->nr_overcommit_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
@@ -2173,6 +2155,8 @@ out:
 void hugetlb_report_meminfo(struct seq_file *m)
 {
        struct hstate *h = &default_hstate;
+       if (!hugepages_supported())
+               return;
        seq_printf(m,
                        "HugePages_Total:   %5lu\n"
                        "HugePages_Free:    %5lu\n"
@@ -2189,6 +2173,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
 int hugetlb_report_node_meminfo(int nid, char *buf)
 {
        struct hstate *h = &default_hstate;
+       if (!hugepages_supported())
+               return 0;
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n"
@@ -2203,6 +2189,9 @@ void hugetlb_show_meminfo(void)
        struct hstate *h;
        int nid;
 
+       if (!hugepages_supported())
+               return;
+
        for_each_node_state(nid, N_MEMORY)
                for_each_hstate(h)
                        pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
@@ -2362,6 +2351,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
                update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_migration_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
@@ -2372,24 +2386,53 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        int cow;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+       int ret = 0;
 
        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
+       mmun_start = vma->vm_start;
+       mmun_end = vma->vm_end;
+       if (cow)
+               mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+
        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
+               spinlock_t *src_ptl, *dst_ptl;
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr, sz);
-               if (!dst_pte)
-                       goto nomem;
+               if (!dst_pte) {
+                       ret = -ENOMEM;
+                       break;
+               }
 
                /* If the pagetables are shared don't copy or take references */
                if (dst_pte == src_pte)
                        continue;
 
-               spin_lock(&dst->page_table_lock);
-               spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
-               if (!huge_pte_none(huge_ptep_get(src_pte))) {
+               dst_ptl = huge_pte_lock(h, dst, dst_pte);
+               src_ptl = huge_pte_lockptr(h, src, src_pte);
+               spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+               entry = huge_ptep_get(src_pte);
+               if (huge_pte_none(entry)) { /* skip none entry */
+                       ;
+               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+                                   is_hugetlb_entry_hwpoisoned(entry))) {
+                       swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+                       if (is_write_migration_entry(swp_entry) && cow) {
+                               /*
+                                * COW mappings require pages in both
+                                * parent and child to be set to read.
+                                */
+                               make_migration_entry_read(&swp_entry);
+                               entry = swp_entry_to_pte(swp_entry);
+                               set_huge_pte_at(src, addr, src_pte, entry);
+                       }
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
                        entry = huge_ptep_get(src_pte);
@@ -2398,39 +2441,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        page_dup_rmap(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
-               spin_unlock(&src->page_table_lock);
-               spin_unlock(&dst->page_table_lock);
+               spin_unlock(src_ptl);
+               spin_unlock(dst_ptl);
        }
-       return 0;
 
-nomem:
-       return -ENOMEM;
-}
+       if (cow)
+               mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-               return 1;
-       else
-               return 0;
+       return ret;
 }
 
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
@@ -2442,6 +2460,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
+       spinlock_t *ptl;
        struct page *page;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
@@ -2455,25 +2474,25 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        tlb_start_vma(tlb, vma);
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 again:
-       spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
 
+               ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, &address, ptep))
-                       continue;
+                       goto unlock;
 
                pte = huge_ptep_get(ptep);
                if (huge_pte_none(pte))
-                       continue;
+                       goto unlock;
 
                /*
                 * HWPoisoned hugepage is already unmapped and dropped reference
                 */
                if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
                        huge_pte_clear(mm, address, ptep);
-                       continue;
+                       goto unlock;
                }
 
                page = pte_page(pte);
@@ -2484,7 +2503,7 @@ again:
                 */
                if (ref_page) {
                        if (page != ref_page)
-                               continue;
+                               goto unlock;
 
                        /*
                         * Mark the VMA as having unmapped its page so that
@@ -2501,13 +2520,18 @@ again:
 
                page_remove_rmap(page);
                force_flush = !__tlb_remove_page(tlb, page);
-               if (force_flush)
+               if (force_flush) {
+                       spin_unlock(ptl);
                        break;
+               }
                /* Bail out after unmapping reference page if supplied */
-               if (ref_page)
+               if (ref_page) {
+                       spin_unlock(ptl);
                        break;
+               }
+unlock:
+               spin_unlock(ptl);
        }
-       spin_unlock(&mm->page_table_lock);
        /*
         * mmu_gather ran out of room to batch pages, we break out of
         * the PTE lock to avoid doing the potential expensive TLB invalidate
@@ -2613,7 +2637,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte,
-                       struct page *pagecache_page)
+                       struct page *pagecache_page, spinlock_t *ptl)
 {
        struct hstate *h = hstate_vma(vma);
        struct page *old_page, *new_page;
@@ -2647,8 +2671,8 @@ retry_avoidcopy:
 
        page_cache_get(old_page);
 
-       /* Drop page_table_lock as buddy allocator may be called */
-       spin_unlock(&mm->page_table_lock);
+       /* Drop page table lock as buddy allocator may be called */
+       spin_unlock(ptl);
        new_page = alloc_huge_page(vma, address, outside_reserve);
 
        if (IS_ERR(new_page)) {
@@ -2666,13 +2690,13 @@ retry_avoidcopy:
                        BUG_ON(huge_pte_none(pte));
                        if (unmap_ref_private(mm, vma, old_page, address)) {
                                BUG_ON(huge_pte_none(pte));
-                               spin_lock(&mm->page_table_lock);
+                               spin_lock(ptl);
                                ptep = huge_pte_offset(mm, address & huge_page_mask(h));
                                if (likely(pte_same(huge_ptep_get(ptep), pte)))
                                        goto retry_avoidcopy;
                                /*
-                                * race occurs while re-acquiring page_table_lock, and
-                                * our job is done.
+                                * race occurs while re-acquiring page table
+                                * lock, and our job is done.
                                 */
                                return 0;
                        }
@@ -2680,7 +2704,7 @@ retry_avoidcopy:
                }
 
                /* Caller expects lock to be held */
-               spin_lock(&mm->page_table_lock);
+               spin_lock(ptl);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
                else
@@ -2695,7 +2719,7 @@ retry_avoidcopy:
                page_cache_release(new_page);
                page_cache_release(old_page);
                /* Caller expects lock to be held */
-               spin_lock(&mm->page_table_lock);
+               spin_lock(ptl);
                return VM_FAULT_OOM;
        }
 
@@ -2707,10 +2731,10 @@ retry_avoidcopy:
        mmun_end = mmun_start + huge_page_size(h);
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        /*
-        * Retake the page_table_lock to check for racing updates
+        * Retake the page table lock to check for racing updates
         * before the page tables are altered
         */
-       spin_lock(&mm->page_table_lock);
+       spin_lock(ptl);
        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                ClearPagePrivate(new_page);
@@ -2724,13 +2748,13 @@ retry_avoidcopy:
                /* Make the old page be freed below */
                new_page = old_page;
        }
-       spin_unlock(&mm->page_table_lock);
+       spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        page_cache_release(new_page);
        page_cache_release(old_page);
 
        /* Caller expects lock to be held */
-       spin_lock(&mm->page_table_lock);
+       spin_lock(ptl);
        return 0;
 }
 
@@ -2778,6 +2802,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        struct address_space *mapping;
        pte_t new_pte;
+       spinlock_t *ptl;
 
        /*
         * Currently, we are forced to kill the process in the event the
@@ -2864,7 +2889,8 @@ retry:
                        goto backout_unlocked;
                }
 
-       spin_lock(&mm->page_table_lock);
+       ptl = huge_pte_lockptr(h, mm, ptep);
+       spin_lock(ptl);
        size = i_size_read(mapping->host) >> huge_page_shift(h);
        if (idx >= size)
                goto backout;
@@ -2885,16 +2911,16 @@ retry:
 
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
-               ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+               ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
        }
 
-       spin_unlock(&mm->page_table_lock);
+       spin_unlock(ptl);
        unlock_page(page);
 out:
        return ret;
 
 backout:
-       spin_unlock(&mm->page_table_lock);
+       spin_unlock(ptl);
 backout_unlocked:
        unlock_page(page);
        put_page(page);
@@ -2906,6 +2932,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        pte_t *ptep;
        pte_t entry;
+       spinlock_t *ptl;
        int ret;
        struct page *page = NULL;
        struct page *pagecache_page = NULL;
@@ -2918,7 +2945,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (ptep) {
                entry = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_migration(entry))) {
-                       migration_entry_wait_huge(mm, ptep);
+                       migration_entry_wait_huge(vma, mm, ptep);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
@@ -2974,17 +3001,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (page != pagecache_page)
                lock_page(page);
 
-       spin_lock(&mm->page_table_lock);
+       ptl = huge_pte_lockptr(h, mm, ptep);
+       spin_lock(ptl);
        /* Check for a racing update before calling hugetlb_cow */
        if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
-               goto out_page_table_lock;
+               goto out_ptl;
 
 
        if (flags & FAULT_FLAG_WRITE) {
                if (!huge_pte_write(entry)) {
                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
-                                                       pagecache_page);
-                       goto out_page_table_lock;
+                                       pagecache_page, ptl);
+                       goto out_ptl;
                }
                entry = huge_pte_mkdirty(entry);
        }
@@ -2993,8 +3021,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                flags & FAULT_FLAG_WRITE))
                update_mmu_cache(vma, address, ptep);
 
-out_page_table_lock:
-       spin_unlock(&mm->page_table_lock);
+out_ptl:
+       spin_unlock(ptl);
 
        if (pagecache_page) {
                unlock_page(pagecache_page);
@@ -3020,9 +3048,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long remainder = *nr_pages;
        struct hstate *h = hstate_vma(vma);
 
-       spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
+               spinlock_t *ptl = NULL;
                int absent;
                struct page *page;
 
@@ -3030,8 +3058,12 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
+                *
+                * Note that page table lock is not held when pte is null.
                 */
                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+               if (pte)
+                       ptl = huge_pte_lock(h, mm, pte);
                absent = !pte || huge_pte_none(huge_ptep_get(pte));
 
                /*
@@ -3043,6 +3075,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                if (absent && (flags & FOLL_DUMP) &&
                    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
+                       if (pte)
+                               spin_unlock(ptl);
                        remainder = 0;
                        break;
                }
@@ -3062,10 +3096,10 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                      !huge_pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
-                       spin_unlock(&mm->page_table_lock);
+                       if (pte)
+                               spin_unlock(ptl);
                        ret = hugetlb_fault(mm, vma, vaddr,
                                (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
-                       spin_lock(&mm->page_table_lock);
                        if (!(ret & VM_FAULT_ERROR))
                                continue;
 
@@ -3078,7 +3112,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
-                       get_page(pages[i]);
+                       get_page_foll(pages[i]);
                }
 
                if (vmas)
@@ -3096,8 +3130,8 @@ same_page:
                         */
                        goto same_page;
                }
+               spin_unlock(ptl);
        }
-       spin_unlock(&mm->page_table_lock);
        *nr_pages = remainder;
        *position = vaddr;
 
@@ -3118,13 +3152,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        flush_cache_range(vma, address, end);
 
        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-       spin_lock(&mm->page_table_lock);
        for (; address < end; address += huge_page_size(h)) {
+               spinlock_t *ptl;
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
+               ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        pages++;
+                       spin_unlock(ptl);
                        continue;
                }
                if (!huge_pte_none(huge_ptep_get(ptep))) {
@@ -3134,8 +3170,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                        set_huge_pte_at(mm, address, ptep, pte);
                        pages++;
                }
+               spin_unlock(ptl);
        }
-       spin_unlock(&mm->page_table_lock);
        /*
         * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
         * may have cleared our pud entry and done put_page on the page table:
@@ -3298,6 +3334,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        unsigned long saddr;
        pte_t *spte = NULL;
        pte_t *pte;
+       spinlock_t *ptl;
 
        if (!vma_shareable(vma, addr))
                return (pte_t *)pmd_alloc(mm, pud, addr);
@@ -3320,13 +3357,14 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (!spte)
                goto out;
 
-       spin_lock(&mm->page_table_lock);
+       ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
+       spin_lock(ptl);
        if (pud_none(*pud))
                pud_populate(mm, pud,
                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
        else
                put_page(virt_to_page(spte));
-       spin_unlock(&mm->page_table_lock);
+       spin_unlock(ptl);
 out:
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
        mutex_unlock(&mapping->i_mmap_mutex);
@@ -3340,7 +3378,7 @@ out:
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * called with vma->vm_mm->page_table_lock held.
+ * called with page table lock held.
  *
  * returns: 1 successfully unmapped a shared pte page
  *         0 the underlying pte page is not shared, or it is the last user
@@ -3496,7 +3534,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 
 bool isolate_huge_page(struct page *page, struct list_head *list)
 {
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        if (!get_page_unless_zero(page))
                return false;
        spin_lock(&hugetlb_lock);
@@ -3507,7 +3545,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 
 void putback_active_hugepage(struct page *page)
 {
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
@@ -3516,7 +3554,7 @@ void putback_active_hugepage(struct page *page)
 
 bool is_hugepage_active(struct page *page)
 {
-       VM_BUG_ON(!PageHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page), page);
        /*
         * This function can be called for a tail page because the caller,
         * scan_movable_pages, scans through a given pfn-range which typically