tizen: packaging: Add baselibs.conf to provide 64-bit kernel & modules for 32-bit...
[platform/kernel/linux-rpi.git] / mm / hugetlb.c
index 52d2607..5e6c4d3 100644 (file)
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                down_read(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               down_read(&resv_map->rw_sema);
        }
 }
 
@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                up_read(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               up_read(&resv_map->rw_sema);
        }
 }
 
@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                down_write(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               down_write(&resv_map->rw_sema);
        }
 }
 
@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                up_write(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               up_write(&resv_map->rw_sema);
        }
 }
 
 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
 {
-       struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
-       if (!__vma_shareable_lock(vma))
-               return 1;
+       if (__vma_shareable_lock(vma)) {
+               struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+               return down_write_trylock(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
 
-       return down_write_trylock(&vma_lock->rw_sema);
+               return down_write_trylock(&resv_map->rw_sema);
+       }
+
+       return 1;
 }
 
 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                lockdep_assert_held(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               lockdep_assert_held(&resv_map->rw_sema);
        }
 }
 
@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                __hugetlb_vma_unlock_write_put(vma_lock);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               /* no free for anon vmas, but still need to unlock */
+               up_write(&resv_map->rw_sema);
        }
 }
 
@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);
+       init_rwsem(&resv_map->rw_sema);
 
        resv_map->adds_in_progress = 0;
        /*
@@ -1138,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
-       set_vma_private_data(vma, (get_vma_private_data(vma) &
-                               HPAGE_RESV_MASK) | (unsigned long)map);
+       set_vma_private_data(vma, (unsigned long)map);
 }
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -1157,6 +1189,13 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
        return (get_vma_private_data(vma) & flag) != 0;
 }
 
+bool __vma_private_lock(struct vm_area_struct *vma)
+{
+       return !(vma->vm_flags & VM_MAYSHARE) &&
+               get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
+               is_vma_resv_set(vma, HPAGE_RESV_OWNER);
+}
+
 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 {
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
@@ -5274,9 +5313,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
        return len + old_addr - old_end;
 }
 
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+                           unsigned long start, unsigned long end,
+                           struct page *ref_page, zap_flags_t zap_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -5405,16 +5444,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
                tlb_flush_mmu_tlbonly(tlb);
 }
 
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
-                         struct vm_area_struct *vma, unsigned long start,
-                         unsigned long end, struct page *ref_page,
-                         zap_flags_t zap_flags)
/*
 * Prepare a hugetlb vma for zapping: widen [*start, *end) if PMD sharing
 * is possible, then take the hugetlb vma lock and the file's i_mmap rwsem
 * for writing.  Pair with __hugetlb_zap_end(), which releases the locks
 * in the reverse order they were taken here.
 */
void __hugetlb_zap_begin(struct vm_area_struct *vma,
			 unsigned long *start, unsigned long *end)
{
	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
		return;

	adjust_range_if_pmd_sharing_possible(vma, start, end);
	hugetlb_vma_lock_write(vma);
	/*
	 * NOTE(review): vm_file was already checked above, so this re-check
	 * looks redundant; presumably defensive — confirm against upstream
	 * before simplifying.
	 */
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
}
 
/*
 * Release the locks taken by __hugetlb_zap_begin() after the range has
 * been unmapped.  @details may be NULL, in which case no zap flags are
 * assumed.
 */
void __hugetlb_zap_end(struct vm_area_struct *vma,
		       struct zap_details *details)
{
	zap_flags_t zap_flags = details ? details->zap_flags : 0;

	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
		return;

	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
		/*
		 * On the final unmap the vma is going away, so drop the
		 * write lock and free the shared vma_lock structure as one
		 * operation rather than leaving a lock that could be
		 * handed to someone else.
		 */
		__hugetlb_vma_unlock_write_free(vma);
	} else {
		hugetlb_vma_unlock_write(vma);
	}

	/* Released after the vma lock — reverse of the acquisition order. */
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6478,7 +6527,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
                        }
                }
 
-               page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+               page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
 
                /*
                 * Note that page may be a sub-page, and with vmemmap
@@ -6811,8 +6860,10 @@ out_err:
                 */
                if (chg >= 0 && add < 0)
                        region_abort(resv_map, from, to, regions_needed);
-       if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+       if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                kref_put(&resv_map->refs, resv_map_release);
+               set_vma_resv_map(vma, NULL);
+       }
        return false;
 }