hugetlb file truncation/hole punch code may need to back out and take
locks in the required order in the routine hugetlb_unmap_file_folio().
This code can race with vma freeing, as pointed out in [1], and end up
dereferencing a stale vma pointer. To close the race window, take the
vma_lock when clearing the vma_lock->vma pointer.
[1] https://lore.kernel.org/linux-mm/01f10195-7088-4462-6def-909549c75ef4@huawei.com/
[mike.kravetz@oracle.com: address build issues]
Link: https://lkml.kernel.org/r/Yz5L1uxQYR1VqFtJ@monkey
Link: https://lkml.kernel.org/r/20221005011707.514612-3-mike.kravetz@oracle.com
Fixes: "hugetlb: use new vma_lock for pmd sharing synchronization"
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: James Houghton <jthoughton@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
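To make the resulting locking protocol concrete, here is a minimal userspace model of what the patch establishes. This is a sketch, not kernel code: a pthread_rwlock_t stands in for rw_sema, a C11 atomic counter for the kref, a stub struct vma for vm_area_struct, and the helper names only mirror the kernel's.

/*
 * Userspace model only; none of this is the actual mm/hugetlb.c code.
 * build: cc -pthread model.c (together with the driver further below)
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct hugetlb_vma_lock;

struct vma {				/* stands in for vm_area_struct */
	struct hugetlb_vma_lock *vm_private_data;
};

struct hugetlb_vma_lock {
	pthread_rwlock_t rw_sema;	/* models vma_lock->rw_sema */
	atomic_int refs;		/* models vma_lock->refs (kref) */
	struct vma *vma;		/* the field the patch guards */
};

/* Models kref_put(&vma_lock->refs, hugetlb_vma_lock_release). */
static void vma_lock_release(struct hugetlb_vma_lock *lock)
{
	if (atomic_fetch_sub(&lock->refs, 1) == 1) {
		pthread_rwlock_destroy(&lock->rw_sema);
		free(lock);
	}
}

/* Mirrors __hugetlb_vma_unlock_write_put(); caller holds rw_sema. */
static void unlock_write_put(struct hugetlb_vma_lock *lock)
{
	struct vma *vma = lock->vma;

	/*
	 * Clearing ->vma while still holding the semaphore is the point
	 * of the patch: a waiter on rw_sema can observe NULL, but never
	 * a pointer to an already-freed vma.
	 */
	lock->vma = NULL;
	vma->vm_private_data = NULL;
	pthread_rwlock_unlock(&lock->rw_sema);
	vma_lock_release(lock);		/* drop the vma's reference */
}

/* Mirrors hugetlb_vma_lock_free() after the patch. */
static void vma_lock_free(struct hugetlb_vma_lock *lock)
{
	pthread_rwlock_wrlock(&lock->rw_sema);
	unlock_write_put(lock);
}

/*
 * Racing side: a path that took its own reference can sleep on the
 * semaphore; a NULL ->vma after acquiring it means the vma went away.
 */
static struct vma *lookup_vma(struct hugetlb_vma_lock *lock)
{
	struct vma *vma;

	pthread_rwlock_wrlock(&lock->rw_sema);
	vma = lock->vma;		/* NULL => back out, vma freed */
	pthread_rwlock_unlock(&lock->rw_sema);
	return vma;
}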
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
* be asynchronously deleted. If the page tables are shared, there
* will be issues when accessed by someone else.
*/
- hugetlb_vma_unlock_write(vma);
- hugetlb_vma_lock_free(vma);
+ __hugetlb_vma_unlock_write_free(vma);
i_mmap_unlock_write(vma->vm_file->f_mapping);
}
+void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+{
+ struct vm_area_struct *vma = vma_lock->vma;
+
+ /*
+ * The vma_lock structure may or may not be released as a result of
+ * this put; it certainly will no longer be attached to the vma, so
+ * clear the pointer. The semaphore synchronizes access to the
+ * vma_lock->vma field.
+ */
+ vma_lock->vma = NULL;
+ vma->vm_private_data = NULL;
+ up_write(&vma_lock->rw_sema);
+ kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+ if (__vma_shareable_flags_pmd(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ __hugetlb_vma_unlock_write_put(vma_lock);
+ }
+}
+
static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
/*
if (vma->vm_private_data) {
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- /*
- * vma_lock structure may or not be released, but it
- * certainly will no longer be attached to vma so clear
- * pointer.
- */
- vma_lock->vma = NULL;
- kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
- vma->vm_private_data = NULL;
+ down_write(&vma_lock->rw_sema);
+ __hugetlb_vma_unlock_write_put(vma_lock);
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+}
+
static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
}
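Finally, a tiny driver for the userspace model above, again purely illustrative: one thread plays the vma-freeing side while the main thread, holding its own reference on the lock structure, backs out and re-checks. The reader observes NULL rather than a dangling pointer, which is the guarantee the truncation/hole-punch back-out path now relies on.

#include <stdio.h>

/* Uses the struct and helpers from the userspace model above. */

static void *freer(void *arg)
{
	vma_lock_free(arg);			/* vma teardown path */
	return NULL;
}

int main(void)
{
	struct vma v = { 0 };
	struct hugetlb_vma_lock *lock = malloc(sizeof(*lock));
	pthread_t t;

	pthread_rwlock_init(&lock->rw_sema, NULL);
	atomic_init(&lock->refs, 1);		/* the vma's reference */
	lock->vma = &v;
	v.vm_private_data = lock;

	atomic_fetch_add(&lock->refs, 1);	/* reader's own reference */
	pthread_create(&t, NULL, freer, lock);
	pthread_join(t, NULL);

	/* The lock outlived the vma; ->vma is NULL, not stale. */
	printf("vma after teardown: %p\n", (void *)lookup_vma(lock));
	vma_lock_release(lock);			/* drop reader's reference */
	return 0;
}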