mm: thp: add missing pte_unmap() calls
Author:     Marek Szyprowski <m.szyprowski@samsung.com>
AuthorDate: Wed, 10 Nov 2021 07:39:34 +0000 (08:39 +0100)
Commit:     Marek Szyprowski <m.szyprowski@samsung.com>
CommitDate: Wed, 10 Nov 2021 07:40:02 +0000 (08:40 +0100)
pte_offset_map() requires a matching pte_unmap() call once the returned
pte pointer is no longer used. Add the missing calls.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Change-Id: I1d883c10f627334feec4be4e6d6efce0a3fde38f
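
[Editor's note: for context, on kernels built with CONFIG_HIGHPTE the page
table page may live in highmem, and pte_offset_map() establishes a temporary
kernel mapping for it; an unbalanced call therefore leaks that mapping. On
arm64 there is no highmem, so pte_unmap() is effectively a no-op there, but
the pairing is still required by the API contract. A minimal sketch of the
required pattern follows; the function and variable names are illustrative,
not taken from the patch.]

    #include <linux/mm.h>

    /* Illustrative sketch: every pte_offset_map() must be balanced. */
    static void example_touch_pte(pmd_t *pmd, unsigned long addr)
    {
            pte_t *pte;

            pte = pte_offset_map(pmd, addr); /* may create a temporary kmap */
            /* ... read or modify *pte while the mapping is live ... */
            pte_unmap(pte);                  /* balances pte_offset_map() */
    }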

arch/arm64/mm/huge_memory.c

index 402fb11..947f41a 100644
@@ -483,6 +483,7 @@ vm_fault_t arm64_wp_huge_pte(struct vm_fault *vmf, pte_t orig_pte)
        hpte_p = pte_offset_map(vmf->pmd, haddr);
        spin_unlock(vmf->ptl);
        __split_huge_pte(vmf->vma, vmf->pmd, hpte_p, haddr, false, NULL);
+       pte_unmap(hpte_p);
        spin_lock(vmf->ptl);
 
        return VM_FAULT_FALLBACK;
@@ -682,6 +683,7 @@ void huge_cont_pte_set_accessed(struct vm_fault *vmf, pte_t orig_pte)
                ptep_set_access_flags(vmf->vma, haddr, pte, entry, write);
        }
        update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
+       pte_unmap(pte);
 }
 
 /*
@@ -721,6 +723,7 @@ struct page *follow_trans_huge_pte(struct vm_area_struct *vma,
 
        pte = pte_offset_map(pmd, addr);
        page = pte_page(*pte);
+       pte_unmap(pte);
        VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 
        if (!try_grab_page(page, flags))
@@ -1094,6 +1097,7 @@ void split_huge_pte_address(struct vm_area_struct *vma, unsigned long address,
                return;
 
        __split_huge_pte(vma, pmd, pte, haddr, freeze, page);
+       pte_unmap(pte);
 }
 
 void set_huge_pte_migration_entry(