From d9642117914c9d3f800b3bacc19d7e388b04edb4 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 16 Aug 2019 05:41:40 +0000
Subject: [PATCH] powerpc/mm: define empty update_mmu_cache() as static inline

Only BOOK3S and FSL_BOOK3E have a useful update_mmu_cache(). For the
others, just define it static inline.

In the meantime, simplify the FSL_BOOK3E related ifdef as
book3e_hugetlb_preload() only exists when CONFIG_PPC_FSL_BOOK3E is
selected.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/668aba4db6b9af6d8a151174e11a4289f1a6bbcd.1565933217.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/pgtable.h | 11 +++++++++++
 arch/powerpc/include/asm/nohash/pgtable.h | 13 +++++++++++++
 arch/powerpc/include/asm/pgtable.h        | 12 ------------
 arch/powerpc/mm/mem.c                     | 11 +++++++----
 4 files changed, 31 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 6436b65..0e1263455 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -26,5 +26,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1ca1c18..7fed9dc 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -293,5 +293,18 @@ static inline int pgd_huge(pgd_t pgd)
 #define is_hugepd(hpd)		(hugepd_ok(hpd))
 #endif
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ */
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index c58ba79..c70916a 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -77,18 +77,6 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
 
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
-
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd)		0
 #endif
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 828c535..ee8225cd 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -415,10 +415,10 @@ EXPORT_SYMBOL(flush_icache_user_range);
  *
  * This must always be called with the pte lock held.
  */
+#ifdef CONFIG_PPC_BOOK3S
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep)
 {
-#ifdef CONFIG_PPC_BOOK3S
 	/*
 	 * We don't need to worry about _PAGE_PRESENT here because we are
 	 * called with either mm->page_table_lock held or ptl lock held
@@ -456,13 +456,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 	hash_preload(vma->vm_mm, address, is_exec, trap);
+}
 #endif /* CONFIG_PPC_BOOK3S */
 
-#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
-	&& defined(CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *ptep)
+{
 	if (is_vm_hugetlb_page(vma))
 		book3e_hugetlb_preload(vma, address, *ptep);
-#endif
 }
+#endif
 
 /*
  * System memory should not be in /proc/iomem but various tools expect it
-- 
2.7.4
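
A note on the pattern, for readers following along: with the old single
extern declaration, every platform linked against the one
update_mmu_cache() in mem.c even when its body compiled down to nothing,
so every call site still paid for a function call. With a per-platform
empty static inline, the compiler sees the empty body at the call site
and drops the call entirely. The sketch below is a minimal stand-alone
illustration of that pattern in plain userspace C, not kernel code; the
HAVE_USEFUL_UPDATE_MMU_CACHE macro, the file name demo.c and the printf
body are invented here for demonstration only.

/* demo.c - build with:  cc -o demo demo.c
 * Add -DHAVE_USEFUL_UPDATE_MMU_CACHE to get the out-of-line variant.
 * (Macro name made up for this sketch; it is not a kernel symbol.) */
#include <stddef.h>
#include <stdio.h>

struct vm_area_struct;		/* opaque stand-in for the kernel type */
typedef unsigned long pte_t;	/* simplified stand-in */

#ifdef HAVE_USEFUL_UPDATE_MMU_CACHE
/* Platforms with real work to do keep an out-of-line definition,
 * as BOOK3S and FSL_BOOK3E do after this patch. */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	printf("preloading translation for 0x%lx\n", address);
}
#else
/* Everyone else gets an empty static inline: the compiler inlines
 * the empty body and the call disappears from the object code. */
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep) {}
#endif

int main(void)
{
	pte_t pte = 0;

	/* Call sites look identical either way; only the cost differs. */
	update_mmu_cache(NULL, 0x1000, &pte);
	return 0;
}

This mirrors what the patch does per header: book3s/pgtable.h keeps only
a declaration (the definition stays in mem.c under CONFIG_PPC_BOOK3S),
while nohash/pgtable.h declares the real function only when
CONFIG_PPC_FSL_BOOK3E and CONFIG_HUGETLB_PAGE are both set, and defines
the empty inline otherwise.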