From: Christophe Leroy
Date: Thu, 29 Nov 2018 14:07:01 +0000 (+0000)
Subject: powerpc/mm: Extend pte_fragment functionality to PPC32
X-Git-Tag: v5.15~7340^2~144
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=32ea4c14999006fea541b5f78d008fffc1656849;p=platform%2Fkernel%2Flinux-starfive.git

powerpc/mm: Extend pte_fragment functionality to PPC32

In order to allow the 8xx to handle pte_fragments, this patch
extends the use of pte_fragments to PPC32 platforms.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
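The pte_fragment scheme being extended here lets several page tables share one page: a page is carved into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes, each backing one PTE table, and the current partially used page is remembered per mm. The sketch below is a simplified illustration of that allocation path, not the code added by this patch; it assumes only the pte_frag_get()/pte_frag_set() helpers and PTE_FRAG_SIZE visible in the diff, and the function name frag_alloc_sketch() is hypothetical. The real pte_fragment_alloc() in arch/powerpc/mm/pgtable-frag.c additionally handles page refcounting, locking and the kernel/user distinction.

/*
 * Simplified sketch of pte fragment allocation (illustration only, not the
 * kernel code added by this patch).  PTE_FRAG_NR fragments of PTE_FRAG_SIZE
 * bytes fit in one page; the per-mm cursor remembers the next unused one.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static pte_t *frag_alloc_sketch(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (!frag) {
		/* no partially used page left: start a new one */
		frag = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		if (!frag)
			return NULL;
	}

	/* advance the cursor, or clear it once the page is exhausted */
	if (((unsigned long)frag & ~PAGE_MASK) + PTE_FRAG_SIZE < PAGE_SIZE)
		pte_frag_set(&mm->context, frag + PTE_FRAG_SIZE);
	else
		pte_frag_set(&mm->context, NULL);

	return (pte_t *)frag;
}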
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 5bd26c2..2bb500d 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
 #define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+
 /*
  * 32-bit hash table MMU support
  */
@@ -9,6 +10,8 @@
  * BATs
  */
 
+#include
+
 /* Block size masks */
 #define BL_128K	0x000
 #define BL_256K	0x001
@@ -43,7 +46,7 @@ struct ppc_bat {
 	u32 batl;
 };
 
-typedef struct page *pgtable_t;
+typedef pte_t *pgtable_t;
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index eb8882c..0f58e5b 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -59,30 +59,31 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
+	*pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+void pte_frag_destroy(void *pte_frag);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 {
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
+	pte_fragment_free((unsigned long *)ptepage, 0);
 }
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
 	if (!index_size) {
-		pgtable_page_dtor(virt_to_page(table));
-		free_page((unsigned long)table);
+		pte_fragment_free((unsigned long *)table, 0);
 	} else {
 		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
 		kmem_cache_free(PGT_CACHE(index_size), table);
@@ -120,6 +121,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	pgtable_free_tlb(tlb, table, 0);
 }
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index b2c588a..727692f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -329,7 +329,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)	\
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
@@ -346,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
+		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
 #define pte_unmap(pte)	kunmap_atomic(pte)
 
 /*
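Since a PTE table can now be smaller than a page, pmd_page() only identifies the enclosing page and kmap_atomic() maps that whole page, so the reworked pte_offset_map() above has to add the table's sub-page offset back in. A minimal sketch of the same arithmetic, restated as a function for illustration (the function name and the example address are assumptions, not kernel code):

/* Hypothetical restatement of the new pte_offset_map() arithmetic. */
static pte_t *pte_offset_map_sketch(pmd_t *dir, unsigned long addr)
{
	unsigned long vaddr = pmd_page_vaddr(*dir);	/* e.g. 0xc1234c00: table starts 0xc00 into its page */
	void *base = kmap_atomic(pmd_page(*dir));	/* maps the whole page; sub-page position is lost */

	/* add the sub-page offset back before indexing the table */
	return (pte_t *)(base + (vaddr & ~PAGE_MASK)) + pte_index(addr);
}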
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 0381394..c05efd2 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -223,7 +223,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
 	return 0;
 }
 
-#ifndef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_BOOK3E_64
 static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 }
diff --git a/arch/powerpc/include/asm/nohash/32/mmu.h b/arch/powerpc/include/asm/nohash/32/mmu.h
index f61f933..7d94a36 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_POWERPC_NOHASH_32_MMU_H_
 #define _ASM_POWERPC_NOHASH_32_MMU_H_
 
+#include
+
 #if defined(CONFIG_40x)
 /* 40x-style software loaded TLB */
 #include
@@ -17,7 +19,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
-typedef struct page *pgtable_t;
+typedef pte_t *pgtable_t;
 #endif
 
 #endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 8825953..7e23458 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -61,11 +61,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
-		      _PMD_PRESENT);
+	*pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 #else
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
@@ -77,31 +76,32 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+	*pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 #endif
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+void pte_frag_destroy(void *pte_frag);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 {
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
+	pte_fragment_free((unsigned long *)ptepage, 0);
 }
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
 	if (!index_size) {
-		pgtable_page_dtor(virt_to_page(table));
-		free_page((unsigned long)table);
+		pte_fragment_free((unsigned long *)table, 0);
 	} else {
 		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
 		kmem_cache_free(PGT_CACHE(index_size), table);
@@ -140,6 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
 	tlb_flush_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	pgtable_free_tlb(tlb, table, 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 3ffb0ff..e9c8604 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -333,12 +333,12 @@ static inline int pte_young(pte_t pte)
  */
 #ifndef CONFIG_BOOKE
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)	\
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)	\
 	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 #endif
@@ -357,7 +357,8 @@ static inline int pte_young(pte_t pte)
 	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
 				  pte_index(addr))
 #define pte_offset_map(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
+		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
 #define pte_unmap(pte)	kunmap_atomic(pte)
 
 /*
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 314a289..56ef543 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -125,6 +125,10 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
 	ctx->pte_frag = p;
 }
 #else
+#define PTE_FRAG_NR		1
+#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
+#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)
+
 static inline void *pte_frag_get(mm_context_t *ctx)
 {
 	return NULL;
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 86f9dc7..53c1ffa 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_PPC_BOOK3E_64)	+= pgtable-book3e.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= pgtable-hash64.o hash_utils_64.o slb.o \
 				   $(hash64-y) mmu_context_book3s64.o \
 				   pgtable-book3s64.o pgtable-frag.o
+obj-$(CONFIG_PPC32)		+= pgtable-frag.o
 obj-$(CONFIG_PPC_RADIX_MMU)	+= pgtable-radix.o tlb-radix.o
 obj-$(CONFIG_PPC_BOOK3S_32)	+= ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
 obj-$(CONFIG_PPC_BOOK3S)	+= tlb_hash$(BITS).o
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index f84e14f..bb52320 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -15,6 +15,7 @@
 #include
 #include
+#include
 
 #if defined(CONFIG_PPC32)
 static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -97,3 +98,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
 	switch_mmu_context(prev, next, tsk);
 }
+
+#ifdef CONFIG_PPC32
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	void *frag = pte_frag_get(&mm->context);
+
+	if (frag)
+		pte_frag_destroy(frag);
+}
+#endif
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 2faca46..431ecf3 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -385,6 +385,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
 #endif
 	mm->context.id = MMU_NO_CONTEXT;
 	mm->context.active = 0;
+	pte_frag_set(&mm->context, NULL);
 
 	return 0;
 }
@@ -487,4 +488,3 @@ void __init mmu_context_init(void)
 	next_context = FIRST_CONTEXT;
 	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
 }
-
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 10c4369..4fc77a9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -45,32 +45,15 @@ extern char etext[], _stext[], _sinittext[], _einittext[];
 
 __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte;
+	if (!slab_is_available())
+		return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
 
-	if (slab_is_available()) {
-		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	} else {
-		pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
-		if (pte)
-			clear_page(pte);
-	}
-	return pte;
+	return (pte_t *)pte_fragment_alloc(mm, address, 1);
 }
 
 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	struct page *ptepage;
-
-	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;
-
-	ptepage = alloc_pages(flags, 0);
-	if (!ptepage)
-		return NULL;
-	if (!pgtable_page_ctor(ptepage)) {
-		__free_page(ptepage);
-		return NULL;
-	}
-	return ptepage;
+	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
 }
 
 void __iomem *