From 79d1befe054ad4adb277fbd2d2756b1394eaf24e Mon Sep 17 00:00:00 2001
From: Christophe Leroy <christophe.leroy@csgroup.eu>
Date: Wed, 25 Nov 2020 07:10:47 +0000
Subject: [PATCH] powerpc/32s: Don't hash_preload() kernel text

We now always map kernel text with BATs. There is no longer any need
to preload the hash with kernel text addresses, nor to ensure they are
never evicted.

This is more or less a revert of commit ee4f2ea48674 ("[POWERPC] Fix
32-bit mm operations when not using BATs").

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/0a0bab7fadd89aa829e33420fbc10d60c59040a7.1606285014.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/mm/book3s32/hash_low.S | 18 +-----------------
 arch/powerpc/mm/book3s32/mmu.c      |  2 +-
 arch/powerpc/mm/mmu_decl.h          |  2 --
 arch/powerpc/mm/pgtable_32.c        |  4 ----
 4 files changed, 2 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index b2c912e..48415c8 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -411,30 +411,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
  * and we know there is a definite (although small) speed
  * advantage to putting the PTE in the primary PTEG, we always
  * put the PTE in the primary PTEG.
- *
- * In addition, we skip any slot that is mapping kernel text in
- * order to avoid a deadlock when not using BAT mappings if
- * trying to hash in the kernel hash code itself after it has
- * already taken the hash table lock. This works in conjunction
- * with pre-faulting of the kernel text.
- *
- * If the hash table bucket is full of kernel text entries, we'll
- * lockup here but that shouldn't happen
  */
-1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
+	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
 	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
 	addi	r6,r6,HPTE_SIZE			/* search for candidate */
 	andi.	r6,r6,7*HPTE_SIZE
 	stw	r6,next_slot@l(r4)
 	add	r4,r3,r6
-	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
-	clrrwi	r0,r0,12
-	lis	r6,etext@h
-	ori	r6,r6,etext@l	/* get etext */
-	tophys(r6,r6)
-	cmpl	cr0,r0,r6	/* compare and try again */
-	blt	1b

 #ifndef CONFIG_SMP
 	/* Store PTE in PTEG */
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 5c60dca..23f60e9 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -302,7 +302,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
 /*
  * Preload a translation in the hash table
  */
-void hash_preload(struct mm_struct *mm, unsigned long ea)
+static void hash_preload(struct mm_struct *mm, unsigned long ea)
 {
 	pmd_t *pmd;

diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 1b6d39e..0ad6d47 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -91,8 +91,6 @@ void print_system_hash_info(void);

 #ifdef CONFIG_PPC32

-void hash_preload(struct mm_struct *mm, unsigned long ea);
-
 extern void mapin_ram(void);
 extern void setbat(int index, unsigned long virt, phys_addr_t phys,
 		   unsigned int size, pgprot_t prot);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 888b971..e0ec67a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -112,10 +112,6 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 		ktext = ((char *)v >= _stext && (char *)v < etext) ||
 			((char *)v >= _sinittext && (char *)v < _einittext);
 		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
-#ifdef CONFIG_PPC_BOOK3S_32
-		if (ktext)
-			hash_preload(&init_mm, v);
-#endif
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
 	}
--
2.7.4
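
Illustrative notes (reviewer sketches, not part of the patch):

The round-robin eviction that remains in create_hpte() is easier to
follow in C. A minimal model, assuming HPTE_SIZE is the 8-byte size of
a hash PTE and next_slot holds the byte offset of the last victim
within the 8-entry primary PTEG (those two names come from the
assembly above; pick_evict_slot() itself is hypothetical):

	#define HPTE_SIZE 8		/* bytes per hash PTE on 32-bit */

	static unsigned int next_slot;	/* byte offset of last victim */

	/* After this patch: always take the next of the 8 slots. */
	static unsigned int pick_evict_slot(void)
	{
		/* addi r6,r6,HPTE_SIZE ; andi. r6,r6,7*HPTE_SIZE */
		next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
		return next_slot;
	}

Before the patch, the removed instructions re-ran this selection (the
"blt 1b" loop) whenever the chosen slot mapped a physical address
below tophys(etext), i.e. kernel text. With kernel text always mapped
by BATs, such entries are no longer expected in the hash table, so the
retry, and the theoretical lockup on a bucket full of text entries,
can go.

Likewise, the pgtable_32.c hunk leaves __mapin_ram_chunk() as a plain
mapping loop. A sketch of the resulting function, reconstructed from
the hunk context above (the declarations and loop header are
assumptions, not checked against the tree):

	static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
	{
		unsigned long v, s;
		phys_addr_t p;
		int ktext;

		s = offset;
		v = PAGE_OFFSET + s;
		p = memstart_addr + s;
		for (; s < top; s += PAGE_SIZE) {
			/* Text/inittext pages get executable protection... */
			ktext = ((char *)v >= _stext && (char *)v < etext) ||
				((char *)v >= _sinittext && (char *)v < _einittext);
			/* ...but no hash_preload(): the BATs cover kernel text. */
			map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
			v += PAGE_SIZE;
			p += PAGE_SIZE;
		}
	}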