powerpc/44x: Don't support 47x code and non 47x code at the same time
author:      Christophe Leroy <christophe.leroy@csgroup.eu>
author date: Sun, 18 Oct 2020 17:25:18 +0000 (17:25 +0000)
committer:   Michael Ellerman <mpe@ellerman.id.au>
commit date: Thu, 3 Dec 2020 14:01:34 +0000 (01:01 +1100)
The 440/460 variants and the 470 variants are not compatible, so there is
no need to build code supporting both and selecting between them at
runtime via MMU feature sections.

Just use CONFIG_PPC_47x to decide what to build.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c3e64da3d5d068c69a201e03bbae7da055761e5b.1603041883.git.christophe.leroy@csgroup.eu
arch/powerpc/kernel/entry_32.S
arch/powerpc/mm/nohash/tlb_low.S

index c7c28e8..58177c7 100644 (file)
@@ -448,15 +448,13 @@ syscall_exit_cont:
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
 #endif
-#ifdef CONFIG_44x
-BEGIN_MMU_FTR_SECTION
+#if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        bne-    2f
+#endif /* CONFIG_44x && !CONFIG_PPC_47x */
 1:
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
-#endif /* CONFIG_44x */
 BEGIN_FTR_SECTION
        lwarx   r7,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
@@ -966,10 +964,7 @@ restore_kuap:
 
        /* interrupts are hard-disabled at this point */
 restore:
-#ifdef CONFIG_44x
-BEGIN_MMU_FTR_SECTION
-       b       1f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+#if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
index eaeee40..68797e0 100644 (file)
@@ -92,36 +92,25 @@ _GLOBAL(__tlbil_va)
        tlbsx.  r6,0,r3
        bne     10f
        sync
-BEGIN_MMU_FTR_SECTION
-       b       2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+#ifndef CONFIG_PPC_47x
        /* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
         * 22, is clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
         * value will invalidate the TLB entry.
         */
        tlbwe   r6,r6,PPC44x_TLB_PAGEID
-       isync
-10:    wrtee   r10
-       blr
-2:
-#ifdef CONFIG_PPC_47x
+#else
        oris    r7,r6,0x8000    /* specify way explicitly */
        clrrwi  r4,r3,12        /* get an EPN for the hashing with V = 0 */
        ori     r4,r4,PPC47x_TLBE_SIZE
        tlbwe   r4,r7,0         /* write it */
+#endif /* !CONFIG_PPC_47x */
        isync
-       wrtee   r10
+10:    wrtee   r10
        blr
-#else /* CONFIG_PPC_47x */
-1:     trap
-       EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
-#endif /* !CONFIG_PPC_47x */
 
 _GLOBAL(_tlbil_all)
 _GLOBAL(_tlbil_pid)
-BEGIN_MMU_FTR_SECTION
-       b       2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+#ifndef CONFIG_PPC_47x
        li      r3,0
        sync
 
@@ -136,8 +125,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 
        isync
        blr
-2:
-#ifdef CONFIG_PPC_47x
+#else
        /* 476 variant. There's not simple way to do this, hopefully we'll
         * try to limit the amount of such full invalidates
         */
@@ -179,11 +167,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        b       1b              /* Then loop */
 1:     isync                   /* Sync shadows */
        wrtee   r11
-#else /* CONFIG_PPC_47x */
-1:     trap
-       EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
-#endif /* !CONFIG_PPC_47x */
        blr
+#endif /* !CONFIG_PPC_47x */
 
 #ifdef CONFIG_PPC_47x