powerpc: Remove CONFIG_PPC_FSL_BOOK3E
author Christophe Leroy <christophe.leroy@csgroup.eu>
Mon, 19 Sep 2022 17:01:38 +0000 (19:01 +0200)
committer Michael Ellerman <mpe@ellerman.id.au>
Mon, 26 Sep 2022 13:00:13 +0000 (23:00 +1000)
CONFIG_PPC_FSL_BOOK3E is redundant with CONFIG_PPC_E500.

Remove it.

And rename five files accordingly.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[mpe: Rename include guards to match new file names]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/795cb93b88c9a0279289712e674f39e3b108a1b4.1663606876.git.christophe.leroy@csgroup.eu
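A note on the mechanics, for readers less familiar with kbuild: a boolean Kconfig symbol set to y surfaces in C as a defined CONFIG_ preprocessor macro, so once PPC_FSL_BOOK3E and PPC_E500 are always enabled together, every guard in the hunks below can be switched mechanically. A minimal user-space sketch of that equivalence (hypothetical macro values, not kernel code):

/* Sketch only: both symbols are defined together, so the two
 * guards select exactly the same code. */
#include <stdio.h>

#define CONFIG_PPC_E500 1               /* as if PPC_E500=y */
#define CONFIG_PPC_FSL_BOOK3E 1         /* the redundant alias being removed */

int main(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E            /* old guard */
        puts("e500 path");
#endif
#ifdef CONFIG_PPC_E500                  /* new guard, same path */
        puts("e500 path");
#endif
        return 0;
}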
37 files changed:
arch/powerpc/Kconfig
arch/powerpc/include/asm/barrier.h
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/include/asm/nohash/hugetlb-book3e.h [deleted file]
arch/powerpc/include/asm/nohash/hugetlb-e500.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/include/asm/nohash/pte-book3e.h [deleted file]
arch/powerpc/include/asm/nohash/pte-e500.h [new file with mode: 0644]
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/setup.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_e500.S [new file with mode: 0644]
arch/powerpc/kernel/cpu_setup_fsl_booke.S [deleted file]
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/interrupt_64.S
arch/powerpc/kernel/security.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/lib/feature-fixups.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/mmu_decl.h
arch/powerpc/mm/nohash/Makefile
arch/powerpc/mm/nohash/book3e_hugetlbpage.c [deleted file]
arch/powerpc/mm/nohash/e500.c [new file with mode: 0644]
arch/powerpc/mm/nohash/e500_hugetlbpage.c [new file with mode: 0644]
arch/powerpc/mm/nohash/fsl_book3e.c [deleted file]
arch/powerpc/mm/nohash/tlb.c
arch/powerpc/mm/nohash/tlb_low.S
arch/powerpc/platforms/Kconfig.cputype

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9d721cac20b80609ddc7eec0f28ac4e3ff639fd3..a7b58645cc3f1401806107df643a10899e87758f 100644 (file)
@@ -290,7 +290,7 @@ config PPC_LONG_DOUBLE_128
 config PPC_BARRIER_NOSPEC
        bool
        default y
-       depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+       depends on PPC_BOOK3S_64 || PPC_E500
 
 config EARLY_PRINTK
        bool
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index ef2d8b15eaabe3d876174588886d76851c6d6142..e80b2c0e9315a1fc2515d207c7cadcba5f425ca8 100644 (file)
@@ -86,7 +86,7 @@ do {                                                                  \
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #define NOSPEC_BARRIER_SLOT   nop
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#elif defined(CONFIG_PPC_E500)
 #define NOSPEC_BARRIER_SLOT   nop; nop
 #endif
 
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 32ce0fb7548f83712fe4a6b7fcc7be5e06c51f67..ea71f7245a63e5b4ae522b7767a3b01845a86e36 100644 (file)
@@ -7,8 +7,8 @@
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/book3s/64/hugetlb.h>
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
-#include <asm/nohash/hugetlb-book3e.h>
+#elif defined(CONFIG_PPC_E500)
+#include <asm/nohash/hugetlb-e500.h>
 #elif defined(CONFIG_PPC_8xx)
 #include <asm/nohash/32/hugetlb-8xx.h>
 #endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c2b003550dc9845e1645bc0e93de80622da9284a..caea15dcb91dd0d192100615315fd863f22b3b03 100644 (file)
@@ -443,7 +443,7 @@ struct kvmppc_passthru_irqmap {
 };
 #endif
 
-# ifdef CONFIG_PPC_FSL_BOOK3E
+# ifdef CONFIG_PPC_E500
 #define KVMPPC_BOOKE_IAC_NUM   2
 #define KVMPPC_BOOKE_DAC_NUM   2
 # else
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 5b46da9ba7f699eb5a22762f4d07c571f3aabc0f..39057320e4363f6e535dde1b50d3b159cb0d3f00 100644 (file)
 
 typedef pte_t *pgtable_t;
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 #include <asm/percpu.h>
 DECLARE_PER_CPU(int, next_tlbcam_idx);
 #endif
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 197e7552d9f611710e7e132cf3994a3fa31ba5ef..0d40b33184ebe9a7c1f3372c3dbaaa78aa1b4e9a 100644 (file)
@@ -131,7 +131,7 @@ void unmap_kernel_page(unsigned long va);
 #elif defined(CONFIG_44x)
 #include <asm/nohash/32/pte-44x.h>
 #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
-#include <asm/nohash/pte-book3e.h>
+#include <asm/nohash/pte-e500.h>
 #elif defined(CONFIG_PPC_85xx)
 #include <asm/nohash/32/pte-85xx.h>
 #elif defined(CONFIG_PPC_8xx)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 599921cc257e0ec4a6ea932356c081769aa9162d..879e9a6e5a870a49c1dbd5f8c37ac5086f359b22 100644 (file)
@@ -70,7 +70,7 @@
 /*
  * Include the PTE bits definitions
  */
-#include <asm/nohash/pte-book3e.h>
+#include <asm/nohash/pte-e500.h>
 
 #define PTE_RPN_MASK   (~((1UL << PTE_RPN_SHIFT) - 1))
 
diff --git a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h b/arch/powerpc/include/asm/nohash/hugetlb-book3e.h
deleted file mode 100644 (file)
index ecd8694..0000000
--- a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
-#define _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
-
-static inline pte_t *hugepd_page(hugepd_t hpd)
-{
-       if (WARN_ON(!hugepd_ok(hpd)))
-               return NULL;
-
-       return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
-}
-
-static inline unsigned int hugepd_shift(hugepd_t hpd)
-{
-       return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
-}
-
-static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
-                                   unsigned int pdshift)
-{
-       /*
-        * On FSL BookE, we have multiple higher-level table entries that
-        * point to the same hugepte.  Just use the first one since they're all
-        * identical.  So for that case, idx=0.
-        */
-       return hugepd_page(hpd);
-}
-
-void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-
-static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
-{
-       /* We use the old format for PPC_FSL_BOOK3E */
-       *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
-}
-
-static inline int check_and_get_huge_psize(int shift)
-{
-       if (shift & 1)  /* Not a power of 4 */
-               return -EINVAL;
-
-       return shift_to_mmu_psize(shift);
-}
-
-#endif /* _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H */
diff --git a/arch/powerpc/include/asm/nohash/hugetlb-e500.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
new file mode 100644 (file)
index 0000000..8f04ad2
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H
+#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+       if (WARN_ON(!hugepd_ok(hpd)))
+               return NULL;
+
+       return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+       return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
+}
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+                                   unsigned int pdshift)
+{
+       /*
+        * On FSL BookE, we have multiple higher-level table entries that
+        * point to the same hugepte.  Just use the first one since they're all
+        * identical.  So for that case, idx=0.
+        */
+       return hugepd_page(hpd);
+}
+
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+       /* We use the old format for PPC_E500 */
+       *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
+}
+
+static inline int check_and_get_huge_psize(int shift)
+{
+       if (shift & 1)  /* Not a power of 4 */
+               return -EINVAL;
+
+       return shift_to_mmu_psize(shift);
+}
+
+#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */
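The check_and_get_huge_psize() above encodes the fact that e500 huge pages come in power-of-4 sizes, so a valid page shift must be even (4M = 2^22 is accepted, 8M = 2^23 is not). A stand-alone sketch of the check, with the kernel's shift_to_mmu_psize() lookup replaced by a stub and -EINVAL by -1:

#include <stdio.h>

/* Stub standing in for the kernel's shift_to_mmu_psize() table. */
static int shift_to_mmu_psize(int shift) { return shift; }

static int check_and_get_huge_psize(int shift)
{
        if (shift & 1)  /* odd shift => not a power of 4 */
                return -1;

        return shift_to_mmu_psize(shift);
}

int main(void)
{
        printf("4M (shift 22): %d\n", check_and_get_huge_psize(22)); /* 22 */
        printf("8M (shift 23): %d\n", check_and_get_huge_psize(23)); /* -1 */
        return 0;
}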
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 4fd73c7412d0d5940f0c1de7c74d5eea4a558129..d9067dfc531ccdd3f739a7fb29407a38a355f5d9 100644 (file)
@@ -266,7 +266,7 @@ static inline int pud_huge(pud_t pud)
  * We use it to ensure coherency between the i-cache and d-cache
  * for the page which has just been mapped in.
  */
-#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
 #else
 static inline
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
deleted file mode 100644 (file)
index f798640..0000000
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
-#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
-#ifdef __KERNEL__
-
-/* PTE bit definitions for processors compliant to the Book3E
- * architecture 2.06 or later. The position of the PTE bits
- * matches the HW definition of the optional Embedded Page Table
- * category.
- */
-
-/* Architected bits */
-#define _PAGE_PRESENT  0x000001 /* software: pte contains a translation */
-#define _PAGE_SW1      0x000002
-#define _PAGE_BIT_SWAP_TYPE    2
-#define _PAGE_BAP_SR   0x000004
-#define _PAGE_BAP_UR   0x000008
-#define _PAGE_BAP_SW   0x000010
-#define _PAGE_BAP_UW   0x000020
-#define _PAGE_BAP_SX   0x000040
-#define _PAGE_BAP_UX   0x000080
-#define _PAGE_PSIZE_MSK        0x000f00
-#define _PAGE_PSIZE_4K 0x000200
-#define _PAGE_PSIZE_8K 0x000300
-#define _PAGE_PSIZE_16K        0x000400
-#define _PAGE_PSIZE_32K        0x000500
-#define _PAGE_PSIZE_64K        0x000600
-#define _PAGE_PSIZE_128K       0x000700
-#define _PAGE_PSIZE_256K       0x000800
-#define _PAGE_PSIZE_512K       0x000900
-#define _PAGE_PSIZE_1M 0x000a00
-#define _PAGE_PSIZE_2M 0x000b00
-#define _PAGE_PSIZE_4M 0x000c00
-#define _PAGE_PSIZE_8M 0x000d00
-#define _PAGE_PSIZE_16M        0x000e00
-#define _PAGE_PSIZE_32M        0x000f00
-#define _PAGE_DIRTY    0x001000 /* C: page changed */
-#define _PAGE_SW0      0x002000
-#define _PAGE_U3       0x004000
-#define _PAGE_U2       0x008000
-#define _PAGE_U1       0x010000
-#define _PAGE_U0       0x020000
-#define _PAGE_ACCESSED 0x040000
-#define _PAGE_ENDIAN   0x080000
-#define _PAGE_GUARDED  0x100000
-#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
-#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
-#define _PAGE_WRITETHRU        0x800000 /* W: cache write-through */
-
-/* "Higher level" linux bit combinations */
-#define _PAGE_EXEC             (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
-#define _PAGE_RW               (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
-#define _PAGE_KERNEL_RW                (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
-#define _PAGE_KERNEL_RO                (_PAGE_BAP_SR)
-#define _PAGE_KERNEL_RWX       (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
-#define _PAGE_KERNEL_ROX       (_PAGE_BAP_SR | _PAGE_BAP_SX)
-#define _PAGE_USER             (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
-#define _PAGE_PRIVILEGED       (_PAGE_BAP_SR)
-
-#define _PAGE_SPECIAL  _PAGE_SW0
-
-/* Base page size */
-#define _PAGE_PSIZE    _PAGE_PSIZE_4K
-#define        PTE_RPN_SHIFT   (24)
-
-#define PTE_WIMGE_SHIFT (19)
-#define PTE_BAP_SHIFT  (2)
-
-/* On 32-bit, we never clear the top part of the PTE */
-#ifdef CONFIG_PPC32
-#define _PTE_NONE_MASK 0xffffffff00000000ULL
-#define _PMD_PRESENT   0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD       (~PAGE_MASK)
-#define _PMD_USER      0
-#else
-#define _PTE_NONE_MASK 0
-#endif
-
-/*
- * We define 2 sets of base prot bits, one for basic pages (ie,
- * cacheable kernel and user pages) and one for non cacheable
- * pages. We always set _PAGE_COHERENT when SMP is enabled or
- * the processor might need it for DMA coherency.
- */
-#define _PAGE_BASE_NC  (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#if defined(CONFIG_SMP)
-#define _PAGE_BASE     (_PAGE_BASE_NC | _PAGE_COHERENT)
-#else
-#define _PAGE_BASE     (_PAGE_BASE_NC)
-#endif
-
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE      __pgprot(_PAGE_BASE)
-#define PAGE_SHARED    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X  __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
-#define PAGE_COPY      __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
-#define PAGE_READONLY  __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X        __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
-
-#ifndef __ASSEMBLY__
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
-       return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED);
-}
-
-#define pte_mkprivileged pte_mkprivileged
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
-       return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER);
-}
-
-#define pte_mkuser pte_mkuser
-
-static inline pte_t pte_mkexec(pte_t pte)
-{
-       if (pte_val(pte) & _PAGE_BAP_UR)
-               return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
-       else
-               return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
-}
-#define pte_mkexec pte_mkexec
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */
diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
new file mode 100644 (file)
index 0000000..0934e89
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H
+#define _ASM_POWERPC_NOHASH_PTE_E500_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for processors compliant to the Book3E
+ * architecture 2.06 or later. The position of the PTE bits
+ * matches the HW definition of the optional Embedded Page Table
+ * category.
+ */
+
+/* Architected bits */
+#define _PAGE_PRESENT  0x000001 /* software: pte contains a translation */
+#define _PAGE_SW1      0x000002
+#define _PAGE_BIT_SWAP_TYPE    2
+#define _PAGE_BAP_SR   0x000004
+#define _PAGE_BAP_UR   0x000008
+#define _PAGE_BAP_SW   0x000010
+#define _PAGE_BAP_UW   0x000020
+#define _PAGE_BAP_SX   0x000040
+#define _PAGE_BAP_UX   0x000080
+#define _PAGE_PSIZE_MSK        0x000f00
+#define _PAGE_PSIZE_4K 0x000200
+#define _PAGE_PSIZE_8K 0x000300
+#define _PAGE_PSIZE_16K        0x000400
+#define _PAGE_PSIZE_32K        0x000500
+#define _PAGE_PSIZE_64K        0x000600
+#define _PAGE_PSIZE_128K       0x000700
+#define _PAGE_PSIZE_256K       0x000800
+#define _PAGE_PSIZE_512K       0x000900
+#define _PAGE_PSIZE_1M 0x000a00
+#define _PAGE_PSIZE_2M 0x000b00
+#define _PAGE_PSIZE_4M 0x000c00
+#define _PAGE_PSIZE_8M 0x000d00
+#define _PAGE_PSIZE_16M        0x000e00
+#define _PAGE_PSIZE_32M        0x000f00
+#define _PAGE_DIRTY    0x001000 /* C: page changed */
+#define _PAGE_SW0      0x002000
+#define _PAGE_U3       0x004000
+#define _PAGE_U2       0x008000
+#define _PAGE_U1       0x010000
+#define _PAGE_U0       0x020000
+#define _PAGE_ACCESSED 0x040000
+#define _PAGE_ENDIAN   0x080000
+#define _PAGE_GUARDED  0x100000
+#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
+#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
+#define _PAGE_WRITETHRU        0x800000 /* W: cache write-through */
+
+/* "Higher level" linux bit combinations */
+#define _PAGE_EXEC             (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
+#define _PAGE_RW               (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+#define _PAGE_KERNEL_RW                (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO                (_PAGE_BAP_SR)
+#define _PAGE_KERNEL_RWX       (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
+#define _PAGE_KERNEL_ROX       (_PAGE_BAP_SR | _PAGE_BAP_SX)
+#define _PAGE_USER             (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+#define _PAGE_PRIVILEGED       (_PAGE_BAP_SR)
+
+#define _PAGE_SPECIAL  _PAGE_SW0
+
+/* Base page size */
+#define _PAGE_PSIZE    _PAGE_PSIZE_4K
+#define        PTE_RPN_SHIFT   (24)
+
+#define PTE_WIMGE_SHIFT (19)
+#define PTE_BAP_SHIFT  (2)
+
+/* On 32-bit, we never clear the top part of the PTE */
+#ifdef CONFIG_PPC32
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+#define _PMD_PRESENT   0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD       (~PAGE_MASK)
+#define _PMD_USER      0
+#else
+#define _PTE_NONE_MASK 0
+#endif
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC  (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#if defined(CONFIG_SMP)
+#define _PAGE_BASE     (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE     (_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE      __pgprot(_PAGE_BASE)
+#define PAGE_SHARED    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X  __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
+#define PAGE_COPY      __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
+#define PAGE_READONLY  __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X        __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
+
+#ifndef __ASSEMBLY__
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+       return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED);
+}
+
+#define pte_mkprivileged pte_mkprivileged
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+       return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER);
+}
+
+#define pte_mkuser pte_mkuser
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+       if (pte_val(pte) & _PAGE_BAP_UR)
+               return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
+       else
+               return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
+}
+#define pte_mkexec pte_mkexec
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_PTE_E500_H */
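Since the permission masks above are plain ORs of the architected BAP bits, their numeric values can be verified directly. A small sketch (bit values copied from the definitions above):

#include <assert.h>
#include <stdio.h>

#define _PAGE_PRESENT  0x000001
#define _PAGE_BAP_SR   0x000004
#define _PAGE_BAP_SW   0x000010
#define _PAGE_BAP_SX   0x000040
#define _PAGE_PSIZE_4K 0x000200
#define _PAGE_DIRTY    0x001000
#define _PAGE_ACCESSED 0x040000

int main(void)
{
        /* Kernel RW mapping: supervisor read + write, plus dirty. */
        unsigned long kernel_rw  = _PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY;
        /* Adding supervisor execute yields the RWX variant. */
        unsigned long kernel_rwx = kernel_rw | _PAGE_BAP_SX;
        /* Base bits carried by every cacheable 4k mapping. */
        unsigned long base_nc    = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE_4K;

        assert(kernel_rw  == 0x001014);
        assert(kernel_rwx == 0x001054);
        assert(base_nc    == 0x040201);
        printf("RW=0x%06lx RWX=0x%06lx BASE_NC=0x%06lx\n",
               kernel_rw, kernel_rwx, base_nc);
        return 0;
}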
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 7f20636d13eddd15c89f007b6bb6cc604019bc9f..edf1dd1b0ca99e8de7fb58db22a6743ec27995ff 100644 (file)
@@ -31,7 +31,7 @@ extern unsigned int hpage_shift;
 #define HPAGE_SHIFT hpage_shift
 #elif defined(CONFIG_PPC_8xx)
 #define HPAGE_SHIFT            19      /* 512k pages */
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#elif defined(CONFIG_PPC_E500)
 #define HPAGE_SHIFT            22      /* 4M pages */
 #endif
 #define HPAGE_SIZE             ((1UL) << HPAGE_SHIFT)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 55149a0384dbfd61097abd55366c740ee497b2d4..7e4fe766e247991100bd0d1ca06feefd0940b505 100644 (file)
@@ -342,7 +342,7 @@ n:
 #endif
 
 /* various errata or part fixups */
-#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)
 #define MFTB(dest)                     \
 90:    mfspr dest, SPRN_TBRL;          \
 BEGIN_FTR_SECTION_NESTED(96);          \
@@ -768,7 +768,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
        stringify_in_c(.llong (_target);)       \
        stringify_in_c(.previous)
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 #define BTB_FLUSH(reg)                 \
        lis reg,BUCSR_INIT@h;           \
        ori reg,reg,BUCSR_INIT@l;       \
@@ -776,6 +776,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
        isync;
 #else
 #define BTB_FLUSH(reg)
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
 
 #endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index dd461b2c825cd911dbd2e6ec6cf0e464b200777f..85143849a586f41d6455302191aad5ecc9fc82c2 100644 (file)
@@ -69,7 +69,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
 static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }
 #endif
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 void __init setup_spectre_v2(void);
 #else
 static inline void setup_spectre_v2(void) {}
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 33dafd12e81d7248eab54a0d50339b3b04430e75..658c4dffaa56d941093ce39bcce31043b5b02023 100644 (file)
@@ -114,7 +114,7 @@ endif
 obj64-$(CONFIG_HIBERNATION)    += swsusp_asm64.o
 obj-$(CONFIG_MODULES)          += module.o module_$(BITS).o
 obj-$(CONFIG_44x)              += cpu_setup_44x.o
-obj-$(CONFIG_PPC_FSL_BOOK3E)   += cpu_setup_fsl_booke.o
+obj-$(CONFIG_PPC_E500)         += cpu_setup_e500.o
 obj-$(CONFIG_PPC_DOORBELL)     += dbell.o
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 10ce03052a194529077e636f0930b95b698f1175..4ce2a4aa3985436e3ff788c3b06223a931f86269 100644 (file)
@@ -59,7 +59,7 @@
 #endif
 #endif
 
-#if defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_E500)
 #include "../mm/mmu_decl.h"
 #endif
 
@@ -651,7 +651,7 @@ int main(void)
        DEFINE(PGD_T_LOG2, PGD_T_LOG2);
        DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
        OFFSET(TLBCAM_MAS0, tlbcam, MAS0);
        OFFSET(TLBCAM_MAS1, tlbcam, MAS1);
diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S
new file mode 100644 (file)
index 0000000..0583360
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_e500.S
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file contains low level CPU setup functions.
+ * Kumar Gala <galak@kernel.crashing.org>
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * Based on cpu_setup_6xx code by
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ */
+
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/nohash/mmu-book3e.h>
+#include <asm/asm-offsets.h>
+#include <asm/mpc85xx.h>
+
+_GLOBAL(__e500_icache_setup)
+       mfspr   r0, SPRN_L1CSR1
+       andi.   r3, r0, L1CSR1_ICE
+       bnelr                           /* Already enabled */
+       oris    r0, r0, L1CSR1_CPE@h
+       ori     r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR |  L1CSR1_ICE)
+       mtspr   SPRN_L1CSR1, r0         /* Enable I-Cache */
+       isync
+       blr
+
+_GLOBAL(__e500_dcache_setup)
+       mfspr   r0, SPRN_L1CSR0
+       andi.   r3, r0, L1CSR0_DCE
+       bnelr                           /* Already enabled */
+       msync
+       isync
+       li      r0, 0
+       mtspr   SPRN_L1CSR0, r0         /* Disable */
+       msync
+       isync
+       li      r0, (L1CSR0_DCFI | L1CSR0_CLFC)
+       mtspr   SPRN_L1CSR0, r0         /* Invalidate */
+       isync
+1:     mfspr   r0, SPRN_L1CSR0
+       andi.   r3, r0, L1CSR0_CLFC
+       bne+    1b                      /* Wait for lock bits reset */
+       oris    r0, r0, L1CSR0_CPE@h
+       ori     r0, r0, L1CSR0_DCE
+       msync
+       isync
+       mtspr   SPRN_L1CSR0, r0         /* Enable */
+       isync
+       blr
+
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for PW20_WAIT_IDLE_BIT.
+ */
+#define PW20_WAIT_IDLE_BIT             50 /* 1ms, TB frequency is 41.66MHZ */
+_GLOBAL(setup_pw20_idle)
+       mfspr   r3, SPRN_PWRMGTCR0
+
+       /* Set PW20_WAIT bit, enable pw20 state*/
+       ori     r3, r3, PWRMGTCR0_PW20_WAIT
+       li      r11, PW20_WAIT_IDLE_BIT
+
+       /* Set Automatic PW20 Core Idle Count */
+       rlwimi  r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT
+
+       mtspr   SPRN_PWRMGTCR0, r3
+
+       blr
+
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for AV_WAIT_IDLE_BIT.
+ */
+#define AV_WAIT_IDLE_BIT               50 /* 1ms, TB frequency is 41.66MHZ */
+_GLOBAL(setup_altivec_idle)
+       mfspr   r3, SPRN_PWRMGTCR0
+
+       /* Enable Altivec Idle */
+       oris    r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
+       li      r11, AV_WAIT_IDLE_BIT
+
+       /* Set Automatic AltiVec Idle Count */
+       rlwimi  r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT
+
+       mtspr   SPRN_PWRMGTCR0, r3
+
+       blr
+
+#ifdef CONFIG_PPC_E500MC
+_GLOBAL(__setup_cpu_e6500)
+       mflr    r6
+#ifdef CONFIG_PPC64
+       bl      setup_altivec_ivors
+       /* Touch IVOR42 only if the CPU supports E.HV category */
+       mfspr   r10,SPRN_MMUCFG
+       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+       beq     1f
+       bl      setup_lrat_ivor
+1:
+#endif
+       bl      setup_pw20_idle
+       bl      setup_altivec_idle
+       bl      __setup_cpu_e5500
+       mtlr    r6
+       blr
+#endif /* CONFIG_PPC_E500MC */
+
+#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_E500
+#ifndef CONFIG_PPC_E500MC
+_GLOBAL(__setup_cpu_e500v1)
+_GLOBAL(__setup_cpu_e500v2)
+       mflr    r4
+       bl      __e500_icache_setup
+       bl      __e500_dcache_setup
+       bl      __setup_e500_ivors
+#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
+       /* Ensure that RFXE is set */
+       mfspr   r3,SPRN_HID1
+       oris    r3,r3,HID1_RFXE@h
+       mtspr   SPRN_HID1,r3
+#endif
+       mtlr    r4
+       blr
+#else /* CONFIG_PPC_E500MC */
+_GLOBAL(__setup_cpu_e500mc)
+_GLOBAL(__setup_cpu_e5500)
+       mflr    r5
+       bl      __e500_icache_setup
+       bl      __e500_dcache_setup
+       bl      __setup_e500mc_ivors
+       /*
+        * We only want to touch IVOR38-41 if we're running on hardware
+        * that supports category E.HV.  The architectural way to determine
+        * this is MMUCFG[LPIDSIZE].
+        */
+       mfspr   r3, SPRN_MMUCFG
+       rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
+       beq     1f
+       bl      __setup_ehv_ivors
+       b       2f
+1:
+       lwz     r3, CPU_SPEC_FEATURES(r4)
+       /* We need this check as cpu_setup is also called for
+        * the secondary cores. So, if we have already cleared
+        * the feature on the primary core, avoid doing it on the
+        * secondary core.
+        */
+       andi.   r6, r3, CPU_FTR_EMB_HV
+       beq     2f
+       rlwinm  r3, r3, 0, ~CPU_FTR_EMB_HV
+       stw     r3, CPU_SPEC_FEATURES(r4)
+2:
+       mtlr    r5
+       blr
+#endif /* CONFIG_PPC_E500MC */
+#endif /* CONFIG_PPC_E500 */
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC_BOOK3E_64
+_GLOBAL(__restore_cpu_e6500)
+       mflr    r5
+       bl      setup_altivec_ivors
+       /* Touch IVOR42 only if the CPU supports E.HV category */
+       mfspr   r10,SPRN_MMUCFG
+       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+       beq     1f
+       bl      setup_lrat_ivor
+1:
+       bl      setup_pw20_idle
+       bl      setup_altivec_idle
+       bl      __restore_cpu_e5500
+       mtlr    r5
+       blr
+
+_GLOBAL(__restore_cpu_e5500)
+       mflr    r4
+       bl      __e500_icache_setup
+       bl      __e500_dcache_setup
+       bl      __setup_base_ivors
+       bl      setup_perfmon_ivor
+       bl      setup_doorbell_ivors
+       /*
+        * We only want to touch IVOR38-41 if we're running on hardware
+        * that supports category E.HV.  The architectural way to determine
+        * this is MMUCFG[LPIDSIZE].
+        */
+       mfspr   r10,SPRN_MMUCFG
+       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+       beq     1f
+       bl      setup_ehv_ivors
+1:
+       mtlr    r4
+       blr
+
+_GLOBAL(__setup_cpu_e5500)
+       mflr    r5
+       bl      __e500_icache_setup
+       bl      __e500_dcache_setup
+       bl      __setup_base_ivors
+       bl      setup_perfmon_ivor
+       bl      setup_doorbell_ivors
+       /*
+        * We only want to touch IVOR38-41 if we're running on hardware
+        * that supports category E.HV.  The architectural way to determine
+        * this is MMUCFG[LPIDSIZE].
+        */
+       mfspr   r10,SPRN_MMUCFG
+       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+       beq     1f
+       bl      setup_ehv_ivors
+       b       2f
+1:
+       ld      r10,CPU_SPEC_FEATURES(r4)
+       LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
+       andc    r10,r10,r9
+       std     r10,CPU_SPEC_FEATURES(r4)
+2:
+       mtlr    r5
+       blr
+#endif
+
+/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */
+_GLOBAL(flush_dcache_L1)
+       mfmsr   r10
+       wrteei  0
+
+       mfspr   r3,SPRN_L1CFG0
+       rlwinm  r5,r3,9,3       /* Extract cache block size */
+       twlgti  r5,1            /* Only 32 and 64 byte cache blocks
+                                * are currently defined.
+                                */
+       li      r4,32
+       subfic  r6,r5,2         /* r6 = log2(1KiB / cache block size) -
+                                *      log2(number of ways)
+                                */
+       slw     r5,r4,r5        /* r5 = cache block size */
+
+       rlwinm  r7,r3,0,0xff    /* Extract number of KiB in the cache */
+       mulli   r7,r7,13        /* An 8-way cache will require 13
+                                * loads per set.
+                                */
+       slw     r7,r7,r6
+
+       /* save off HID0 and set DCFA */
+       mfspr   r8,SPRN_HID0
+       ori     r9,r8,HID0_DCFA@l
+       mtspr   SPRN_HID0,r9
+       isync
+
+       LOAD_REG_IMMEDIATE(r6, KERNELBASE)
+       mr      r4, r6
+       mtctr   r7
+
+1:     lwz     r3,0(r4)        /* Load... */
+       add     r4,r4,r5
+       bdnz    1b
+
+       msync
+       mr      r4, r6
+       mtctr   r7
+
+1:     dcbf    0,r4            /* ...and flush. */
+       add     r4,r4,r5
+       bdnz    1b
+
+       /* restore HID0 */
+       mtspr   SPRN_HID0,r8
+       isync
+
+       wrtee r10
+
+       blr
+
+has_L2_cache:
+       /* skip L2 cache on P2040/P2040E as they have no L2 cache */
+       mfspr   r3, SPRN_SVR
+       /* shift right by 8 bits and clear E bit of SVR */
+       rlwinm  r4, r3, 24, ~0x800
+
+       lis     r3, SVR_P2040@h
+       ori     r3, r3, SVR_P2040@l
+       cmpw    r4, r3
+       beq     1f
+
+       li      r3, 1
+       blr
+1:
+       li      r3, 0
+       blr
+
+/* flush backside L2 cache */
+flush_backside_L2_cache:
+       mflr    r10
+       bl      has_L2_cache
+       mtlr    r10
+       cmpwi   r3, 0
+       beq     2f
+
+       /* Flush the L2 cache */
+       mfspr   r3, SPRN_L2CSR0
+       ori     r3, r3, L2CSR0_L2FL@l
+       msync
+       isync
+       mtspr   SPRN_L2CSR0,r3
+       isync
+
+       /* check if it is complete */
+1:     mfspr   r3,SPRN_L2CSR0
+       andi.   r3, r3, L2CSR0_L2FL@l
+       bne     1b
+2:
+       blr
+
+_GLOBAL(cpu_down_flush_e500v2)
+       mflr r0
+       bl      flush_dcache_L1
+       mtlr r0
+       blr
+
+_GLOBAL(cpu_down_flush_e500mc)
+_GLOBAL(cpu_down_flush_e5500)
+       mflr r0
+       bl      flush_dcache_L1
+       bl      flush_backside_L2_cache
+       mtlr r0
+       blr
+
+/* L1 Data Cache of e6500 contains no modified data, no flush is required */
+_GLOBAL(cpu_down_flush_e6500)
+       blr
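The loop count in flush_dcache_L1 above packs its arithmetic into rlwinm/subfic/mulli/slw; in C it reads roughly as below. This is a sketch derived from the asm and its comments, not kernel code — the field positions follow the rlwinm masks, and the factor of 13 loads per set assumes the 8-way L1 mentioned in the comment:

#include <stdio.h>

/* Number of load (and later dcbf) iterations flush_dcache_L1 runs,
 * given a raw L1CFG0 value. */
static unsigned int flush_loop_count(unsigned long l1cfg0)
{
        unsigned int enc = (l1cfg0 >> 23) & 0x3; /* block size code: 0=32B, 1=64B */
        unsigned int kib = l1cfg0 & 0xff;        /* cache size in KiB */

        /* 13 loads displace one 8-way set; scale by
         * log2(1KiB / block size) - log2(8 ways) = 2 - enc. */
        return (kib * 13) << (2 - enc);
}

int main(void)
{
        /* Hypothetical L1CFG0: 32KiB cache, 32-byte blocks. */
        printf("%u iterations\n", flush_loop_count(32)); /* 32 * 13 << 2 = 1664 */
        return 0;
}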
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
deleted file mode 100644 (file)
index 0583360..0000000
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ /dev/null
@@ -1,333 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * This file contains low level CPU setup functions.
- * Kumar Gala <galak@kernel.crashing.org>
- * Copyright 2009 Freescale Semiconductor, Inc.
- *
- * Based on cpu_setup_6xx code by
- * Benjamin Herrenschmidt <benh@kernel.crashing.org>
- */
-
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/ppc_asm.h>
-#include <asm/nohash/mmu-book3e.h>
-#include <asm/asm-offsets.h>
-#include <asm/mpc85xx.h>
-
-_GLOBAL(__e500_icache_setup)
-       mfspr   r0, SPRN_L1CSR1
-       andi.   r3, r0, L1CSR1_ICE
-       bnelr                           /* Already enabled */
-       oris    r0, r0, L1CSR1_CPE@h
-       ori     r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR |  L1CSR1_ICE)
-       mtspr   SPRN_L1CSR1, r0         /* Enable I-Cache */
-       isync
-       blr
-
-_GLOBAL(__e500_dcache_setup)
-       mfspr   r0, SPRN_L1CSR0
-       andi.   r3, r0, L1CSR0_DCE
-       bnelr                           /* Already enabled */
-       msync
-       isync
-       li      r0, 0
-       mtspr   SPRN_L1CSR0, r0         /* Disable */
-       msync
-       isync
-       li      r0, (L1CSR0_DCFI | L1CSR0_CLFC)
-       mtspr   SPRN_L1CSR0, r0         /* Invalidate */
-       isync
-1:     mfspr   r0, SPRN_L1CSR0
-       andi.   r3, r0, L1CSR0_CLFC
-       bne+    1b                      /* Wait for lock bits reset */
-       oris    r0, r0, L1CSR0_CPE@h
-       ori     r0, r0, L1CSR0_DCE
-       msync
-       isync
-       mtspr   SPRN_L1CSR0, r0         /* Enable */
-       isync
-       blr
-
-/*
- * FIXME - we haven't yet done testing to determine a reasonable default
- * value for PW20_WAIT_IDLE_BIT.
- */
-#define PW20_WAIT_IDLE_BIT             50 /* 1ms, TB frequency is 41.66MHZ */
-_GLOBAL(setup_pw20_idle)
-       mfspr   r3, SPRN_PWRMGTCR0
-
-       /* Set PW20_WAIT bit, enable pw20 state*/
-       ori     r3, r3, PWRMGTCR0_PW20_WAIT
-       li      r11, PW20_WAIT_IDLE_BIT
-
-       /* Set Automatic PW20 Core Idle Count */
-       rlwimi  r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT
-
-       mtspr   SPRN_PWRMGTCR0, r3
-
-       blr
-
-/*
- * FIXME - we haven't yet done testing to determine a reasonable default
- * value for AV_WAIT_IDLE_BIT.
- */
-#define AV_WAIT_IDLE_BIT               50 /* 1ms, TB frequency is 41.66MHZ */
-_GLOBAL(setup_altivec_idle)
-       mfspr   r3, SPRN_PWRMGTCR0
-
-       /* Enable Altivec Idle */
-       oris    r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
-       li      r11, AV_WAIT_IDLE_BIT
-
-       /* Set Automatic AltiVec Idle Count */
-       rlwimi  r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT
-
-       mtspr   SPRN_PWRMGTCR0, r3
-
-       blr
-
-#ifdef CONFIG_PPC_E500MC
-_GLOBAL(__setup_cpu_e6500)
-       mflr    r6
-#ifdef CONFIG_PPC64
-       bl      setup_altivec_ivors
-       /* Touch IVOR42 only if the CPU supports E.HV category */
-       mfspr   r10,SPRN_MMUCFG
-       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
-       beq     1f
-       bl      setup_lrat_ivor
-1:
-#endif
-       bl      setup_pw20_idle
-       bl      setup_altivec_idle
-       bl      __setup_cpu_e5500
-       mtlr    r6
-       blr
-#endif /* CONFIG_PPC_E500MC */
-
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_PPC_E500
-#ifndef CONFIG_PPC_E500MC
-_GLOBAL(__setup_cpu_e500v1)
-_GLOBAL(__setup_cpu_e500v2)
-       mflr    r4
-       bl      __e500_icache_setup
-       bl      __e500_dcache_setup
-       bl      __setup_e500_ivors
-#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
-       /* Ensure that RFXE is set */
-       mfspr   r3,SPRN_HID1
-       oris    r3,r3,HID1_RFXE@h
-       mtspr   SPRN_HID1,r3
-#endif
-       mtlr    r4
-       blr
-#else /* CONFIG_PPC_E500MC */
-_GLOBAL(__setup_cpu_e500mc)
-_GLOBAL(__setup_cpu_e5500)
-       mflr    r5
-       bl      __e500_icache_setup
-       bl      __e500_dcache_setup
-       bl      __setup_e500mc_ivors
-       /*
-        * We only want to touch IVOR38-41 if we're running on hardware
-        * that supports category E.HV.  The architectural way to determine
-        * this is MMUCFG[LPIDSIZE].
-        */
-       mfspr   r3, SPRN_MMUCFG
-       rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
-       beq     1f
-       bl      __setup_ehv_ivors
-       b       2f
-1:
-       lwz     r3, CPU_SPEC_FEATURES(r4)
-       /* We need this check as cpu_setup is also called for
-        * the secondary cores. So, if we have already cleared
-        * the feature on the primary core, avoid doing it on the
-        * secondary core.
-        */
-       andi.   r6, r3, CPU_FTR_EMB_HV
-       beq     2f
-       rlwinm  r3, r3, 0, ~CPU_FTR_EMB_HV
-       stw     r3, CPU_SPEC_FEATURES(r4)
-2:
-       mtlr    r5
-       blr
-#endif /* CONFIG_PPC_E500MC */
-#endif /* CONFIG_PPC_E500 */
-#endif /* CONFIG_PPC32 */
-
-#ifdef CONFIG_PPC_BOOK3E_64
-_GLOBAL(__restore_cpu_e6500)
-       mflr    r5
-       bl      setup_altivec_ivors
-       /* Touch IVOR42 only if the CPU supports E.HV category */
-       mfspr   r10,SPRN_MMUCFG
-       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
-       beq     1f
-       bl      setup_lrat_ivor
-1:
-       bl      setup_pw20_idle
-       bl      setup_altivec_idle
-       bl      __restore_cpu_e5500
-       mtlr    r5
-       blr
-
-_GLOBAL(__restore_cpu_e5500)
-       mflr    r4
-       bl      __e500_icache_setup
-       bl      __e500_dcache_setup
-       bl      __setup_base_ivors
-       bl      setup_perfmon_ivor
-       bl      setup_doorbell_ivors
-       /*
-        * We only want to touch IVOR38-41 if we're running on hardware
-        * that supports category E.HV.  The architectural way to determine
-        * this is MMUCFG[LPIDSIZE].
-        */
-       mfspr   r10,SPRN_MMUCFG
-       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
-       beq     1f
-       bl      setup_ehv_ivors
-1:
-       mtlr    r4
-       blr
-
-_GLOBAL(__setup_cpu_e5500)
-       mflr    r5
-       bl      __e500_icache_setup
-       bl      __e500_dcache_setup
-       bl      __setup_base_ivors
-       bl      setup_perfmon_ivor
-       bl      setup_doorbell_ivors
-       /*
-        * We only want to touch IVOR38-41 if we're running on hardware
-        * that supports category E.HV.  The architectural way to determine
-        * this is MMUCFG[LPIDSIZE].
-        */
-       mfspr   r10,SPRN_MMUCFG
-       rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
-       beq     1f
-       bl      setup_ehv_ivors
-       b       2f
-1:
-       ld      r10,CPU_SPEC_FEATURES(r4)
-       LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
-       andc    r10,r10,r9
-       std     r10,CPU_SPEC_FEATURES(r4)
-2:
-       mtlr    r5
-       blr
-#endif
-
-/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */
-_GLOBAL(flush_dcache_L1)
-       mfmsr   r10
-       wrteei  0
-
-       mfspr   r3,SPRN_L1CFG0
-       rlwinm  r5,r3,9,3       /* Extract cache block size */
-       twlgti  r5,1            /* Only 32 and 64 byte cache blocks
-                                * are currently defined.
-                                */
-       li      r4,32
-       subfic  r6,r5,2         /* r6 = log2(1KiB / cache block size) -
-                                *      log2(number of ways)
-                                */
-       slw     r5,r4,r5        /* r5 = cache block size */
-
-       rlwinm  r7,r3,0,0xff    /* Extract number of KiB in the cache */
-       mulli   r7,r7,13        /* An 8-way cache will require 13
-                                * loads per set.
-                                */
-       slw     r7,r7,r6
-
-       /* save off HID0 and set DCFA */
-       mfspr   r8,SPRN_HID0
-       ori     r9,r8,HID0_DCFA@l
-       mtspr   SPRN_HID0,r9
-       isync
-
-       LOAD_REG_IMMEDIATE(r6, KERNELBASE)
-       mr      r4, r6
-       mtctr   r7
-
-1:     lwz     r3,0(r4)        /* Load... */
-       add     r4,r4,r5
-       bdnz    1b
-
-       msync
-       mr      r4, r6
-       mtctr   r7
-
-1:     dcbf    0,r4            /* ...and flush. */
-       add     r4,r4,r5
-       bdnz    1b
-
-       /* restore HID0 */
-       mtspr   SPRN_HID0,r8
-       isync
-
-       wrtee r10
-
-       blr
-
-has_L2_cache:
-       /* skip L2 cache on P2040/P2040E as they have no L2 cache */
-       mfspr   r3, SPRN_SVR
-       /* shift right by 8 bits and clear E bit of SVR */
-       rlwinm  r4, r3, 24, ~0x800
-
-       lis     r3, SVR_P2040@h
-       ori     r3, r3, SVR_P2040@l
-       cmpw    r4, r3
-       beq     1f
-
-       li      r3, 1
-       blr
-1:
-       li      r3, 0
-       blr
-
-/* flush backside L2 cache */
-flush_backside_L2_cache:
-       mflr    r10
-       bl      has_L2_cache
-       mtlr    r10
-       cmpwi   r3, 0
-       beq     2f
-
-       /* Flush the L2 cache */
-       mfspr   r3, SPRN_L2CSR0
-       ori     r3, r3, L2CSR0_L2FL@l
-       msync
-       isync
-       mtspr   SPRN_L2CSR0,r3
-       isync
-
-       /* check if it is complete */
-1:     mfspr   r3,SPRN_L2CSR0
-       andi.   r3, r3, L2CSR0_L2FL@l
-       bne     1b
-2:
-       blr
-
-_GLOBAL(cpu_down_flush_e500v2)
-       mflr r0
-       bl      flush_dcache_L1
-       mtlr r0
-       blr
-
-_GLOBAL(cpu_down_flush_e500mc)
-_GLOBAL(cpu_down_flush_e5500)
-       mflr r0
-       bl      flush_dcache_L1
-       bl      flush_backside_L2_cache
-       mtlr r0
-       blr
-
-/* L1 Data Cache of e6500 contains no modified data, no flush is required */
-_GLOBAL(cpu_down_flush_e6500)
-       blr
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index a2f82ced6e4aacc9a84b9348375810344313eb86..1047dc053b476ca8a0bc5d1452cd1f8f5de4745d 100644 (file)
@@ -34,7 +34,7 @@
  */
 #define THREAD_NORMSAVE(offset)        (THREAD_NORMSAVES + (offset * 4))
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 #define BOOKE_CLEAR_BTB(reg)                                                                   \
 START_BTB_FLUSH_SECTION                                                                \
        BTB_FLUSH(reg)                                                                  \
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index 4b4ba3364665af4e5cdb6ff18ba9b43ede35f74d..a2d3abb4807507a8024f62470e6770cdc95e75b6 100644 (file)
@@ -230,7 +230,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_common)
        std     r0,GPR0(r1)
        std     r10,GPR1(r1)
        std     r2,GPR2(r1)
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 START_BTB_FLUSH_SECTION
        BTB_FLUSH(r10)
 END_BTB_FLUSH_SECTION
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index b562a1d2c7500700b04e2ca530512c6ece82ac6f..206475e3e0b480116719b69b0ddd0873f18dd529 100644 (file)
@@ -35,7 +35,7 @@ static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_N
 bool barrier_nospec_enabled;
 static bool no_nospec;
 static bool btb_flush_enabled;
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
 static bool no_spectrev2;
 #endif
 
@@ -122,7 +122,7 @@ static __init int security_feature_debugfs_init(void)
 device_initcall(security_feature_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
 
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
 static int __init handle_nospectre_v2(char *p)
 {
        no_spectrev2 = true;
@@ -130,9 +130,9 @@ static int __init handle_nospectre_v2(char *p)
        return 0;
 }
 early_param("nospectre_v2", handle_nospectre_v2);
-#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 void __init setup_spectre_v2(void)
 {
        if (no_spectrev2 || cpu_mitigations_off())
@@ -140,7 +140,7 @@ void __init setup_spectre_v2(void)
        else
                btb_flush_enabled = true;
 }
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
 
 #ifdef CONFIG_PPC_BOOK3S_64
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 169703fead57668a8667ab483079f161546547d7..11ded19186b9103eb8f664304e5e0be4e3d0d2f3 100644 (file)
@@ -708,7 +708,7 @@ static struct task_struct *current_set[NR_CPUS];
 static void smp_store_cpu_info(int id)
 {
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 #endif
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3a10cda9c05ec775787e7199ed5adc4b7ccc6ce7..ef9a61718940369f887c187bddf0a6dbff3def06 100644 (file)
@@ -228,7 +228,7 @@ static void __init sysfs_create_dscr_default(void)
 }
 #endif /* CONFIG_PPC64 */
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 #define MAX_BIT                                63
 
 static u64 pw20_wt;
@@ -907,7 +907,7 @@ static int register_cpu_online(unsigned int cpu)
                device_create_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
                device_create_file(s, &dev_attr_pw20_state);
                device_create_file(s, &dev_attr_pw20_wait_time);
@@ -1003,7 +1003,7 @@ static int unregister_cpu_online(unsigned int cpu)
                device_remove_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
                device_remove_file(s, &dev_attr_pw20_state);
                device_remove_file(s, &dev_attr_pw20_wait_time);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b60d81acccfcc82442ea7ddd2702b502e452c50b..c025c83dfdc3f413907e6638bbd52fa6c8c3a905 100644 (file)
@@ -239,7 +239,7 @@ SECTIONS
        }
 #endif /* CONFIG_PPC_BARRIER_NOSPEC */
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        . = ALIGN(8);
        __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
                __start__btb_flush_fixup = .;
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 993d3f31832af676303ace05ff3443e4cbe930f6..31f40f544de547b826a9287d924376efa93a2f52 100644 (file)
@@ -550,7 +550,7 @@ void do_barrier_nospec_fixups(bool enable)
 }
 #endif /* CONFIG_PPC_BARRIER_NOSPEC */
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
 {
        unsigned int instr[2], *dest;
@@ -602,7 +602,7 @@ void __init do_btb_flush_fixups(void)
        for (; start < end; start += 2)
                patch_btb_flush_section(start);
 }
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
 
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bc84a594ca62cb90cd6fe6056972fa5b2583c3c6..8c3ea5300ac304ef4c0356b077270b36539b2ed2 100644 (file)
@@ -623,7 +623,7 @@ static int __init hugetlbpage_init(void)
                if (pdshift > shift) {
                        if (!IS_ENABLED(CONFIG_PPC_8xx))
                                pgtable_cache_add(pdshift - shift);
-               } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
+               } else if (IS_ENABLED(CONFIG_PPC_E500) ||
                           IS_ENABLED(CONFIG_PPC_8xx)) {
                        pgtable_cache_add(PTE_T_ORDER);
                }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 6ddbd6cb3a2acd98b00774fc2e232e7e6530320a..84d171953ba44eb4fbcfb42772da104404554ff3 100644 (file)
@@ -308,7 +308,7 @@ void __init mem_init(void)
        }
 #endif /* CONFIG_HIGHMEM */
 
-#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
+#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
        /*
         * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
         * functions.... do it here for the non-smp case.
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 341c2e0c71d20af560eab4d348aa7ee2312931c1..bd9784f77f2ee23b295ba792df843cf82f00fd33 100644 (file)
@@ -111,7 +111,7 @@ void MMU_init_hw_patch(void);
 unsigned long mmu_mapin_ram(unsigned long base, unsigned long top);
 #endif
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
                                     bool dryrun, bool init);
 #ifdef CONFIG_PPC32
@@ -157,7 +157,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
 static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
 #endif
 
-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500)
 void mmu_mark_initmem_nx(void);
 void mmu_mark_rodata_ro(void);
 #else
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index b467a25ee155d16e61295a4da2a58816f9a42ec6..f3894e79d5f700f57d9f7a35e92c030ad1f49f5f 100644 (file)
@@ -7,13 +7,13 @@ obj-$(CONFIG_PPC_BOOK3E_64)   += tlb_low_64e.o book3e_pgtable.o
 obj-$(CONFIG_40x)              += 40x.o
 obj-$(CONFIG_44x)              += 44x.o
 obj-$(CONFIG_PPC_8xx)          += 8xx.o
-obj-$(CONFIG_PPC_FSL_BOOK3E)   += fsl_book3e.o
+obj-$(CONFIG_PPC_E500)         += e500.o
 obj-$(CONFIG_RANDOMIZE_BASE)   += kaslr_booke.o
 ifdef CONFIG_HUGETLB_PAGE
-obj-$(CONFIG_PPC_FSL_BOOK3E)   += book3e_hugetlbpage.o
+obj-$(CONFIG_PPC_E500)         += e500_hugetlbpage.o
 endif
 
 # Disable kcov instrumentation on sensitive code
 # This is necessary for booting with kcov enabled on book3e machines
 KCOV_INSTRUMENT_tlb.o := n
-KCOV_INSTRUMENT_fsl_book3e.o := n
+KCOV_INSTRUMENT_e500.o := n
diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
deleted file mode 100644 (file)
index c7d4b31..0000000
--- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
+++ /dev/null
@@ -1,190 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PPC Huge TLB Page Support for Book3E MMU
- *
- * Copyright (C) 2009 David Gibson, IBM Corporation.
- * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
- *
- */
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-
-#include <asm/mmu.h>
-
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-
-static inline int tlb1_next(void)
-{
-       struct paca_struct *paca = get_paca();
-       struct tlb_core_data *tcd;
-       int this, next;
-
-       tcd = paca->tcd_ptr;
-       this = tcd->esel_next;
-
-       next = this + 1;
-       if (next >= tcd->esel_max)
-               next = tcd->esel_first;
-
-       tcd->esel_next = next;
-       return this;
-}
-
-static inline void book3e_tlb_lock(void)
-{
-       struct paca_struct *paca = get_paca();
-       unsigned long tmp;
-       int token = smp_processor_id() + 1;
-
-       /*
-        * Besides being unnecessary in the absence of SMT, this
-        * check prevents trying to do lbarx/stbcx. on e5500 which
-        * doesn't implement either feature.
-        */
-       if (!cpu_has_feature(CPU_FTR_SMT))
-               return;
-
-       asm volatile("1: lbarx %0, 0, %1;"
-                    "cmpwi %0, 0;"
-                    "bne 2f;"
-                    "stbcx. %2, 0, %1;"
-                    "bne 1b;"
-                    "b 3f;"
-                    "2: lbzx %0, 0, %1;"
-                    "cmpwi %0, 0;"
-                    "bne 2b;"
-                    "b 1b;"
-                    "3:"
-                    : "=&r" (tmp)
-                    : "r" (&paca->tcd_ptr->lock), "r" (token)
-                    : "memory");
-}
-
-static inline void book3e_tlb_unlock(void)
-{
-       struct paca_struct *paca = get_paca();
-
-       if (!cpu_has_feature(CPU_FTR_SMT))
-               return;
-
-       isync();
-       paca->tcd_ptr->lock = 0;
-}
-#else
-static inline int tlb1_next(void)
-{
-       int index, ncams;
-
-       ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
-       index = this_cpu_read(next_tlbcam_idx);
-
-       /* Just round-robin the entries and wrap when we hit the end */
-       if (unlikely(index == ncams - 1))
-               __this_cpu_write(next_tlbcam_idx, tlbcam_index);
-       else
-               __this_cpu_inc(next_tlbcam_idx);
-
-       return index;
-}
-
-static inline void book3e_tlb_lock(void)
-{
-}
-
-static inline void book3e_tlb_unlock(void)
-{
-}
-#endif
-
-static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
-{
-       int found = 0;
-
-       mtspr(SPRN_MAS6, pid << 16);
-       asm volatile(
-               "tlbsx  0,%1\n"
-               "mfspr  %0,0x271\n"
-               "srwi   %0,%0,31\n"
-               : "=&r"(found) : "r"(ea));
-
-       return found;
-}
-
-static void
-book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
-{
-       unsigned long mas1, mas2;
-       u64 mas7_3;
-       unsigned long psize, tsize, shift;
-       unsigned long flags;
-       struct mm_struct *mm;
-       int index;
-
-       if (unlikely(is_kernel_addr(ea)))
-               return;
-
-       mm = vma->vm_mm;
-
-       psize = vma_mmu_pagesize(vma);
-       shift = __ilog2(psize);
-       tsize = shift - 10;
-       /*
-        * We can't be interrupted while we're setting up the MAS
-        * registers or after we've confirmed that no tlb exists.
-        */
-       local_irq_save(flags);
-
-       book3e_tlb_lock();
-
-       if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
-               book3e_tlb_unlock();
-               local_irq_restore(flags);
-               return;
-       }
-
-       /* We have to use the CAM(TLB1) on FSL parts for hugepages */
-       index = tlb1_next();
-       mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
-
-       mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
-       mas2 = ea & ~((1UL << shift) - 1);
-       mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
-       mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
-       mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
-       if (!pte_dirty(pte))
-               mas7_3 &= ~(MAS3_SW|MAS3_UW);
-
-       mtspr(SPRN_MAS1, mas1);
-       mtspr(SPRN_MAS2, mas2);
-
-       if (mmu_has_feature(MMU_FTR_BIG_PHYS))
-               mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
-       mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
-
-       asm volatile ("tlbwe");
-
-       book3e_tlb_unlock();
-       local_irq_restore(flags);
-}
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a PTE in the linux page tables.
- *
- * This must always be called with the pte lock held.
- */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
-{
-       if (is_vm_hugetlb_page(vma))
-               book3e_hugetlb_preload(vma, address, *ptep);
-}
-
-void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-       struct hstate *hstate = hstate_file(vma->vm_file);
-       unsigned long tsize = huge_page_shift(hstate) - 10;
-
-       __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
-}
diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c
new file mode 100644 (file)
index 0000000..40a4e69
--- /dev/null
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
+ * E500 Book E processors.
+ *
+ * Copyright 2004,2010 Freescale Semiconductor, Inc.
+ *
+ * This file contains the routines for initializing the MMU
+ * on the 4xx series of chips.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <linux/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include <asm/paca.h>
+
+#include <mm/mmu_decl.h>
+
+unsigned int tlbcam_index;
+
+struct tlbcam TLBCAM[NUM_TLBCAMS];
+
+static struct {
+       unsigned long start;
+       unsigned long limit;
+       phys_addr_t phys;
+} tlbcam_addrs[NUM_TLBCAMS];
+
+#ifdef CONFIG_PPC_85xx
+/*
+ * Return PA for this VA if it is mapped by a CAM, or 0
+ */
+phys_addr_t v_block_mapped(unsigned long va)
+{
+       int b;
+       for (b = 0; b < tlbcam_index; ++b)
+               if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
+                       return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
+       return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_block_mapped(phys_addr_t pa)
+{
+       int b;
+       for (b = 0; b < tlbcam_index; ++b)
+               if (pa >= tlbcam_addrs[b].phys
+                       && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+                             +tlbcam_addrs[b].phys)
+                       return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
+       return 0;
+}
+#endif
+
+/*
+ * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
+ * in particular size must be a power of 4 between 4k and the max supported by
+ * an implementation; max may further be limited by what can be represented in
+ * an unsigned long (for example, 32-bit implementations cannot support a 4GB
+ * size).
+ */
+static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+               unsigned long size, unsigned long flags, unsigned int pid)
+{
+       unsigned int tsize;
+
+       tsize = __ilog2(size) - 10;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+       if ((flags & _PAGE_NO_CACHE) == 0)
+               flags |= _PAGE_COHERENT;
+#endif
+
+       TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
+       TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
+       TLBCAM[index].MAS2 = virt & PAGE_MASK;
+
+       TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
+
+       TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
+       TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
+       if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+               TLBCAM[index].MAS7 = (u64)phys >> 32;
+
+       /* Below is unlikely -- only for large user pages or similar */
+       if (pte_user(__pte(flags))) {
+               TLBCAM[index].MAS3 |= MAS3_UR;
+               TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
+               TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
+       } else {
+               TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
+       }
+
+       tlbcam_addrs[index].start = virt;
+       tlbcam_addrs[index].limit = virt + size - 1;
+       tlbcam_addrs[index].phys = phys;
+}
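
The tsize computed above is the MAS1 TSIZE encoding: log2 of the entry size
in KB. MAV 2.0 parts accept any power of two; MAV 1.0 parts only even values,
i.e. powers of 4, which calc_cam_sz() below enforces with camsize &= ~1U.
A quick standalone check of the arithmetic (illustration only):

#include <stdio.h>

static unsigned int ilog2(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long sizes[] = { 0x1000UL, 0x400000UL, 0x4000000UL };

	for (int i = 0; i < 3; i++) {
		/* 4K -> 2, 4M -> 12, 64M -> 16 */
		printf("size %#lx -> tsize %u\n", sizes[i], ilog2(sizes[i]) - 10);
	}
	return 0;
}
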
+
+static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
+                                phys_addr_t phys)
+{
+       unsigned int camsize = __ilog2(ram);
+       unsigned int align = __ffs(virt | phys);
+       unsigned long max_cam;
+
+       if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+               /* MAXSIZE encodes log4(max entry size in KB); convert to log2(bytes) */
+               max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
+               camsize &= ~1U;
+               align &= ~1U;
+       } else {
+               /* TLB1PS is a bitmap of supported page sizes in KB; convert largest to log2(bytes) */
+               max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
+       }
+
+       if (camsize > align)
+               camsize = align;
+       if (camsize > max_cam)
+               camsize = max_cam;
+
+       return 1UL << camsize;
+}
+
+static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
+                                       unsigned long ram, int max_cam_idx,
+                                       bool dryrun, bool init)
+{
+       int i;
+       unsigned long amount_mapped = 0;
+       unsigned long boundary;
+
+       if (strict_kernel_rwx_enabled())
+               boundary = (unsigned long)(_sinittext - _stext);
+       else
+               boundary = ram;
+
+       /* Calculate CAM values */
+       for (i = 0; boundary && i < max_cam_idx; i++) {
+               unsigned long cam_sz;
+               pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;
+
+               cam_sz = calc_cam_sz(boundary, virt, phys);
+               if (!dryrun)
+                       settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
+
+               boundary -= cam_sz;
+               amount_mapped += cam_sz;
+               virt += cam_sz;
+               phys += cam_sz;
+       }
+       for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
+               unsigned long cam_sz;
+               pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;
+
+               cam_sz = calc_cam_sz(ram, virt, phys);
+               if (!dryrun)
+                       settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
+
+               ram -= cam_sz;
+               amount_mapped += cam_sz;
+               virt += cam_sz;
+               phys += cam_sz;
+       }
+
+       if (dryrun)
+               return amount_mapped;
+
+       if (init) {
+               loadcam_multi(0, i, max_cam_idx);
+               tlbcam_index = i;
+       } else {
+               loadcam_multi(0, i, 0);
+               WARN_ON(i > tlbcam_index);
+       }
+
+#ifdef CONFIG_PPC64
+       get_paca()->tcd.esel_next = i;
+       get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+       get_paca()->tcd.esel_first = i;
+#endif
+
+       return amount_mapped;
+}
+
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
+{
+       unsigned long virt = PAGE_OFFSET;
+       phys_addr_t phys = memstart_addr;
+
+       return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
+}
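
map_mem_in_cams_addr() covers RAM in two passes: when strict kernel RWX is
enabled, the first loop maps only up to the end of kernel text (those entries
are later remapped read-only by mmu_mark_rodata_ro()), and the second loop
maps the remaining RAM as ordinary data. Each entry is sized by calc_cam_sz()
to the largest power of two permitted by the remaining size, the address
alignment, and the hardware maximum. A standalone simulation of that sizing
loop, with made-up inputs and the hardware cap assumed to be 1GB:

#include <stdio.h>

static unsigned int ilog2(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int lowest_bit(unsigned long v)
{
	unsigned int r = 0;

	while (!(v & 1UL)) {
		v >>= 1;
		r++;
	}
	return r;
}

static unsigned long cam_sz(unsigned long ram, unsigned long virt, unsigned long phys)
{
	unsigned int camsize = ilog2(ram);
	unsigned int align = lowest_bit(virt | phys);

	if (camsize > align)
		camsize = align;
	if (camsize > 30)	/* assumed 1GB hardware maximum */
		camsize = 30;
	return 1UL << camsize;
}

int main(void)
{
	unsigned long virt = 0xc0000000UL, phys = 0x0UL, ram = 256UL << 20;
	unsigned long boundary = 8UL << 20;	/* stand-in for _sinittext - _stext */
	unsigned long mapped = 0;
	int i;

	for (i = 0; boundary && i < 8; i++) {	/* text entries */
		unsigned long sz = cam_sz(boundary, virt, phys);

		printf("cam %d: %luM text\n", i, sz >> 20);
		boundary -= sz; mapped += sz; virt += sz; phys += sz;
	}
	for (ram -= mapped; ram && i < 8; i++) {	/* data entries */
		unsigned long sz = cam_sz(ram, virt, phys);

		printf("cam %d: %luM data\n", i, sz >> 20);
		ram -= sz; mapped += sz; virt += sz; phys += sz;
	}
	/* with these inputs: 8M of text, then 8/16/32/64/128M of data */
	printf("mapped %luM total\n", mapped >> 20);
	return 0;
}
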
+
+#ifdef CONFIG_PPC32
+
+#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
+#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
+#endif
+
+unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
+{
+       return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
+}
+
+void flush_instruction_cache(void)
+{
+       unsigned long tmp;
+
+       tmp = mfspr(SPRN_L1CSR1);
+       tmp |= L1CSR1_ICFI | L1CSR1_ICLFR;
+       mtspr(SPRN_L1CSR1, tmp);
+       isync();
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+       flush_instruction_cache();
+}
+
+static unsigned long __init tlbcam_sz(int idx)
+{
+       return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
+}
+
+void __init adjust_total_lowmem(void)
+{
+       unsigned long ram;
+       int i;
+
+       /* adjust lowmem size to __max_low_memory */
+       ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
+
+       i = switch_to_as1();
+       __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
+       restore_to_as0(i, 0, NULL, 1);
+
+       pr_info("Memory CAM mapping: ");
+       for (i = 0; i < tlbcam_index - 1; i++)
+               pr_cont("%lu/", tlbcam_sz(i) >> 20);
+       pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
+               (unsigned int)((total_lowmem - __max_low_memory) >> 20));
+
+       memblock_set_current_limit(memstart_addr + __max_low_memory);
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+       unsigned long remapped;
+
+       remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
+
+       WARN_ON(__max_low_memory != remapped);
+}
+#endif
+
+void mmu_mark_initmem_nx(void)
+{
+       /* Everything is done in mmu_mark_rodata_ro() */
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                               phys_addr_t first_memblock_size)
+{
+       phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+       /* 64M mapped initially according to head_fsl_booke.S */
+       memblock_set_current_limit(min_t(u64, limit, 0x04000000));
+}
+
+#ifdef CONFIG_RELOCATABLE
+int __initdata is_second_reloc;
+notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
+{
+       unsigned long base = kernstart_virt_addr;
+       phys_addr_t size;
+
+       kernstart_addr = start;
+       if (is_second_reloc) {
+               virt_phys_offset = PAGE_OFFSET - memstart_addr;
+               kaslr_late_init();
+               return;
+       }
+
+       /*
+        * Relocatable kernel support based on processing of dynamic
+        * relocation entries. Before we know the real memstart_addr,
+        * we compute virt_phys_offset as:
+        *
+        *      virt_phys_offset = stext.run - kernstart_addr
+        *
+        * where:
+        *
+        *      stext.run = (KERNELBASE & ~0x3ffffff) +
+        *                              (kernstart_addr & 0x3ffffff)
+        *
+        * When we relocate, we have:
+        *
+        *      (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
+        *
+        * hence:
+        *
+        *      virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
+        *                              (kernstart_addr & ~0x3ffffff)
+        */
+       start &= ~0x3ffffff;
+       base &= ~0x3ffffff;
+       virt_phys_offset = base - start;
+       early_get_first_memblock_info(__va(dt_ptr), &size);
+       /*
+        * We now have memstart_addr, so check whether it is the same as
+        * what PAGE_OFFSET currently maps to. If not, we have to remap
+        * PAGE_OFFSET to memstart_addr and do a second relocation.
+        */
+       if (start != memstart_addr) {
+               int n;
+               long offset = start - memstart_addr;
+
+               is_second_reloc = 1;
+               n = switch_to_as1();
+               /* map a 64M area for the second relocation */
+               if (memstart_addr > start)
+                       map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
+                                       false, true);
+               else
+                       map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
+                                       0x4000000, CONFIG_LOWMEM_CAM_NUM,
+                                       false, true);
+               restore_to_as0(n, offset, __va(dt_ptr), 1);
+               /* We should never reach here */
+               panic("Relocation error");
+       }
+
+       kaslr_early_init(__va(dt_ptr), size);
+}
+#endif
+#endif
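
A standalone check of the masking arithmetic described in the comment in
relocate_init() above, with example addresses (KERNELBASE taken here as
0xc0000000 and an 80MB physical load address, both assumptions):

#include <stdio.h>

int main(void)
{
	unsigned long kernelbase = 0xc0000000UL;
	unsigned long kernstart_addr = 0x05000000UL;	/* 80MB load address */
	unsigned long base = kernelbase & ~0x3ffffffUL;
	unsigned long start = kernstart_addr & ~0x3ffffffUL;

	/* the virt_phys_offset used until the real memstart_addr is known */
	printf("virt_phys_offset = %#lx\n", base - start);	/* 0xbc000000 */
	return 0;
}
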
diff --git a/arch/powerpc/mm/nohash/e500_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
new file mode 100644 (file)
index 0000000..c7d4b31
--- /dev/null
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PPC Huge TLB Page Support for Book3E MMU
+ *
+ * Copyright (C) 2009 David Gibson, IBM Corporation.
+ * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
+ *
+ */
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline int tlb1_next(void)
+{
+       struct paca_struct *paca = get_paca();
+       struct tlb_core_data *tcd;
+       int this, next;
+
+       tcd = paca->tcd_ptr;
+       this = tcd->esel_next;
+
+       next = this + 1;
+       if (next >= tcd->esel_max)
+               next = tcd->esel_first;
+
+       tcd->esel_next = next;
+       return this;
+}
+
+static inline void book3e_tlb_lock(void)
+{
+       struct paca_struct *paca = get_paca();
+       unsigned long tmp;
+       int token = smp_processor_id() + 1;
+
+       /*
+        * Besides being unnecessary in the absence of SMT, this
+        * check prevents trying to do lbarx/stbcx. on e5500, which
+        * implements neither instruction.
+        */
+       if (!cpu_has_feature(CPU_FTR_SMT))
+               return;
+
+       asm volatile("1: lbarx %0, 0, %1;"
+                    "cmpwi %0, 0;"
+                    "bne 2f;"
+                    "stbcx. %2, 0, %1;"
+                    "bne 1b;"
+                    "b 3f;"
+                    "2: lbzx %0, 0, %1;"
+                    "cmpwi %0, 0;"
+                    "bne 2b;"
+                    "b 1b;"
+                    "3:"
+                    : "=&r" (tmp)
+                    : "r" (&paca->tcd_ptr->lock), "r" (token)
+                    : "memory");
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+       struct paca_struct *paca = get_paca();
+
+       if (!cpu_has_feature(CPU_FTR_SMT))
+               return;
+
+       isync();
+       paca->tcd_ptr->lock = 0;
+}
+#else
+static inline int tlb1_next(void)
+{
+       int index, ncams;
+
+       ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+       index = this_cpu_read(next_tlbcam_idx);
+
+       /* Just round-robin the entries and wrap when we hit the end */
+       if (unlikely(index == ncams - 1))
+               __this_cpu_write(next_tlbcam_idx, tlbcam_index);
+       else
+               __this_cpu_inc(next_tlbcam_idx);
+
+       return index;
+}
+
+static inline void book3e_tlb_lock(void)
+{
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+}
+#endif
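
The lbarx/stbcx. sequence in the PPC64 variant above is a byte-sized spinlock
on the core's TLB bookkeeping, taken only when SMT makes sibling threads
possible contenders. As an analogy only (not a drop-in for the kernel code,
which also spins on plain loads to limit reservation traffic), the same shape
in portable C11 atomics:

#include <stdatomic.h>

static atomic_uchar tcd_lock = 0;

static void tlb_lock(unsigned char token)	/* token = cpu id + 1 */
{
	unsigned char expected = 0;

	/* spin until the byte is free, then claim it with our token */
	while (!atomic_compare_exchange_weak(&tcd_lock, &expected, token))
		expected = 0;
}

static void tlb_unlock(void)
{
	/* the kernel variant issues isync before releasing */
	atomic_store(&tcd_lock, 0);
}

int main(void)
{
	tlb_lock(1);
	tlb_unlock();
	return 0;
}
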
+
+static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
+{
+       int found = 0;
+
+       mtspr(SPRN_MAS6, pid << 16);            /* set the search PID (SPID) */
+       asm volatile(
+               "tlbsx  0,%1\n"                 /* search the TLB for ea */
+               "mfspr  %0,0x271\n"             /* 0x271 == SPRN_MAS1 */
+               "srwi   %0,%0,31\n"             /* extract the MAS1_VALID bit */
+               : "=&r"(found) : "r"(ea));
+
+       return found;
+}
+
+static void
+book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
+{
+       unsigned long mas1, mas2;
+       u64 mas7_3;
+       unsigned long psize, tsize, shift;
+       unsigned long flags;
+       struct mm_struct *mm;
+       int index;
+
+       if (unlikely(is_kernel_addr(ea)))
+               return;
+
+       mm = vma->vm_mm;
+
+       psize = vma_mmu_pagesize(vma);
+       shift = __ilog2(psize);
+       tsize = shift - 10;
+       /*
+        * We can't be interrupted while we're setting up the MAS
+        * registers or after we've confirmed that no TLB entry exists.
+        */
+       local_irq_save(flags);
+
+       book3e_tlb_lock();
+
+       if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
+               book3e_tlb_unlock();
+               local_irq_restore(flags);
+               return;
+       }
+
+       /* We have to use the CAM (TLB1) on FSL parts for huge pages */
+       index = tlb1_next();
+       mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
+
+       mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
+       mas2 = ea & ~((1UL << shift) - 1);
+       mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
+       mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
+       mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
+       if (!pte_dirty(pte))
+               mas7_3 &= ~(MAS3_SW|MAS3_UW);
+
+       mtspr(SPRN_MAS1, mas1);
+       mtspr(SPRN_MAS2, mas2);
+
+       if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+               mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
+       mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
+
+       asm volatile ("tlbwe");
+
+       book3e_tlb_unlock();
+       local_irq_restore(flags);
+}
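
A small standalone illustration of the size and address packing done above,
for a hypothetical 4MB huge page (the WIMGE and permission-bit merges depend
on the platform PTE layout and are omitted here):

#include <stdio.h>

int main(void)
{
	unsigned long ea = 0x10400000UL;	/* faulting address */
	unsigned int shift = 22;		/* log2(4MB) */
	unsigned int tsize = shift - 10;	/* MAS1 TSIZE, log2 of size in KB */
	unsigned long epn = ea & ~((1UL << shift) - 1);	/* MAS2 effective page number */

	printf("tsize %u, mas2 epn %#lx\n", tsize, epn);	/* tsize 12, epn 0x10400000 */
	return 0;
}
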
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the Linux page tables.
+ *
+ * This must always be called with the pte lock held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+       if (is_vm_hugetlb_page(vma))
+               book3e_hugetlb_preload(vma, address, *ptep);
+}
+
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+       struct hstate *hstate = hstate_file(vma->vm_file);
+       unsigned long tsize = huge_page_shift(hstate) - 10;
+
+       __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
+}
diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c
deleted file mode 100644 (file)
index 40a4e69..0000000
+++ /dev/null
@@ -1,375 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
- * E500 Book E processors.
- *
- * Copyright 2004,2010 Freescale Semiconductor, Inc.
- *
- * This file contains the routines for initializing the MMU
- * on the 4xx series of chips.
- *  -- paulus
- *
- *  Derived from arch/ppc/mm/init.c:
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
- *    Copyright (C) 1996 Paul Mackerras
- *
- *  Derived from "arch/i386/mm/init.c"
- *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/highmem.h>
-#include <linux/memblock.h>
-#include <linux/of_fdt.h>
-
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <linux/uaccess.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/setup.h>
-#include <asm/paca.h>
-
-#include <mm/mmu_decl.h>
-
-unsigned int tlbcam_index;
-
-struct tlbcam TLBCAM[NUM_TLBCAMS];
-
-static struct {
-       unsigned long start;
-       unsigned long limit;
-       phys_addr_t phys;
-} tlbcam_addrs[NUM_TLBCAMS];
-
-#ifdef CONFIG_PPC_85xx
-/*
- * Return PA for this VA if it is mapped by a CAM, or 0
- */
-phys_addr_t v_block_mapped(unsigned long va)
-{
-       int b;
-       for (b = 0; b < tlbcam_index; ++b)
-               if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
-                       return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
-       return 0;
-}
-
-/*
- * Return VA for a given PA or 0 if not mapped
- */
-unsigned long p_block_mapped(phys_addr_t pa)
-{
-       int b;
-       for (b = 0; b < tlbcam_index; ++b)
-               if (pa >= tlbcam_addrs[b].phys
-                       && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
-                             +tlbcam_addrs[b].phys)
-                       return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
-       return 0;
-}
-#endif
-
-/*
- * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
- * in particular size must be a power of 4 between 4k and the max supported by
- * an implementation; max may further be limited by what can be represented in
- * an unsigned long (for example, 32-bit implementations cannot support a 4GB
- * size).
- */
-static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
-               unsigned long size, unsigned long flags, unsigned int pid)
-{
-       unsigned int tsize;
-
-       tsize = __ilog2(size) - 10;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
-       if ((flags & _PAGE_NO_CACHE) == 0)
-               flags |= _PAGE_COHERENT;
-#endif
-
-       TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
-       TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
-       TLBCAM[index].MAS2 = virt & PAGE_MASK;
-
-       TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
-       TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
-       TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
-       TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
-       TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
-
-       TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
-       TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
-       if (mmu_has_feature(MMU_FTR_BIG_PHYS))
-               TLBCAM[index].MAS7 = (u64)phys >> 32;
-
-       /* Below is unlikely -- only for large user pages or similar */
-       if (pte_user(__pte(flags))) {
-               TLBCAM[index].MAS3 |= MAS3_UR;
-               TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
-               TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
-       } else {
-               TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
-       }
-
-       tlbcam_addrs[index].start = virt;
-       tlbcam_addrs[index].limit = virt + size - 1;
-       tlbcam_addrs[index].phys = phys;
-}
-
-static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
-                                phys_addr_t phys)
-{
-       unsigned int camsize = __ilog2(ram);
-       unsigned int align = __ffs(virt | phys);
-       unsigned long max_cam;
-
-       if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
-               /* Convert (4^max) kB to (2^max) bytes */
-               max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
-               camsize &= ~1U;
-               align &= ~1U;
-       } else {
-               /* Convert (2^max) kB to (2^max) bytes */
-               max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
-       }
-
-       if (camsize > align)
-               camsize = align;
-       if (camsize > max_cam)
-               camsize = max_cam;
-
-       return 1UL << camsize;
-}
-
-static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
-                                       unsigned long ram, int max_cam_idx,
-                                       bool dryrun, bool init)
-{
-       int i;
-       unsigned long amount_mapped = 0;
-       unsigned long boundary;
-
-       if (strict_kernel_rwx_enabled())
-               boundary = (unsigned long)(_sinittext - _stext);
-       else
-               boundary = ram;
-
-       /* Calculate CAM values */
-       for (i = 0; boundary && i < max_cam_idx; i++) {
-               unsigned long cam_sz;
-               pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;
-
-               cam_sz = calc_cam_sz(boundary, virt, phys);
-               if (!dryrun)
-                       settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
-
-               boundary -= cam_sz;
-               amount_mapped += cam_sz;
-               virt += cam_sz;
-               phys += cam_sz;
-       }
-       for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
-               unsigned long cam_sz;
-               pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;
-
-               cam_sz = calc_cam_sz(ram, virt, phys);
-               if (!dryrun)
-                       settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
-
-               ram -= cam_sz;
-               amount_mapped += cam_sz;
-               virt += cam_sz;
-               phys += cam_sz;
-       }
-
-       if (dryrun)
-               return amount_mapped;
-
-       if (init) {
-               loadcam_multi(0, i, max_cam_idx);
-               tlbcam_index = i;
-       } else {
-               loadcam_multi(0, i, 0);
-               WARN_ON(i > tlbcam_index);
-       }
-
-#ifdef CONFIG_PPC64
-       get_paca()->tcd.esel_next = i;
-       get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-       get_paca()->tcd.esel_first = i;
-#endif
-
-       return amount_mapped;
-}
-
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
-{
-       unsigned long virt = PAGE_OFFSET;
-       phys_addr_t phys = memstart_addr;
-
-       return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
-}
-
-#ifdef CONFIG_PPC32
-
-#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
-#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
-#endif
-
-unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
-{
-       return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
-}
-
-void flush_instruction_cache(void)
-{
-       unsigned long tmp;
-
-       tmp = mfspr(SPRN_L1CSR1);
-       tmp |= L1CSR1_ICFI | L1CSR1_ICLFR;
-       mtspr(SPRN_L1CSR1, tmp);
-       isync();
-}
-
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
-void __init MMU_init_hw(void)
-{
-       flush_instruction_cache();
-}
-
-static unsigned long __init tlbcam_sz(int idx)
-{
-       return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
-}
-
-void __init adjust_total_lowmem(void)
-{
-       unsigned long ram;
-       int i;
-
-       /* adjust lowmem size to __max_low_memory */
-       ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
-
-       i = switch_to_as1();
-       __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
-       restore_to_as0(i, 0, NULL, 1);
-
-       pr_info("Memory CAM mapping: ");
-       for (i = 0; i < tlbcam_index - 1; i++)
-               pr_cont("%lu/", tlbcam_sz(i) >> 20);
-       pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
-               (unsigned int)((total_lowmem - __max_low_memory) >> 20));
-
-       memblock_set_current_limit(memstart_addr + __max_low_memory);
-}
-
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
-{
-       unsigned long remapped;
-
-       remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
-
-       WARN_ON(__max_low_memory != remapped);
-}
-#endif
-
-void mmu_mark_initmem_nx(void)
-{
-       /* Everything is done in mmu_mark_rodata_ro() */
-}
-
-void setup_initial_memory_limit(phys_addr_t first_memblock_base,
-                               phys_addr_t first_memblock_size)
-{
-       phys_addr_t limit = first_memblock_base + first_memblock_size;
-
-       /* 64M mapped initially according to head_fsl_booke.S */
-       memblock_set_current_limit(min_t(u64, limit, 0x04000000));
-}
-
-#ifdef CONFIG_RELOCATABLE
-int __initdata is_second_reloc;
-notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
-{
-       unsigned long base = kernstart_virt_addr;
-       phys_addr_t size;
-
-       kernstart_addr = start;
-       if (is_second_reloc) {
-               virt_phys_offset = PAGE_OFFSET - memstart_addr;
-               kaslr_late_init();
-               return;
-       }
-
-       /*
-        * Relocatable kernel support based on processing of dynamic
-        * relocation entries. Before we get the real memstart_addr,
-        * We will compute the virt_phys_offset like this:
-        * virt_phys_offset = stext.run - kernstart_addr
-        *
-        * stext.run = (KERNELBASE & ~0x3ffffff) +
-        *                              (kernstart_addr & 0x3ffffff)
-        * When we relocate, we have :
-        *
-        *      (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
-        *
-        * hence:
-        *  virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
-        *                              (kernstart_addr & ~0x3ffffff)
-        *
-        */
-       start &= ~0x3ffffff;
-       base &= ~0x3ffffff;
-       virt_phys_offset = base - start;
-       early_get_first_memblock_info(__va(dt_ptr), &size);
-       /*
-        * We now get the memstart_addr, then we should check if this
-        * address is the same as what the PAGE_OFFSET map to now. If
-        * not we have to change the map of PAGE_OFFSET to memstart_addr
-        * and do a second relocation.
-        */
-       if (start != memstart_addr) {
-               int n;
-               long offset = start - memstart_addr;
-
-               is_second_reloc = 1;
-               n = switch_to_as1();
-               /* map a 64M area for the second relocation */
-               if (memstart_addr > start)
-                       map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
-                                       false, true);
-               else
-                       map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
-                                       0x4000000, CONFIG_LOWMEM_CAM_NUM,
-                                       false, true);
-               restore_to_as0(n, offset, __va(dt_ptr), 1);
-               /* We should never reach here */
-               panic("Relocation error");
-       }
-
-       kaslr_early_init(__va(dt_ptr), size);
-}
-#endif
-#endif
index f21896ebdc5afd5dc963680a56174c036760b397..fcb1e5ae5c55316048263ebe8191a02ab921af3f 100644 (file)
@@ -50,7 +50,7 @@
  * indirect page table entries.
  */
 #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
@@ -166,7 +166,7 @@ int extlb_level_exc;
 
 #endif /* CONFIG_PPC64 */
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
 DEFINE_PER_CPU(int, next_tlbcam_idx);
 EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
@@ -441,7 +441,7 @@ static void __init setup_page_sizes(void)
        unsigned int eptcfg;
        int i, psize;
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        unsigned int mmucfg = mfspr(SPRN_MMUCFG);
        int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
 
@@ -584,7 +584,7 @@ static void __init setup_mmu_htw(void)
                patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
                patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
                break;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        case PPC_HTW_E6500:
                extlb_level_exc = EX_TLB_SIZE;
                patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
@@ -627,7 +627,7 @@ static void early_init_this_mmu(void)
        }
        mtspr(SPRN_MAS4, mas4);
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                unsigned int num_cams;
                bool map = true;
@@ -680,7 +680,7 @@ static void __init early_init_mmu_global(void)
        /* Look for HW tablewalk support */
        setup_mmu_htw();
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                if (book3e_htw_mode == PPC_HTW_NONE) {
                        extlb_level_exc = EX_TLB_SIZE;
@@ -701,7 +701,7 @@ static void __init early_init_mmu_global(void)
 
 static void __init early_mmu_set_memory_limit(void)
 {
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                /*
                 * Limit memory so we dont have linear faults.
@@ -750,7 +750,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
         * We crop it to the size of the first MEMBLOCK to
         * avoid going over total available memory just in case...
         */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
        if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                unsigned long linear_sz;
                unsigned int num_cams;
index 6914bc8e4eada339960a8e277da517c52249bbfa..e1199608ff4dcdfc878b68d4af01d127cd568685 100644 (file)
@@ -364,7 +364,7 @@ _GLOBAL(_tlbivax_bcast)
 #error Unsupported processor type !
 #endif
 
-#if defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_E500)
 /*
  * extern void loadcam_entry(unsigned int index)
  *
index 5b065186ace5f7db1318e24a9b8bfe36c0e4e593..32c60ad8f45d5fbfba62abe7e03c5fac698c8d2c 100644 (file)
@@ -107,7 +107,6 @@ config PPC_BOOK3S_64
 
 config PPC_BOOK3E_64
        bool "Embedded processors"
-       select PPC_FSL_BOOK3E
        select PPC_E500
        select PPC_E500MC
        select PPC_FPU # Make it a choice ?
@@ -259,8 +258,11 @@ config PPC_BOOK3S
 
 config PPC_E500
        select FSL_EMB_PERFMON
-       select PPC_FSL_BOOK3E
        bool
+       select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
+       select PPC_SMP_MUXED_IPI
+       select PPC_DOORBELL
+       select PPC_KUEP
 
 config PPC_E500MC
        bool "e500mc Support"
@@ -320,16 +322,6 @@ config BOOKE_OR_40x
        depends on BOOKE || 40x
        default y
 
-# this is for common code between PPC32 & PPC64 FSL BOOKE
-config PPC_FSL_BOOK3E
-       bool
-       select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
-       imply FSL_EMB_PERFMON
-       select PPC_SMP_MUXED_IPI
-       select PPC_DOORBELL
-       select PPC_KUEP
-       default y if PPC_85xx
-
 config PTE_64BIT
        bool
        depends on 44x || PPC_E500 || PPC_86xx