can be useful when debugging issues that require an SLB
miss to occur.
+ stress_slb [PPC]
+ Limits the number of kernel SLB entries, and flushes
+ them frequently to increase the rate of SLB faults
+ on kernel addresses.
+
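(Illustration only: one hedged way to pass this at boot. The bootloader
stanza and root device below are made up; only the bare `stress_slb`
token comes from the entry above.)

	# Hypothetical GRUB-style entry for a hash-MMU PPC64 machine:
	linux   /boot/vmlinux root=/dev/sda2 stress_slb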
disable= [IPV6]
- See Documentation/networking/ipv6.txt.
+ See Documentation/networking/ipv6.rst.
hardened_usercopy=
[KNL] Under CONFIG_HARDENED_USERCOPY, whether
#define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
-#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS)
+#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS)
- #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
- #define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
- #define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
- #define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
+ static inline unsigned long pgd_index(unsigned long address)
+ {
+ return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
+ }
+
+ static inline unsigned long pud_index(unsigned long address)
+ {
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+ }
+
+ static inline unsigned long pmd_index(unsigned long address)
+ {
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+ }
+
+ static inline unsigned long pte_index(unsigned long address)
+ {
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ }
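As a quick sanity check of what these helpers compute, here is a minimal
standalone sketch (not kernel code). The shift and table-size constants
are illustrative stand-ins for a 4-level layout with 4K pages; the real
values depend on the configured page size and MMU.

	#include <stdio.h>

	/* Illustrative constants only; the kernel derives the real ones
	 * from the MMU configuration.
	 */
	#define PAGE_SHIFT   12
	#define PMD_SHIFT    21
	#define PUD_SHIFT    30
	#define PGDIR_SHIFT  39
	#define PTRS_PER_PTE 512
	#define PTRS_PER_PMD 512
	#define PTRS_PER_PUD 512
	#define PTRS_PER_PGD 512

	int main(void)
	{
		unsigned long addr = 0x00007f1234567000UL;

		/* Same bit-slicing as the helpers above: one index field
		 * per page-table level.
		 */
		printf("pgd=%lu pud=%lu pmd=%lu pte=%lu\n",
		       (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1),
		       (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1),
		       (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1),
		       (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
		return 0;
	}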
/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

/*
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
+ *
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if it was
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
*/
- #ifndef CONFIG_PTE_64BIT
- static inline unsigned long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+ #ifdef CONFIG_PPC_8xx
+ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
- #ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__("\
- 1: lwarx %0,0,%3\n\
- andc %1,%0,%4\n\
- or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
- " stwcx. %1,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
- #else /* PTE_ATOMIC_UPDATES */
- unsigned long old = pte_val(*p);
- unsigned long new = (old & ~clr) | set;
-
- #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
- p->pte = p->pte1 = p->pte2 = p->pte3 = new;
- #else
- *p = __pte(new);
- #endif
- #endif /* !PTE_ATOMIC_UPDATES */
+ pte_basic_t *entry = &p->pte;
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+ int num, i;
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
++ pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
+
+ if (!huge)
+ num = PAGE_SIZE / SZ_4K;
+ else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) != _PMD_PAGE_8M)
+ num = SZ_512K / SZ_4K;
+ else
+ num = 1;
+
+ for (i = 0; i < num; i++, entry++, new += SZ_4K)
+ *entry = new;
- #ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
- icache_44x_need_flush = 1;
- #endif
return old;
}
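Spelled out, the `num` computation above selects how many consecutive 4K
slots get rewritten; the cases below just restate the code and the
comment (PAGE_SIZE depends on the kernel configuration):

	/*
	 * Recap (illustration, not kernel code):
	 *   !huge, 4K-page kernel:   num = PAGE_SIZE / SZ_4K = 1
	 *   !huge, 16K-page kernel:  num = PAGE_SIZE / SZ_4K = 4
	 *   huge,  512K page:        num = SZ_512K / SZ_4K   = 128
	 *   huge,  8M page:          num = 1 (PMD flagged _PMD_PAGE_8M)
	 * Each successive slot is written with `new` advanced by SZ_4K.
	 */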
- #else /* CONFIG_PTE_64BIT */
- static inline unsigned long long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+ #else
+ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
- #ifdef PTE_ATOMIC_UPDATES
- unsigned long long old;
- unsigned long tmp;
-
- __asm__ __volatile__("\
- 1: lwarx %L0,0,%4\n\
- lwzx %0,0,%3\n\
- andc %1,%L0,%5\n\
- or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
- " stwcx. %1,0,%4\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
- #else /* PTE_ATOMIC_UPDATES */
- unsigned long long old = pte_val(*p);
- *p = __pte((old & ~(unsigned long long)clr) | set);
- #endif /* !PTE_ATOMIC_UPDATES */
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+ *p = __pte(new);
#ifdef CONFIG_44x
if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
void system_reset_exception(struct pt_regs *regs)
{
unsigned long hsrr0, hsrr1;
- bool nested = in_nmi();
bool saved_hsrrs = false;
+ u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
+
+ this_cpu_set_ftrace_enabled(0);
- /*
- * Avoid crashes in case of nested NMI exceptions. Recoverability
- * is determined by RI and in_nmi
- */
- if (!nested)
- nmi_enter();
+ nmi_enter();
/*
* System reset can interrupt code where HSRRs are live and MSR[RI]=1.
mtspr(SPRN_HSRR1, hsrr1);
}
- if (!nested)
- nmi_exit();
+ nmi_exit();
+ this_cpu_set_ftrace_enabled(ftrace_enabled);
+
/* What should we do here? We could issue a shutdown or hard reset. */
}
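The ftrace handling added above is a plain save/disable/restore bracket
around the NMI window; the presumed motivation is that tracing is unsafe
while the exception may run in real mode. In isolation the pattern looks
like this (sketch reusing the powerpc helpers named in the diff):

	u8 saved = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);		/* no tracing in the NMI window */
	nmi_enter();
	/* ... handle the exception ... */
	nmi_exit();
	this_cpu_set_ftrace_enabled(saved);	/* restore the previous state */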
void machine_check_exception(struct pt_regs *regs)
{
int recover = 0;
- bool nested;
- nmi_enter();
+ /*
+ * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * (it uses its own early real-mode handler to handle the MCE proper
+ * and then raises irq_work to call this handler when interrupts are
- * enabled). Set nested = true for this case, which just makes it avoid
- * the nmi_enter/exit.
++ * enabled).
++ *
++ * This is silly. The BOOK3S_64 should just call a different function
++ * rather than expecting semantics to magically change. Something
++ * like 'non_nmi_machine_check_exception()', perhaps?
+ */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) || in_nmi())
- nested = true;
- else
- nested = false;
- if (!nested)
- nmi_enter();
++ const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);
++
++ if (nmi) nmi_enter();
__this_cpu_inc(irq_stat.mce_exceptions);
if (check_io_access(regs))
goto bail;
- nmi_exit();
- if (!nested)
- nmi_exit();
++ if (nmi) nmi_exit();
die("Machine check", regs, SIGBUS);
return;
bail:
- nmi_exit();
- if (!nested)
- nmi_exit();
++ if (nmi) nmi_exit();
}
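The comment's suggestion might look like the sketch below. The split and
the helper name are purely hypothetical (nothing like this exists in the
tree); it only illustrates giving BOOK3S_64 its own non-NMI entry point
instead of branching on IS_ENABLED() inside the handler:

	/* Hypothetical sketch only. */
	static void __machine_check_exception(struct pt_regs *regs)
	{
		/* ... common body: check_io_access(), recovery, die() ... */
	}

	void machine_check_exception(struct pt_regs *regs)	/* NMI callers */
	{
		nmi_enter();
		__machine_check_exception(regs);
		nmi_exit();
	}

	/* BOOK3S_64 irq_work path, called with interrupts enabled. */
	void non_nmi_machine_check_exception(struct pt_regs *regs)
	{
		__machine_check_exception(regs);
	}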
void SMIException(struct pt_regs *regs)
* the hash pagetable.
*/
for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
- if (pgd_none(*pgd) || pgd_is_leaf(*pgd))
- note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE);
- else if (is_hugepd(__hugepd(pgd_val(*pgd))))
- walk_hugepd(st, (hugepd_t *)pgd, addr, PGDIR_SHIFT, 1);
+ p4d_t *p4d = p4d_offset(pgd, 0);
+
- if (!p4d_none(*p4d) && !p4d_is_leaf(*p4d))
- /* pgd exists */
- walk_pud(st, p4d, addr);
++ if (p4d_none(*p4d) || p4d_is_leaf(*p4d))
++ note_page(st, addr, 1, p4d_val(*p4d), PGDIR_SIZE);
++ else if (is_hugepd(__hugepd(p4d_val(*p4d))))
++ walk_hugepd(st, (hugepd_t *)p4d, addr, PGDIR_SHIFT, 1);
else
- note_page(st, addr, 1, p4d_val(*p4d));
- /* pgd exists */
- walk_pud(st, pgd, addr);
++ /* p4d exists */
++ walk_pud(st, p4d, addr);
}
}
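Since powerpc folds the p4d level into the pgd, taking p4d_offset(pgd, 0)
inside a pgd-indexed loop adds no extra table walk. The generic folded
definition (essentially what include/asm-generic/pgtable-nop4d.h provides)
makes this clear:

	static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
	{
		/* Folded level: the pgd entry re-typed as a p4d entry. */
		return (p4d_t *)pgd;
	}

so the loop above still visits every top-level slot exactly once, just
through the p4d accessors.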