select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_CPU_MIPS32_R2_EVA
+ select SYS_HAS_CPU_MIPS32_R6
select SYS_HAS_CPU_MIPS64_R1
select SYS_HAS_CPU_MIPS64_R2
+ select SYS_HAS_CPU_MIPS64_R6
select SYS_HAS_CPU_NEVADA
select SYS_HAS_CPU_RM7000
select SYS_HAS_EARLY_PRINTK
specific type of processor in your system, choose that one;
otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
+config CPU_MIPS32_R6
+ bool "MIPS32 Release 6"
+ depends on SYS_HAS_CPU_MIPS32_R6
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ help
+ Choose this option to build a kernel for release 6 or later of the
+ MIPS32 architecture.
+
+config CPU_MIPS64_R6
+ bool "MIPS64 Release 6"
+ depends on SYS_HAS_CPU_MIPS64_R6
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ help
+ Choose this option to build a kernel for release 6 or later of the
+ MIPS64 architecture.
+
config CPU_R3000
bool "R3000"
depends on SYS_HAS_CPU_R3000
config SYS_HAS_CPU_MIPS64_R2
bool
+config SYS_HAS_CPU_MIPS32_R6
+ bool
+
+config SYS_HAS_CPU_MIPS64_R6
+ bool
+
config SYS_HAS_CPU_R3000
bool
#
config CPU_MIPS32
bool
- default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+ default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
config CPU_MIPS64
bool
- default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+ default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
#
-# These two indicate the revision of the architecture, either Release 1 or Release 2
+# These indicate the revision of the architecture: Release 1, Release 2 or Release 6
bool
default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+config GENERIC_CSUM
+ bool
+
+config CPU_MIPSR6
+ bool
+ default y if CPU_MIPS64_R6 || CPU_MIPS32_R6
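+	# R6 removes or re-encodes instructions used by the hand-written MIPS
+	# checksum routines (e.g. the lwl/lwr unaligned loads), so fall back
+	# to the generic C checksum implementation.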
+ select GENERIC_CSUM
+
config EVA
bool
#
config HARDWARE_WATCHPOINTS
bool
- default y if CPU_MIPSR1 || CPU_MIPSR2
+ default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
menu "Kernel type"
depends on CPU_MIPS32_R2
#depends on CPU_MIPS64_R2 # once there is hardware ...
depends on SYS_SUPPORTS_MULTITHREADING
+ depends on !CPU_MIPSR6
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select MIPS_MT
-Wa,-mips32 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += $(call cc-option,-march=mips32r6,-mips32r6 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+ -Wa,-mips32r6 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-Wa,-mips64 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6) += $(call cc-option,-march=mips64r6,-mips64r6 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+ -Wa,-mips64r6 -Wa,--trap
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
-Wa,--trap
*/
#ifdef CONFIG_CPU_HAS_PREFETCH
-#define PREF(hint,addr) \
+#ifdef CONFIG_CPU_MIPSR6
+
+#define PREF(hint,addr) \
+ .set push; \
+ .set mips64r6; \
+ pref hint, addr; \
+ .set pop
+
+
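+/* Release 6 removes the indexed prefetch instruction (prefx), so PREFX becomes a no-op. */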
+#define PREFX(hint, addr)
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
+#define PREF(hint,addr) \
.set push; \
.set mips4; \
pref hint, addr; \
.set mips4; \
prefx hint, addr; \
.set pop
+#endif /* CONFIG_CPU_MIPSR6 */
#else /* !CONFIG_CPU_HAS_PREFETCH */
#endif /* !CONFIG_CPU_HAS_PREFETCH */
-/*
- * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
- */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set reorder; \
- beqz rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set reorder; \
- bnez rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- bnezl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- beqzl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
- (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt) \
- movn rd, rs, rt
-#define MOVZ(rd, rs, rt) \
- movz rd, rs, rt
-#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
-
/*
* Stack alignment
*/
#if (_MIPS_SZINT == 32)
#define INT_ADD add
#define INT_ADDU addu
-#define INT_ADDI addi
-#define INT_ADDIU addiu
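+/*
+ * addi/daddi (trapping on overflow) were removed in R6 and their opcodes
+ * reused for compact branches, so the ADDI variants are pre-R6 only.
+ */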
+#ifndef CONFIG_CPU_MIPSR6
+#define INT_ADDI addi
+#endif
+#define INT_ADDIU addiu
#define INT_SUB sub
#define INT_SUBU subu
#define INT_L lw
#if (_MIPS_SZINT == 64)
#define INT_ADD dadd
#define INT_ADDU daddu
-#define INT_ADDI daddi
+#ifndef CONFIG_CPU_MIPSR6
+#define INT_ADDI daddi
+#endif
#define INT_ADDIU daddiu
#define INT_SUB dsub
#define INT_SUBU dsubu
#if (_MIPS_SZLONG == 32)
#define LONG_ADD add
#define LONG_ADDU addu
+#ifndef CONFIG_CPU_MIPSR6
#define LONG_ADDI addi
+#endif
#define LONG_ADDIU addiu
#define LONG_SUB sub
#define LONG_SUBU subu
#if (_MIPS_SZLONG == 64)
#define LONG_ADD dadd
#define LONG_ADDU daddu
+#ifndef CONFIG_CPU_MIPSR6
#define LONG_ADDI daddi
+#endif
#define LONG_ADDIU daddiu
#define LONG_SUB dsub
#define LONG_SUBU dsubu
#if (_MIPS_SZPTR == 32)
#define PTR_ADD add
#define PTR_ADDU addu
+#ifndef CONFIG_CPU_MIPSR6
#define PTR_ADDI addi
+#endif
#define PTR_ADDIU addiu
#define PTR_SUB sub
#define PTR_SUBU subu
#if (_MIPS_SZPTR == 64)
#define PTR_ADD dadd
#define PTR_ADDU daddu
+#ifndef CONFIG_CPU_MIPSR6
#define PTR_ADDI daddi
+#endif
#define PTR_ADDIU daddiu
#define PTR_SUB dsub
#define PTR_SUBU dsubu
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
/* copy stuff from MIPS64 */
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
: "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
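+	/*
+	 * R6 encodes LL/SC with only a 9-bit offset, so the R6 variants
+	 * below form the operand address in a scratch register (temp2)
+	 * with dla and use a zero displacement.
+	 */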
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
- " .set mips3 \n"
- " ll %0, %1 # atomic_add \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %1 \n"
+ " ll %0, 0(%2) # atomic_add \n"
+ " addu %0, %3 \n"
+ " sc %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
+ " .set mips3 \n"
+ " ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
: "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %1 \n"
+ " ll %0, 0(%2) # atomic_sub \n"
+ " subu %0, %3 \n"
+ " sc %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " ll %0, %1 # atomic_sub \n"
+ " ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
: "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %2 \n"
+ " ll %1, 0(%3) # atomic_add_return \n"
+ " addu %0, %1, %4 \n"
+ " sc %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
+ " ll %1, %2 # atomic_add_return \n"
+ " addu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!result));
result = temp + i;
result = temp - i;
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %2 \n"
+ " ll %1, 0(%3) # atomic_sub_return \n"
+ " subu %0, %1, %4 \n"
+ " sc %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " ll %1, %2 # atomic_sub_return \n"
+ " ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!result));
result = temp - i;
: "memory");
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %2 \n"
+ "1: ll %1, 0(%3) # atomic_sub_if_positive \n"
+ " subu %0, %1, %4 \n"
+ " bltz %0, 1f \n"
+ " sc %0, 0(%3) \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %4 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
+ "1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} else {
unsigned long flags;
: "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %1 \n"
+ " lld %0, 0(%2) # atomic64_add \n"
+ " daddu %0, %3 \n"
+ " scd %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " lld %0, %1 # atomic64_add \n"
+ " lld %0, %1 # atomic64_add \n"
" daddu %0, %2 \n"
" scd %0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
: "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %1 \n"
+ " lld %0, 0(%2) # atomic64_sub \n"
+ " dsubu %0, %3 \n"
+ " scd %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
" lld %0, %1 # atomic64_sub \n"
" dsubu %0, %2 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
: "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %2 \n"
+ " lld %1, 0(%3) # atomic64_add_return \n"
+ " daddu %0, %1, %4 \n"
+ " scd %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
" lld %1, %2 # atomic64_add_return \n"
" daddu %0, %1, %3 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
+#endif
} while (unlikely(!result));
result = temp + i;
: "memory");
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %2 \n"
+ " lld %1, 0(%3) # atomic64_sub_return \n"
+ " dsubu %0, %1, %4 \n"
+ " scd %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " lld %1, %2 # atomic64_sub_return \n"
+ " lld %1, %2 # atomic64_sub_return \n"
" dsubu %0, %1, %3 \n"
" scd %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
+#endif
} while (unlikely(!result));
result = temp - i;
: "memory");
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %2 \n"
+ "1: lld %1, 0(%3) # atomic64_sub_if_positive \n"
+ " dsubu %0, %1, %4 \n"
+ " bltz %0, 1f \n"
+ " scd %0, 0(%3) \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " dsubu %0, %1, %4 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- "1: lld %1, %2 # atomic64_sub_if_positive\n"
+ "1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
" scd %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} else {
unsigned long flags;
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
int bit = nr & SZLONG_MASK;
unsigned long temp;
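+	/* R6 ll/sc take only a 9-bit offset, so temp2 holds the word address for the R6 asm. */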
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << bit), "m" (*m));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
: "=&r" (temp), "+m" (*m)
: "ir" (bit), "r" (~0));
} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %1 \n"
+ " " __LL "%0, 0(%2) # set_bit \n"
+ " or %0, %3 \n"
+ " " __SC "%0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+ : "ir" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # set_bit \n"
+ " " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
+#endif
} while (unlikely(!temp));
} else
__mips_set_bit(nr, addr);
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
int bit = nr & SZLONG_MASK;
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
: "=&r" (temp), "+m" (*m)
: "ir" (bit));
} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %1 \n"
+ " " __LL "%0, 0(%2) # clear_bit \n"
+ " and %0, %3 \n"
+ " " __SC "%0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+ : "ir" (~(1UL << bit)));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # clear_bit \n"
+ " " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
+#endif
} while (unlikely(!temp));
} else
__mips_clear_bit(nr, addr);
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %1 \n"
+ " " __LL "%0, 0(%2) # change_bit \n"
+ " xor %0, %3 \n"
+ " " __SC "%0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+ : "ir" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # change_bit \n"
+ " " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
+#endif
} while (unlikely(!temp));
} else
__mips_change_bit(nr, addr);
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %1 \n"
+ " " __LL "%0, 0(%3) # test_and_set_bit \n"
+ " or %2, %0, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %1 \n"
+ " " __LL "%0, 0(%3) # test_and_set_bit \n"
+ " or %2, %0, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
{
int bit = nr & SZLONG_MASK;
unsigned long res;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
smp_mb__before_llsc();
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %1 \n"
+ " " __LL "%0, 0(%3) # test_and_clear_bit \n"
+ " or %2, %0, %4 \n"
+ " xor %2, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " __SC "%2, %1 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %3, %1 \n"
+ " " __LL "%0, 0(%3) # test_and_change_bit \n"
+ " xor %2, %0, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_change_bit \n"
+ " " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips32 \n"
- " clz %0, %1 \n"
+#endif
+ " clz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
__builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
__asm__(
" .set push \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips64 \n"
- " dclz %0, %1 \n"
+#endif
+ " dclz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
#define BOOT_MEM_ROM_DATA 2
#define BOOT_MEM_RESERVED 3
#define BOOT_MEM_INIT_RAM 4
+#define BOOT_MEM_INUSE 5
/*
* A memory map that's built upon what was determined
#include <asm/uaccess.h>
+/*
+ * Fold a partial checksum without adding pseudo headers
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ __asm__(
+ " .set push # csum_fold\n"
+ " .set noat \n"
+ " sll $1, %0, 16 \n"
+ " addu %0, $1 \n"
+ " sltu $1, %0, $1 \n"
+ " srl %0, %0, 16 \n"
+ " addu %0, $1 \n"
+ " xori %0, 0xffff \n"
+ " .set pop"
+ : "=r" (sum)
+ : "0" (sum));
+
+ return (__force __sum16)sum;
+}
+
+#ifdef CONFIG_CPU_MIPSR6
+
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#ifndef csum_tcpudp_nofold
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum);
+#endif
+
+#ifndef csum_tcpudp_magic
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+#endif
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
+
+#ifndef csum_partial_copy_nocheck
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy((src), (dst), (len), (sum))
+#endif
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
int len, __wsum sum);
/*
- * Fold a partial checksum without adding pseudo headers
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
*/
-static inline __sum16 csum_fold(__wsum sum)
-{
- __asm__(
- " .set push # csum_fold\n"
- " .set noat \n"
- " sll $1, %0, 16 \n"
- " addu %0, $1 \n"
- " sltu $1, %0, $1 \n"
- " srl %0, %0, 16 \n"
- " addu %0, $1 \n"
- " xori %0, 0xffff \n"
- " .set pop"
- : "=r" (sum)
- : "0" (sum));
- return (__force __sum16)sum;
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
}
/*
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
- return csum_fold(csum_partial(buff, len, 0));
-}
+#endif /* !CONFIG_CPU_MIPSR6 */
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long tmp;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %3 \n"
+ " ll %0, 0(%2) # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %1, %z4 \n"
+ " .set mips64r6 \n"
+ " sc %1, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=&r" (dummy), "=&r" (tmp),
+ "+m" (*m)
+ : "Jr" (val));
+#else
" .set mips3 \n"
- " ll %0, %3 # xchg_u32 \n"
+ " ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set mips3 \n"
- " sc %2, %1 \n"
+ " sc %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
+#endif
} while (unlikely(!dummy));
} else {
unsigned long flags;
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long tmp;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+ " dla %2, %3 \n"
+ " lld %0, 0(%2) # xchg_u64 \n"
+ " move %1, %z4 \n"
+ " scd %1, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=&r" (dummy), "=&r" (tmp),
+ "+m" (*m)
+ : "Jr" (val));
+#else
" .set mips3 \n"
- " lld %0, %3 # xchg_u64 \n"
+ " lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
+#endif
} while (unlikely(!dummy));
} else {
unsigned long flags;
#define __HAVE_ARCH_CMPXCHG 1
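+/*
+ * The R6 variant of __cmpxchg_asm forms the operand address with dla and
+ * uses a zero displacement, since R6 LL/SC only encode a 9-bit offset.
+ */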
+#ifdef CONFIG_CPU_MIPSR6
+#define __cmpxchg_asm(ld, st, m, old, new) \
+({ \
+ __typeof(*(m)) __ret; \
+ register unsigned long tmp; \
+ \
+ if (kernel_uses_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips64r6 \n" \
+ "1: dla %1, %2 \n" \
+ ld " %0, 0(%1) # __cmpxchg_asm \n" \
+ " bne %0, %z3, 2f \n" \
+ " .set mips0 \n" \
+ " move $1, %z4 \n" \
+ " .set mips64r6 \n" \
+ " " st " $1, 0(%1) \n" \
+ " beqz $1, 1b \n" \
+ " .set pop \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (tmp), "+m" (*m) \
+ : "Jr" (old), "Jr" (new)); \
+ } else { \
+ unsigned long __flags; \
+ \
+ raw_local_irq_save(__flags); \
+ __ret = *m; \
+ if (__ret == old) \
+ *m = new; \
+ raw_local_irq_restore(__flags); \
+ } \
+ \
+ __ret; \
+})
+#else /* !CONFIG_CPU_MIPSR6 */
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(*(m)) __ret; \
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
" .set mips3 \n" \
- " " st " $1, %1 \n" \
+ " " st " $1, %1 \n" \
" beqzl $1, 1b \n" \
"2: \n" \
" .set pop \n" \
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
" .set mips3 \n" \
- " " st " $1, %1 \n" \
+ " " st " $1, %1 \n" \
" beqz $1, 1b \n" \
" .set pop \n" \
"2: \n" \
\
__ret; \
})
+#endif /* CONFIG_CPU_MIPSR6 */
/*
* This function doesn't exist, so you'll get a linker error
#ifndef cpu_has_tlbinv
#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV)
#endif
+#ifndef cpu_has_tlbinv_full
+#define cpu_has_tlbinv_full (cpu_data[0].options & MIPS_CPU_TLBINV_FULL)
+#endif
#ifndef cpu_has_4kex
#define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX)
#endif
#define cpu_has_cache_cdex_s (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
#endif
#ifndef cpu_has_prefetch
-#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#ifdef CONFIG_CPU_MIPSR6
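+/* pref was re-encoded in R6 (9-bit offset); kernel prefetching is simply disabled here. */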
+#define cpu_has_prefetch (0)
+#else
+#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#endif
#endif
#ifndef cpu_has_mcheck
#define cpu_has_mcheck (cpu_data[0].options & MIPS_CPU_MCHECK)
#ifndef cpu_has_rixi
#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
#endif
+#ifndef cpu_has_rixi_except
+#define cpu_has_rixi_except (cpu_data[0].options & MIPS_CPU_RIXI_EXCEPT)
+#endif
#ifndef cpu_has_mmips
# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
#define cpu_has_cm2 (0)
#define cpu_has_cm2_l2sync (0)
#endif
+#ifndef cpu_has_maar
+#define cpu_has_maar (cpu_data[0].options2 & MIPS_CPU_MAAR)
+#endif
/*
* I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
# ifndef cpu_has_mips32r2
# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
# endif
+# ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+# endif
# ifndef cpu_has_mips64r1
# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
# endif
# ifndef cpu_has_mips64r2
# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
# endif
+# ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+# endif
/*
* Shortcuts ...
*/
-#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6)
#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
- cpu_has_mips64r1 | cpu_has_mips64r2)
+ cpu_has_mips64r1 | cpu_has_mips64r2 | \
+ cpu_has_mips32r6 | cpu_has_mips64r6)
#ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
#endif
/*
# define cpu_has_clo_clz cpu_has_mips_r
# endif
+#ifdef CONFIG_CPU_MIPSR6
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
#ifndef cpu_has_dsp
#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
#endif
#define cpu_has_dsp2 (cpu_data[0].ases & MIPS_ASE_DSP2P)
#endif
+#endif /* CONFIG_CPU_MIPSR6 */
+
#ifndef cpu_has_mipsmt
#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT)
#endif
* Capability and feature descriptor structure for MIPS CPU
*/
unsigned long options;
+ unsigned long options2;
unsigned long ases;
unsigned int processor_id;
unsigned int fpu_id;
#define MIPS_CPU_ISA_M32R2 0x00000040
#define MIPS_CPU_ISA_M64R1 0x00000080
#define MIPS_CPU_ISA_M64R2 0x00000100
+#define MIPS_CPU_ISA_M32R6 0x00000200
+#define MIPS_CPU_ISA_M64R6 0x00000400
#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
- MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
+ MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |MIPS_CPU_ISA_M32R6)
#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
- MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+ MIPS_CPU_ISA_M64R6)
/*
* CPU Option encodings
#define MIPS_CPU_TLBINV 0x08000000 /* CPU supports TLBINV/F */
#define MIPS_CPU_CM2 0x10000000 /* CPU has CM2 */
#define MIPS_CPU_CM2_L2SYNC 0x20000000 /* CPU has CM2 L2-only SYNC feature */
+#define MIPS_CPU_TLBINV_FULL 0x40000000 /* CPU supports single TLBINV/F for full V/FTLB */
+#define MIPS_CPU_RIXI_EXCEPT 0x80000000 /* CPU has TLB Read/eXec Inhibit exceptions */
+
+/*
+ * CPU Option2 encodings
+ */
+#define MIPS_CPU_MAAR 0x00000001 /* MAAR exists */
/*
* CPU ASE encodings
#include <asm/hazards.h>
#include <asm/mipsregs.h>
+#ifndef CONFIG_CPU_MIPSR6
+
#define DSP_DEFAULT 0x00000000
#define DSP_MASK 0x3f
tsk->thread.dsp.dspr; \
})
+#else
+
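+/* R6 configurations force cpu_has_dsp to 0, so the DSP context helpers become no-ops. */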
+#define __init_dsp() do { } while (0)
+#define init_dsp() do { } while (0)
+#define save_dsp(tsk) do { } while (0)
+#define restore_dsp(tsk) do { } while (0)
+#define __save_dsp(tsk) do { } while (0)
+#define __restore_dsp(tsk) do { } while (0)
+
+#endif /* CONFIG_CPU_MIPSR6 */
+
#endif /* _ASM_DSP_H */
*/
__asm__ __volatile__ (
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips2 \n"
+#endif
"1: ll %0, %1 # atomic_scrub \n"
" addu %0, $0 \n"
" sc %0, %1 \n"
" beqz %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*virt_addr)
- : "m" (*virt_addr));
+ : "=&r" (temp), "+m" (*virt_addr));
virt_addr++;
}
{
int ret = 0;
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_CPU_MIPS64)
if (test_thread_flag(TIF_32BIT_REGS)) {
change_c0_status(ST0_CU1|ST0_FR,ST0_CU1);
KSTK_STATUS(current) |= ST0_CU1;
#include <asm/war.h>
#ifndef CONFIG_EVA
+#ifdef CONFIG_CPU_MIPSR6
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ if (cpu_has_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips64r6 \n" \
+ "1: ll %1, %4 # __futex_atomic_op \n" \
+ " .set mips0 \n" \
+ " " insn " \n" \
+ " .set mips64r6 \n" \
+ "2: sc $1, %2 \n" \
+ " beqz $1, 1b \n" \
+ __WEAK_LLSC_MB \
+ "3: \n" \
+ " .set pop \n" \
+ " .set mips0 \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %6 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
+ : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "memory"); \
+ } else \
+ ret = -ENOSYS; \
+}
+#else /* !CONFIG_CPU_MIPSR6 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
if (cpu_has_llsc && R10000_LLSC_WAR) { \
} else \
ret = -ENOSYS; \
}
+#endif /* CONFIG_CPU_MIPSR6 */
#else
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips3 \n"
+#endif
"1: ll %1, %3 \n"
" bne %1, %z4, 3f \n"
" .set mips0 \n"
" move $1, %z5 \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips3 \n"
+#endif
"2: sc $1, %2 \n"
" beqz $1, 1b \n"
__WEAK_LLSC_MB
fw_dontuse,
fw_code,
fw_free,
+ fw_inuse,
};
typedef struct {
#define GCMP_GCB_GAOR2MASK_OFS 0x0218 /* Attribute-Only Region2 Mask */
#define GCMP_GCB_GAOR3BA_OFS 0x0220 /* Attribute-Only Region3 Base Address */
#define GCMP_GCB_GAOR3MASK_OFS 0x0228 /* Attribute-Only Region3 Mask */
+#define GCMP_GCB_GCML2P_OFS 0x0300 /* L2 Prefetch Control */
+#define GCMP_GCB_GCML2P_PAGE_MASK 0xfffff000 /* ... page mask */
+#define GCMP_GCB_GCML2P_PFTEN 0x00000100 /* L2 Prefetch Enable */
+#define GCMP_GCB_GCML2P_NPFT 0x000000ff /* N.of L2 Prefetch */
+#define GCMP_GCB_GCML2PB_OFS 0x0308 /* L2 Prefetch Control B */
+#define GCMP_GCB_GCML2PB_CODE_PFTEN 0x00000100 /* L2 Code Prefetch Enable */
/* Core local/Core other control block registers */
#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
/*
* TLB hazards
*/
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if defined(CONFIG_CPU_MIPSR6) || (defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON))
/*
* MIPSR2 and MIPSR6 define ehb for hazard avoidance
* The alternative is switching the assembler to 64-bit code which happens
* to work right even for 32-bit code ...
*/
+#ifdef CONFIG_CPU_MIPSR6
+#define instruction_hazard() \
+do { \
+ unsigned long tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips64r6 \n" \
+ " dla %0, 1f \n" \
+ " jr.hb %0 \n" \
+ " .set pop \n" \
+ "1: \n" \
+ : "=r" (tmp)); \
+} while (0)
+#else /* !CONFIG_CPU_MIPSR6 */
#define instruction_hazard() \
do { \
unsigned long tmp; \
"1: \n" \
: "=r" (tmp)); \
} while (0)
+#endif
#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
defined(CONFIG_CPU_BMIPS)
#define instruction_hazard() \
do { \
- if (cpu_has_mips_r2) \
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) \
__instruction_hazard(); \
} while (0)
#define __disable_fpu_hazard
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
#define __enable_fpu_hazard \
___ehb
#include <linux/stringify.h>
#include <asm/hazards.h>
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+#if (defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_CPU_MIPSR2)) && !defined(CONFIG_MIPS_MT_SMTC)
static inline void arch_local_irq_disable(void)
{
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
-#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+#endif /* if (defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_CPU_MIPSR2)) && !defined(CONFIG_MIPS_MT_SMTC) */
extern void smtc_ipi_replay(void);
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
" ei \n"
#else
" mfc0 $1,$12 \n"
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
- "1:" __LL "%1, %2 # local_add_return \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
+ " .set mips3 \n"
+#endif
+ "1:" __LL "%1, %2 # local_add_return \n"
__ADDU "%0, %1, %3 \n"
__SC "%0, %2 \n"
" beqz %0, 1b \n"
unsigned long temp;
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips3 \n"
- "1:" __LL "%1, %2 # local_sub_return \n"
+#endif
+ "1:" __LL "%1, %2 # local_sub_return \n"
__SUBU "%0, %1, %3 \n"
__SC "%0, %2 \n"
" beqz %0, 1b \n"
#define PG_XIE (_ULCAST_(1) << 30)
#define PG_ELPA (_ULCAST_(1) << 29)
#define PG_ESP (_ULCAST_(1) << 28)
+#define PG_IEC (_ULCAST_(1) << 27)
+#define PG_MCCAUSE (_ULCAST_(0x1f) << 0)
/*
* R4x00 interrupt enable / cause bits
#define MIPS_CONF4_VTLBSIZEEXT_SHIFT (24)
#define MIPS_CONF4_VTLBSIZEEXT (_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
#define MIPS_CONF4_AE (_ULCAST_(1) << 28)
-#define MIPS_CONF4_IE (_ULCAST_(3) << 29)
-#define MIPS_CONF4_TLBINV (_ULCAST_(2) << 29)
+#define MIPS_CONF4_IE (_ULCAST_(3) << 29)
+#define MIPS_CONF4_TLBINV (_ULCAST_(2) << 29)
+#define MIPS_CONF4_TLBINV_FULL (_ULCAST_(1) << 29)
+#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
#define MIPS_CONF5_K (_ULCAST_(1) << 30)
/* ebase register bit definition */
#define MIPS_EBASE_WG (_ULCAST_(1) << 11)
+/* MAAR bits definitions */
+#define MIPS_MAAR_V (_ULCAST_(1))
+#define MIPS_MAAR_S (_ULCAST_(1) << 1)
+#define MIPS_MAAR_HI_V (_ULCAST_(1) << 31)
+
+#define MIPS_MAAR_MAX 64
+
#ifndef __ASSEMBLY__
/*
#define read_c0_prid() __read_32bit_c0_register($15, 0)
#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3)
+#define read_c0_bevva() __read_ulong_c0_register($15, 4)
#define read_c0_config() __read_32bit_c0_register($16, 0)
#define read_c0_config1() __read_32bit_c0_register($16, 1)
#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val)
#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val)
+#define read_c0_lladdr() __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val)
+/*
+ * MAAR registers
+ */
+#define read_c0_maar() __read_ulong_c0_register($17, 1)
+#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val)
+#define read_c0_maarindex() __read_32bit_c0_register($17, 2)
+#define write_c0_maarindex(val) __write_32bit_c0_register($17, 2, val)
+
/*
* The WatchLo register. There may be up to 8 of them.
*/
:: "r" (value)); \
})
+#ifndef CONFIG_CPU_MIPSR6
/*
* Macros to access the DSP ASE registers
*/
#endif /* CONFIG_CPU_MICROMIPS */
#endif
+#endif /* CONFIG_CPU_MIPSR6 */
/*
* TLB operations.
static inline void tlbinvf(void)
{
__asm__ __volatile__(
- ".set push\n\t"
- ".set noreorder\n\t"
- ".word 0x42000004\n\t"
- ".set pop");
+ ".word 0x42000004");
}
/*
{
__asm__ __volatile__(
" .set mips32r2 \n"
- " ehb \n" " .set mips0 \n");
+ " ehb \n"
+ " .set mips0 \n");
}
/*
#define MODULE_PROC_FAMILY "MIPS64_R1 "
#elif defined CONFIG_CPU_MIPS64_R2
#define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
#elif defined CONFIG_CPU_R3000
#define MODULE_PROC_FAMILY "R3000 "
#elif defined CONFIG_CPU_TX39XX
#else /* 'Normal' r4K case */
-#ifndef CONFIG_CPU_MIPSR2
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
/*
* When using the RI/XI bit support, we have 13 bits of flags below
* the physical address. The RI/XI bits are placed such that a SRL 5
#define _PAGE_NO_READ_SHIFT (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })
-#else /* CONFIG_CPU_MIPSR2 */
+#else /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
/* static bits allocation in MIPS R2, two variants -
HUGE TLB in 64BIT kernel support or not.
#endif /* CONFIG_64BIT */
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define INDEX_BASE CKSEG0
#endif
+#ifdef CONFIG_CPU_MIPSR6
#define cache_op(op,addr) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6\n\t \n" \
+ " cache %0, %1 \n" \
+ " .set pop \n" \
+ : \
+ : "i" (op), "R" (*(unsigned char *)(addr)))
+#else
+#define cache_op(op,addr) \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
" .set pop \n" \
: \
: "i" (op), "R" (*(unsigned char *)(addr)))
+#endif
#ifdef CONFIG_MIPS_MT
/*
cache_op(Hit_Writeback_Inv_SD, addr);
}
+#ifdef CONFIG_CPU_MIPSR6
#define protected_cache_op(op,addr) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ "1: cache %0, (%1) \n" \
+ "2: .set pop \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "STR(PTR)" 1b, 2b \n" \
+ " .previous" \
+ : \
+ : "i" (op), "r" (addr))
+#else
+#define protected_cache_op(op,addr) \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
" .previous" \
: \
: "i" (op), "r" (addr))
+#endif
#ifdef CONFIG_EVA
#define protected_cachee_op(op,addr) \
cache_op(Page_Invalidate_T, addr);
}
+#ifdef CONFIG_CPU_MIPSR6
+
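+/*
+ * R6 cache ops carry only a 9-bit signed offset, so these unrolled loops
+ * step the base address through $1 in 0x100 increments instead of using
+ * offsets above 0xff.
+ */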
+#define cache16_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
+ " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
+ " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
+ " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
+ " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
+ " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
+ " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
+ " addiu $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x010($1) \n" \
+ " cache %1, 0x020($1); cache %1, 0x030($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x050($1) \n" \
+ " cache %1, 0x060($1); cache %1, 0x070($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x090($1) \n" \
+ " cache %1, 0x0a0($1); cache %1, 0x0b0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0d0($1) \n" \
+ " cache %1, 0x0e0($1); cache %1, 0x0f0($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache32_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
+ " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
+ " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
+ " addiu $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache64_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
+ " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
+ " addiu $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache128_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
+ " addiu $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " addiu $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
#define cache16_unroll32(base,op) \
__asm__ __volatile__( \
" .set push \n" \
: \
: "r" (base), \
"i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
#ifdef CONFIG_EVA
#define cache16_unroll32_user(base,op) \
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
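+ /* R6 ll/sc accept only a 9-bit offset, so keep the lock address in a register. */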
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder # arch_read_lock \n"
+ "1: ll %0, 0(%1) \n"
+ " bltz %0, 1b \n"
+ " addu %0, 1 \n"
+ "2: sc %0, 0(%1) \n"
+ " .set pop \n"
+ : "=&r" (tmp)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_lock \n"
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
}
smp_llsc_mb();
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ "1: ll %0, 0(%1) # arch_read_unlock \n"
+ " li $1, 1 \n"
+ " sub %0, %0, $1 \n"
+ " sc %0, 0(%1) \n"
+ " .set pop \n"
+ : "=&r" (tmp)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
}
}
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder # arch_write_lock \n"
+ "1: ll %0, 0(%1) \n"
+ " bnez %0, 1b \n"
+ " lui %0, 0x8000 \n"
+ "2: sc %0, 0(%1) \n"
+ " .set pop \n"
+ : "=&r" (tmp)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_write_lock \n"
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
}
smp_llsc_mb();
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ __asm__ __volatile__(
+ " .set noreorder # arch_read_trylock \n"
+ " li %1, 0 \n"
+ "1: ll %0, 0(%2) \n"
+ " bltz %0, 2f \n"
+ " addu %0, 1 \n"
+ " sc %0, 0(%2) \n"
+ " beqz %0, 1b \n"
+ " nop \n"
+ " .set reorder \n"
+ __WEAK_LLSC_MB
+ " li %2, 1 \n"
+ "2: \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (tmp2)
+ : "memory");
+#else
__asm__ __volatile__(
" .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
: "m" (rw->lock)
: "memory");
+#endif
}
return ret;
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+ "1: ll %0, 0(%2) \n"
+ " li %1, 0 # arch_write_trylock \n"
+ " bnez %0, 2f \n"
+ " lui %0, 0x8000 \n"
+ " sc %0, 0(%2) \n"
+ " li %1, 1 \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
" ll %1, %3 # arch_write_trylock \n"
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
smp_llsc_mb();
}
#ifndef _MIPS_SPRAM_H
#define _MIPS_SPRAM_H
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
extern __init void spram_config(void);
#else
static inline void spram_config(void) { };
LONG_S v1, PT_HI(sp)
mflhxu v1
LONG_S v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
mfhi v1
#endif
#ifdef CONFIG_32BIT
LONG_S $10, PT_R10(sp)
LONG_S $11, PT_R11(sp)
LONG_S $12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
LONG_S v1, PT_HI(sp)
mflo v1
#endif
LONG_S $14, PT_R14(sp)
LONG_S $15, PT_R15(sp)
LONG_S $24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
LONG_S v1, PT_LO(sp)
#endif
.endm
mtlhx $24
LONG_L $24, PT_LO(sp)
mtlhx $24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
LONG_L $24, PT_LO(sp)
mtlo $24
LONG_L $24, PT_HI(sp)
.macro RESTORE_SP_AND_RET
LONG_L sp, PT_R29(sp)
+#ifdef CONFIG_CPU_MIPSR6
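+ /* eretnc: exception return that does not clear the LLbit */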
+ .set mips64r6
+ eretnc
+#else
.set mips3
eret
+#endif
.set mips0
.endm
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif
-#define __clear_software_ll_bit() \
+#ifdef CONFIG_CPU_MIPSR6
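+/* With R6 the LLbit is held in CP0 LLAddr; writing 0 to the register clears it. */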
+#define __clear_ll_bit() \
+do { \
+ write_c0_lladdr(0); \
+} while (0)
+#else
+#define __clear_ll_bit() \
do { \
if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
ll_bit = 0; \
} while (0)
+#endif
#define switch_to(prev, next, last) \
do { \
__mips_mt_fpaff_switch_to(prev); \
if (cpu_has_dsp) \
__save_dsp(prev); \
- __clear_software_ll_bit(); \
+ __clear_ll_bit(); \
__usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
(last) = resume(prev, next, task_thread_info(next), __usedfpu); \
} while (0)
*/
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask);
+extern void remove_wired_entry(void);
+extern int wired_push(unsigned long entryhi, unsigned long entrylo0,
+ unsigned long entrylo1, unsigned long pagemask);
+extern int wired_pop(void);
#endif /* __ASM_TLBMISC_H */
enum major_op {
spec_op, bcond_op, j_op, jal_op,
beq_op, bne_op, blez_op, bgtz_op,
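+	/* R6 reuses the addi/daddi opcodes for compact branches (bovc/beqc/beqzalc, bnvc/bnec/bnezalc). */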
+#ifndef CONFIG_CPU_MIPSR6
addi_op, addiu_op, slti_op, sltiu_op,
+#else
+ cbcond0_op, addiu_op, slti_op, sltiu_op,
+#endif
andi_op, ori_op, xori_op, lui_op,
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
+#ifndef CONFIG_CPU_MIPSR6
daddi_op, daddiu_op, ldl_op, ldr_op,
+#else
+ cbcond1_op, daddiu_op, ldl_op, ldr_op,
+#endif
spec2_op, jalx_op, mdmx_op, spec3_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
sdl_op, sdr_op, swr_op, cache_op,
+#ifndef CONFIG_CPU_MIPSR6
ll_op, lwc1_op, lwc2_op, pref_op,
lld_op, ldc1_op, ldc2_op, ld_op,
sc_op, swc1_op, swc2_op, major_3b_op,
scd_op, sdc1_op, sdc2_op, sd_op
+#else
+ ll_op, lwc1_op, bc_op, pref_op,
+ lld_op, ldc1_op, jump_op, ld_op,
+ sc_op, swc1_op, balc_op, major_3b_op,
+ scd_op, sdc1_op, jump2_op, sd_op
+#endif
};
/*
sce_op = 0x1e, swe_op = 0x1f,
bshfl_op = 0x20, swle_op = 0x21,
swre_op = 0x22, prefe_op = 0x23,
- dbshfl_op = 0x24,
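+	/* R6 adds 9-bit-offset forms of cache, sc, scd, pref, ll and lld as spec3 functions. */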
+ dbshfl_op = 0x24, cache6_op = 0x25,
+ sc6_op = 0x26, scd6_op = 0x27,
lbue_op = 0x28, lhue_op = 0x29,
lbe_op = 0x2c, lhe_op = 0x2d,
lle_op = 0x2e, lwe_op = 0x2f,
+ pref6_op = 0x35, ll6_op = 0x36,
+ lld6_op = 0x37,
rdhwr_op = 0x3b
};
spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
tgei_op, tgeiu_op, tlti_op, tltiu_op,
teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
+#ifndef CONFIG_CPU_MIPSR6
bltzal_op, bgezal_op, bltzall_op, bgezall_op,
+#else
+ nal_op, bal_op, rt_op_0x12_op, rt_op_0x13_op,
+#endif
rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
cfc_op = 0x02, mfhc_op = 0x03,
mtc_op = 0x04, dmtc_op = 0x05,
ctc_op = 0x06, mthc_op = 0x07,
- bc_op = 0x08, cop_op = 0x10,
+ rs_bc_op = 0x08, bc1eqz_op = 0x09,
+ bc1nez_op = 0x0d, cop_op = 0x10,
copm_op = 0x18
};
/*
- * rt field of cop.bc_op opcodes
+ * rt field of cop.rs_bc_op opcodes
*/
enum bcop_op {
bcf_op, bct_op, bcfl_op, bctl_op
#define __SWAB_64_THRU_32__
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
#define __arch_swab32 __arch_swab32
/*
- * Having already checked for CONFIG_CPU_MIPSR2, enable the
- * optimized version for 64-bit kernel on r2 CPUs.
+ * Having already checked for CONFIG_CPU_MIPSR2/R6, enable the
+ * optimized version for 64-bit kernel on R2 & R6 CPUs.
*/
#ifdef CONFIG_64BIT
static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
}
#define __arch_swab64 __arch_swab64
#endif /* CONFIG_64BIT */
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
#endif /* _ASM_SWAB_H */
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*/
+/* Note on R6 compact branches:
+ * Compact branches do not take exceptions themselves (besides
+ * BC1EQZ/BC1NEZ) and do not execute the instruction in the forbidden
+ * slot when the branch is taken. This means their return EPC can
+ * safely be set to EPC + 8: the only way to get a precise BD exception
+ * is for the forbidden-slot instruction to execute, which happens only
+ * when the branch is not taken.
+ *
+ * Unconditional compact jumps/branches are handled as well for
+ * completeness (they do not actually raise precise BD exceptions).
+ */
int __compute_return_epc_for_insn(struct pt_regs *regs,
union mips_instruction insn)
{
- unsigned int bit, fcr31, dspcontrol;
+ unsigned int bit;
long epc = regs->cp0_epc;
int ret = 0;
+#ifdef CONFIG_CPU_MIPSR6
+ int reg;
+#else
+ unsigned int fcr31;
+ unsigned int dspcontrol;
+#endif
switch (insn.i_format.opcode) {
/*
case jalr_op:
regs->regs[insn.r_format.rd] = epc + 8;
/* Fall through */
+#ifndef CONFIG_CPU_MIPSR6
case jr_op:
+#endif
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
case bcond_op:
switch (insn.i_format.rt) {
case bltz_op:
+#ifndef CONFIG_CPU_MIPSR6
case bltzl_op:
+#endif
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
if (insn.i_format.rt == bltzl_op)
ret = BRANCH_LIKELY_TAKEN;
+#endif
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bgez_op:
+#ifndef CONFIG_CPU_MIPSR6
case bgezl_op:
+#endif
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
if (insn.i_format.rt == bgezl_op)
ret = BRANCH_LIKELY_TAKEN;
+#endif
} else
epc += 8;
regs->cp0_epc = epc;
break;
+#ifdef CONFIG_CPU_MIPSR6
+ case nal_op: /* MIPSR6: nal == bltzal $0 */
+ if (insn.i_format.rs)
+ break;
+ regs->regs[31] = epc + 8;
+ epc += 4;
+ regs->cp0_epc = epc;
+ break;
+
+ case bal_op: /* MIPSR6: bal == bgezal $0 */
+ if (insn.i_format.rs)
+ break;
+ regs->regs[31] = epc + 8;
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ regs->cp0_epc = epc;
+ break;
+#else
case bltzal_op:
case bltzall_op:
regs->regs[31] = epc + 8;
epc += 8;
regs->cp0_epc = epc;
break;
+#endif
}
break;
* These are conditional and in i_format.
*/
case beq_op:
+#ifndef CONFIG_CPU_MIPSR6
case beql_op:
+#endif
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
if (insn.i_format.opcode == beql_op)
ret = BRANCH_LIKELY_TAKEN;
+#endif
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bne_op:
+#ifndef CONFIG_CPU_MIPSR6
case bnel_op:
+#endif
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
if (insn.i_format.opcode == bnel_op)
ret = BRANCH_LIKELY_TAKEN;
+#endif
} else
epc += 8;
regs->cp0_epc = epc;
break;
case blez_op: /* not really i_format */
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: blezalc, bgezalc, bgeuc
+ */
+ if (insn.i_format.rt) {
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* blezalc, bgezalc */
+ regs->regs[31] = epc + 4;
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+
+ if ((long)regs->regs[insn.i_format.rs] <= 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+#endif
case blezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: blezc, bgezc, bgec
+ */
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+#else
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
epc += 8;
regs->cp0_epc = epc;
break;
+#endif
case bgtz_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: bltzalc, bgtzalc, bltuc
+ */
+ if (insn.i_format.rt) {
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* bltzalc, bgtzalc */
+ regs->regs[31] = epc + 4;
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+
+ if ((long)regs->regs[insn.i_format.rs] > 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+#endif
case bgtzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: bltc, bltzc, bgtzc
+ */
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+#else
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+ case cbcond0_op:
+ /*
+ * Compact branches: bovc, beqc, beqzalc
+ */
+
+ /* fall through */
+ case cbcond1_op:
+ /*
+ * Compact branches: bnvc, bnec, bnezalc
+ */
+ if (insn.i_format.rt && !insn.i_format.rs) /* beqzalc/bnezalc */
+ regs->regs[31] = epc + 4;
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+#endif
+
/*
* And now the FPA/cp1 branch instructions.
*/
case cop1_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if ((insn.i_format.rs != bc1eqz_op) &&
+ (insn.i_format.rs != bc1nez_op))
+ break;
+
+ lose_fpu(1); /* Save FPU state for the emulator. */
+ reg = insn.i_format.rt;
+ bit = 0;
+ switch (insn.i_format.rs) {
+ case bc1eqz_op:
+ if (current->thread.fpu.fpr[reg] == (__u64)0)
+ bit = 1;
+ break;
+ case bc1nez_op:
+ if (current->thread.fpu.fpr[reg] != (__u64)0)
+ bit = 1;
+ break;
+ }
+ own_fpu(1); /* Restore FPU state. */
+ if (bit)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+#else
preempt_disable();
if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
break;
}
break;
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+ case bc_op:
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case jump_op:
+ if (insn.i_format.rs) /* beqzc */
+ epc = epc + 8;
+ else /* jic, no offset shift */
+ epc = regs->regs[insn.i_format.rt] + insn.i_format.simmediate;
+ regs->cp0_epc = epc;
+ break;
+
+ case balc_op:
+ regs->regs[31] = epc + 4;
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ regs->cp0_epc = epc;
+ break;
+
+ case jump2_op:
+ if (insn.i_format.rs) /* bnezc */
+ epc = epc + 8;
+ else { /* jialc, no offset shift */
+ regs->regs[31] = epc + 4;
+ epc = regs->regs[insn.i_format.rt] + insn.i_format.simmediate;
+ }
+ regs->cp0_epc = epc;
+ break;
+#endif
+
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case lwc2_op: /* This is bbit0 on Octeon */
if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
return ret;
+#ifndef CONFIG_CPU_MIPSR6
sigill:
printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+#endif
}
EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
#ifndef CONFIG_MIPS_MT_SMTC
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
- const int r2 = cpu_has_mips_r2;
+ const int r2 = cpu_has_mips_r2 | cpu_has_mips_r6;
struct clock_event_device *cd;
int cpu = smp_processor_id();
#include <asm/mipsregs.h>
#include <asm/setup.h>
+#ifndef CONFIG_CPU_MIPSR6
+
static char bug64hit[] __initdata =
"reliable operation impossible!\n%s";
static char nowar[] __initdata =
{
check_daddi();
}
+
+#else /* CONFIG_CPU_MIPSR6 */
+
+static volatile int daddi_ov __cpuinitdata;
+int daddiu_bug = 0;
+
+void __init check_bugs64_early(void) {}
+
+void __init check_bugs64(void) {}
+
+#endif /* CONFIG_CPU_MIPSR6 */
case 1:
set_isa(c, MIPS_CPU_ISA_M32R2);
break;
+ case 2:
+ c->isa_level = MIPS_CPU_ISA_M32R6;
+ break;
default:
goto unknown;
}
case 1:
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
+ case 2:
+ c->isa_level = MIPS_CPU_ISA_M64R6;
+ break;
}
break;
default:
if (config4 & MIPS_CONF4_TLBINV) {
c->options |= MIPS_CPU_TLBINV;
printk("TLBINV/F supported, config4=0x%0x\n",config4);
+ if (config4 & MIPS_CONF4_TLBINV_FULL)
+ c->options |= MIPS_CPU_TLBINV_FULL;
}
- /* TBW: page walker support starts here */
}
+#ifdef CONFIG_CPU_MIPSR6
+ c->tlbsizevtlb = ((c->tlbsizevtlb - 1) |
+ (((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE)) + 1;
+ c->tlbsize = c->tlbsizevtlb;
+
+ newcf4 = (config4 & ~MIPS_CONF4_FTLBPAGESIZE) |
+ ((((fls(PAGE_SIZE >> BASIC_PAGE_SHIFT)-1)/2)+1) <<
+ MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+ write_c0_config4(newcf4);
+ back_to_back_c0_hazard();
+ config4 = read_c0_config4();
+ if (config4 != newcf4) {
+ printk(KERN_ERR "PAGE_SIZE 0x%0lx is not supported by FTLB (config4=0x%0x)\n",
+ PAGE_SIZE, config4);
+ if (conf6available && (cpu_capability & MIPS_FTLB_CAPABLE)) {
+ printk("Switching FTLB OFF\n");
+ config6 = read_c0_config6();
+ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
+ }
+ printk("Total TLB(VTLB) inuse: %d\n",c->tlbsizevtlb);
+ } else {
+ c->tlbsizeftlbsets = 1 <<
+ ((config4 & MIPS_CONF4_FTLBSETS) >>
+ MIPS_CONF4_FTLBSETS_SHIFT);
+ c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+ MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+ c->tlbsize += (c->tlbsizeftlbways *
+ c->tlbsizeftlbsets);
+ printk("V/FTLB found: VTLB=%d, FTLB sets=%d, ways=%d total TLB=%d\n",
+ c->tlbsizevtlb, c->tlbsizeftlbsets, c->tlbsizeftlbways, c->tlbsize);
+ }
+#else
switch (config4 & MIPS_CONF4_MMUEXTDEF) {
case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
c->tlbsize =
c->tlbsizevtlb, c->tlbsizeftlbsets, c->tlbsizeftlbways, c->tlbsize);
break;
}
+#endif
}
c->kscratch_mask = (config4 >> 16) & 0xff;
if (config5 & MIPS_CONF5_EVA)
c->options |= MIPS_CPU_EVA;
+ if (config5 & MIPS_CONF5_MRP)
+ c->options2 |= MIPS_CPU_MAAR;
return config5 & MIPS_CONF_M;
}
mips_probe_watch_registers(c);
- if (cpu_has_mips_r2)
+ if (cpu_has_mips_r2 || cpu_has_mips_r6)
c->core = read_c0_ebase() & 0x3ff;
+
+ if (cpu_has_rixi) {
+ write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+ back_to_back_c0_hazard();
+ if (read_c0_pagegrain() & PG_IEC) {
+ c->options |= MIPS_CPU_RIXI_EXCEPT;
+ pr_info("TLBRI/TLBXI exceptions are used\n");
+ }
+ }
}
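/*
 * Sketch (hypothetical helper) of the probe pattern used above: set a
 * bit, resolve the CP0 hazard, then read it back to see whether the
 * hardware actually implements it.
 */
static inline int pagegrain_bit_supported(unsigned int bit)
{
	write_c0_pagegrain(read_c0_pagegrain() | bit);
	back_to_back_c0_hazard();
	return !!(read_c0_pagegrain() & bit);
}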
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
}
}
- if (cpu_has_mips_r2) {
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
/* R2 has Performance Counter Interrupt indicator */
c->options |= MIPS_CPU_PCI;
jal syscall_trace_leave
b resume_userspace
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT)
/*
* MIPS32R2 Instruction Hazard Barrier - must be called
.align 8
#endif
LEAF(mips_ihb)
- .set mips32r2
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+#else
+ .set mips32r2
+#endif
jr.hb ra
nop
+ .set mips0
#ifdef CONFIG_EVA
.align 8
#endif
END(mips_ihb)
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
*/
NESTED(except_vec3_r4000, 0, sp)
.set push
- .set mips3
- .set noat
+ .set mips3
+ .set noat
mfc0 k1, CP0_CAUSE
li k0, 31<<2
andi k1, k1, 0x7c
/* end of rollback region (the region size must be power of two) */
1:
jr ra
- nop
+ nop
.set pop
END(__r4k_wait)
#else
and k0, ST0_IE
bnez k0, 1f
-
+#ifdef CONFIG_CPU_MIPSR6
+ eretnc
+#else
eret
#endif
+#endif
1:
.set pop
#endif
and k0, k0, k1
mtc0 k0, CP0_STATUS
ehb
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+ eretnc
+#else
.set mips3
eret
+#endif
.set pop
END(nmi_handler)
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+ eretnc
+#else
.set mips3
eret
+#endif
.set mips0
#endif
.set pop
END(handle_ri_rdhwr)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_CPU_MIPSR6)
/* A temporary overflow handler used by check_daddi(). */
__INIT
EXPORT_SYMBOL(__strncpy_from_user_asm);
#endif
+#ifndef CONFIG_CPU_MIPSR6
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_user);
EXPORT_SYMBOL(__csum_partial_copy_fromuser);
EXPORT_SYMBOL(__csum_partial_copy_touser);
#endif
+#endif /* !CONFIG_CPU_MIPSR6 */
EXPORT_SYMBOL(invalid_pte_table);
#ifdef CONFIG_FUNCTION_TRACER
seq_printf(m, "%s", " mips32r1");
if (cpu_has_mips32r2)
seq_printf(m, "%s", " mips32r2");
+ if (cpu_has_mips32r6)
+ seq_printf(m, "%s", " mips32r6");
if (cpu_has_mips64r1)
seq_printf(m, "%s", " mips64r1");
if (cpu_has_mips64r2)
seq_printf(m, "%s", " mips64r2");
+ if (cpu_has_mips64r6)
+ seq_printf(m, "%s", " mips64r6");
seq_printf(m, "\n");
}
return 1;
if (ip->j_format.opcode == jal_op)
return 1;
+#ifdef CONFIG_CPU_MIPSR6
+ if (((ip->i_format.opcode == jump_op) || /* jic */
+ (ip->i_format.opcode == jump2_op)) && /* jialc */
+ (ip->i_format.rs == 0))
+ return 1;
+ if (ip->r_format.opcode != spec_op)
+ return 0;
+ return ((ip->r_format.func == jalr_op) && !ip->r_format.rt);
+#else
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
+#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
preempt_enable();
break;
}
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
}
tmp = child->thread.dsp.dspcontrol;
break;
+#endif /* CONFIG_CPU_MIPSR6 */
default:
tmp = 0;
ret = -EIO;
case FPC_CSR:
child->thread.fpu.fcr31 = data;
break;
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
}
child->thread.dsp.dspcontrol = data;
break;
+#endif /* CONFIG_CPU_MIPSR6 */
default:
/* The rest are not allowed. */
ret = -EIO;
preempt_enable();
break;
}
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
}
tmp = child->thread.dsp.dspcontrol;
break;
+#endif
default:
tmp = 0;
ret = -EIO;
case FPC_CSR:
child->thread.fpu.fcr31 = data;
break;
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
}
child->thread.dsp.dspcontrol = data;
break;
+#endif
default:
/* The rest are not allowed. */
ret = -EIO;
.endm
.set noreorder
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+#else
.set mips3
+#endif
LEAF(_save_fp_context)
cfc1 t1, fcr31
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR6)
/* Store the 16 odd double precision registers */
EX sdc1 $f1, SC_FPREGS+8(a0)
EX sdc1 $f3, SC_FPREGS+24(a0)
EX sdc1 $f29, SC_FPREGS+232(a0)
EX sdc1 $f31, SC_FPREGS+248(a0)
#else
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set push
.set mips64r2
.set noreorder
*/
LEAF(_restore_fp_context)
EX lw t0, SC_FPC_CSR(a0)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR6)
EX ldc1 $f1, SC_FPREGS+8(a0)
EX ldc1 $f3, SC_FPREGS+24(a0)
EX ldc1 $f5, SC_FPREGS+40(a0)
EX ldc1 $f31, SC_FPREGS+248(a0)
#else
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set push
.set mips64r2
.set noreorder
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
.set push
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+#else
.set mips64r2
+#endif
.set noreorder
/* Restore an o32 sigcontext. */
/* Now copy FR from it */
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
#ifdef CONFIG_MIPS_MT_SMTC
li t3, ST0_FR
mtc0 t1, CP0_TCSTATUS
#endif /* CONFIG_MIPS_MT_SMTC */
move v0, a0
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
jr.hb ra
#else
_ehb
* Save a thread's fp context.
*/
LEAF(_save_fp)
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
mfc0 t0, CP0_STATUS
#endif
fpu_save_double a0 t0 t1 # clobbers t1
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
mfc0 t0, CP0_STATUS
#endif
fpu_restore_double a0 t0 t1 # clobbers t1
sll t0, t0, 31 - _ST0_FR
bgez t0, 1f # 16 / 32 register mode?
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
enable_fpu_hazard
li t2, FPU_CSR_NAN2008
cfc1 t3, fcr31
#endif /* CONFIG_64BIT */
#ifdef CONFIG_CPU_MIPS32
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
sll t0, t0, 31 - _ST0_FR
bgez t0, 2f # 16 / 32 register mode?
mtc1 t1, $f30
mtc1 t1, $f31
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
bgez t0, 1f # 16 / 32 register mode?
move t1, t3 # move SNaN, DP high word
mthc1 t1, $f31
.set pop
1:
-#endif /* CONFIG_CPU_MIPS32_R2 */
-#else /* CONFIG_CPU_MIPS32 */
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
+#else /* !CONFIG_CPU_MIPS32 */
+#ifdef CONFIG_CPU_MIPS64_R6
+ .set mips64r6
+#else
.set mips3
+#endif
dmtc1 t1, $f0
dmtc1 t1, $f2
dmtc1 t1, $f4
case BOOT_MEM_RESERVED:
printk(KERN_CONT "(reserved)\n");
break;
+ case BOOT_MEM_INUSE:
+ printk(KERN_CONT "(in-use, reserved)\n");
+ break;
default:
printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
break;
#else /* !CONFIG_SGI_IP27 */
+static int maar_last = -2;
+static int nomaar_flag;
+static unsigned long maar_regs[MIPS_MAAR_MAX * 2];
+
+static int __init early_parse_nomaar(char *p)
+{
+ nomaar_flag = 1;
+ return 0;
+}
+early_param("nomaar", early_parse_nomaar);
+
+static void __init maar_reset(void)
+{
+ int maar = 0;
+
+ do {
+ write_c0_maarindex(maar);
+ back_to_back_c0_hazard();
+ if (read_c0_maarindex() != maar)
+ return;
+ write_c0_maar(0);
+ back_to_back_c0_hazard();
+ } while (++maar < MIPS_MAAR_MAX);
+}
+
+void __init maar_setup(void)
+{
+ int maar = 0;
+ phys_t low;
+ phys_t upper;
+
+ if (nomaar_flag || !cpu_has_maar)
+ return;
+
+ pr_info("MAAR setup:\n");
+ maar_reset();
+
+ for (maar=0; maar<(maar_last+2); maar++) {
+ write_c0_maarindex(maar);
+ back_to_back_c0_hazard();
+ if (read_c0_maarindex() != maar) {
+ pr_err("CPU has only %d MAARs, resetting...\n",maar - 1);
+ maar_reset();
+ return;
+ }
+ write_c0_maar(maar_regs[maar]);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ write_c0_hi_maar(maar_regs[maar + MIPS_MAAR_MAX]);
+#endif
+ back_to_back_c0_hazard();
+ if (maar & 1) {
+ low = (((phys_t)maar_regs[maar]) << 4) & ~0xffff;
+ upper = (((phys_t)maar_regs[maar - 1]) << 4) & ~0xffff;
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ low += (((phys_t)maar_regs[maar + MIPS_MAAR_MAX]) << 36) & ~MIPS_MAAR_HI_V;
+ upper += (((phys_t)maar_regs[maar - 1 + MIPS_MAAR_MAX]) << 36) & ~MIPS_MAAR_HI_V;
+#endif
+ upper = (upper & ~0xffff) + 0xffff;
+ pr_info(" [%0#10lx-%0#10lx] %s\n", low, upper,
+ (maar_regs[maar -1] & MIPS_MAAR_S)?"speculative":"");
+ }
+ }
+}
+
+static void __init maar_update(phys_t begin, phys_t end, int speculative)
+{
+ phys_t start;
+
+ /* rounding, let's be conservative if speculative */
+ if (speculative) {
+ if (begin & 0xffff)
+ start = (begin + 0x10000) & ~0xffff;
+ else
+ start = begin;
+ end = (end - 0x10000) & ~0xffff;
+ } else {
+ start = begin & ~0xffff;
+ end = (end - 1) & ~0xffff;
+ }
+ if (speculative && (end == start))
+ return;
+
+ maar_regs[maar_last + 1] = ((start >> 4) | MIPS_MAAR_V | (speculative?MIPS_MAAR_S:0));
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ maar_regs[maar_last + 1 + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (start >> 36);
+#endif
+ maar_regs[maar_last] = ((end >> 4) | MIPS_MAAR_V | (speculative?MIPS_MAAR_S:0));
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ maar_regs[maar_last + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (end >> 36);
+#endif
+ return;
+}
+
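/*
 * Minimal sketch (hypothetical helper, not used by the code here) of
 * the bound encoding that maar_update() above relies on: both limits
 * are 64KB aligned and stored shifted right by 4, with the valid and
 * (optionally) speculative bits OR-ed in.
 */
static inline unsigned long maar_encode_bound(phys_t addr, int speculative)
{
	return (addr >> 4) | MIPS_MAAR_V | (speculative ? MIPS_MAAR_S : 0);
}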
+void __init add_maar_region(phys_t start, phys_t end, int speculative)
+{
+ phys_t upper;
+ unsigned sbit;
+ int i;
+
+ if (nomaar_flag || !cpu_has_maar)
+ return;
+
+ if (maar_last < 0) {
+ maar_last = 0;
+ maar_update(start, end, speculative);
+ return;
+ }
+
+ /* try merge with previous region */
+ upper = maar_regs[maar_last];
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ upper |= (((phys_t)maar_regs[maar_last + MIPS_MAAR_MAX] << 32) & ~MIPS_MAAR_HI_V);
+#endif
+ sbit = (upper & MIPS_MAAR_S)? MIPS_MAAR_S : 0;
+ speculative = speculative? MIPS_MAAR_S : 0;
+ upper = ((upper << 4) + 0x10000) & ~0xffffUL;
+ if (((upper == (start & ~0xffffUL)) ||
+ (upper == ((start + 0xffffUL) & ~0xffffUL))) &&
+ (sbit == speculative)) {
+ if (speculative)
+ end = (end - 0x10000) & ~0xffff;
+ else
+ end = (end - 1) & ~0xffff;
+ maar_regs[maar_last] = (end >> 4) | MIPS_MAAR_V | sbit;
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ maar_regs[maar_last + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (end >> 36);
+#endif
+ return;
+ }
+
+ maar_last += 2;
+ if (maar_last >= MIPS_MAAR_MAX) {
+ pr_err("Attempt to initialize more than %d MAARs\n", MIPS_MAAR_MAX);
+ for (i=0; i<MIPS_MAAR_MAX; i++) {
+ maar_regs[i] = 0;
+ maar_regs[i + MIPS_MAAR_MAX] = 0;
+ }
+ return;
+ }
+ maar_update(start, end, speculative);
+}
+
static void __init bootmem_init(void)
{
unsigned long reserved_end;
bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
min_low_pfn, max_low_pfn);
-
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
if (end <= start)
continue;
#endif
+ if ((!nomaar_flag) && cpu_has_maar &&
+ ((boot_mem_map.map[i].type == BOOT_MEM_RAM) ||
+ (boot_mem_map.map[i].type == BOOT_MEM_ROM_DATA) ||
+ (boot_mem_map.map[i].type == BOOT_MEM_INUSE) ||
+ (boot_mem_map.map[i].type == BOOT_MEM_INIT_RAM)))
+ add_maar_region(PFN_PHYS(start), PFN_PHYS(end), 1);
memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
}
+ if (cpu_has_maar && !nomaar_flag)
+ maar_setup();
/*
* Register fully available low RAM pages with the bootmem allocator.
case BOOT_MEM_ROM_DATA:
res->name = "System RAM";
break;
+ case BOOT_MEM_INUSE:
+ res->name = "InUse memory";
+ break;
case BOOT_MEM_RESERVED:
default:
res->name = "reserved";
#endif
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
if (cpu_has_dsp) {
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
+#endif
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned int used_math;
+#ifndef CONFIG_CPU_MIPSR6
unsigned long treg;
+#endif
int err = 0;
int i;
#endif
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
+#endif
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
if (cpu_has_dsp) {
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
}
+#endif
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
{
u32 used_math;
int err = 0;
+#ifndef CONFIG_CPU_MIPSR6
s32 treg;
+#endif
int i;
/* Always make any pending restarted system calls return -EINTR */
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
+#endif
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
#endif
+#ifdef CONFIG_CPU_MIPSR6
+ pr_info("BEVVA = %lx\n", read_c0_bevva());
+#endif
+
#ifdef CONFIG_EVA
if (gcmp_present)
BEV_overlay_segment();
if (gcmp_present >= 0)
return gcmp_present;
- if (cpu_has_mips_r2 && (read_c0_config3() & MIPS_CONF3_CMGCR)) {
+ if ((cpu_has_mips_r2 || cpu_has_mips_r6) &&
+ (read_c0_config3() & MIPS_CONF3_CMGCR)) {
/* try CMGCRBase */
confaddr = read_c0_cmgcrbase() << 4;
_gcmp_base = (unsigned long) ioremap_nocache(confaddr, size);
cpu_data[0].options |= MIPS_CPU_CM2_L2SYNC;
printk("L2-only SYNC available\n");
}
+ if (cpu_has_cm2) {
+ unsigned int l2p;
+
+ l2p = GCMPGCB(GCML2P);
+ if (l2p & GCMP_GCB_GCML2P_NPFT) {
+ GCMPGCB(GCML2P) = (l2p & ~GCMP_GCB_GCML2P_PAGE_MASK) |
+ PAGE_MASK | GCMP_GCB_GCML2P_PFTEN;
+ GCMPGCB(GCML2PB) |= GCMP_GCB_GCML2PB_CODE_PFTEN;
+ }
+ }
return gcmp_present;
}
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
+extern void maar_setup(void);
+
/* CPU siblings in MIPS:
*
* SMVP kernel - VPEs on common core are siblings
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
+ maar_setup();
per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__ (
- " .set mips3 \n"
- " li %[err], 0 \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
+ " .set mips3 \n"
+#endif
+ " li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
"2: sc %[tmp], (%[addr]) \n"
}
}
+#ifdef CONFIG_CPU_MIPSR6
+static char *mcheck_code[32] = { "non-R6 multiple hit in TLB: Status.TS = 1",
+ "multiple hit in TLB",
+ "multiple hit in TLB, speculative access",
+ "page size mismatch, unsupported FTLB page mask",
+ "index doesn't match EntryHI.VPN2 position in FTLB",
+ "HW PageTableWalker: Valid bits mismatch in PTE pair on directory level",
+ "HW PageTableWalker: Dual page mode is not implemented"
+ };
+#endif
+
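#ifdef CONFIG_CPU_MIPSR6
/*
 * Sketch of how the table above is consulted (hypothetical helper; the
 * real lookup is open-coded in do_mcheck() below): PageGrain.MCCause
 * selects the message, and index 0 covers the legacy Status.TS case.
 */
static inline const char *mcheck_reason(unsigned int pagegrain, int status_ts)
{
	int code = status_ts ? 0 : (pagegrain & PG_MCCAUSE);

	return mcheck_code[code];
}
#endif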
asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
+#ifdef CONFIG_CPU_MIPSR6
+ int code = 0;
+#endif
show_regs(regs);
+#ifdef CONFIG_CPU_MIPSR6
+ if (multi_match || (code = read_c0_pagegrain() & PG_MCCAUSE)) {
+ printk("PageGrain: %0x\n", read_c0_pagegrain());
+ printk("BadVAddr: %0*lx\n", field, read_c0_badvaddr());
+#else
if (multi_match) {
+#endif
printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
show_code((unsigned int __user *) regs->cp0_epc);
+#ifdef CONFIG_CPU_MIPSR6
+ panic("Caught Machine Check exception - %s",mcheck_code[code]);
+#else
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
+#endif
}
asmlinkage void do_mt(struct pt_regs *regs)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
- if (cpu_has_mips_r2)
+ if (cpu_has_mips_r2 || cpu_has_mips_r6)
hwrena |= 0x0000000f;
if (!noulri && cpu_has_userlocal)
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
- if (cpu_has_mips_r2) {
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
__setup("rdhwr_noopt", set_rdhwr_noopt);
+extern void tlb_do_page_fault_0(void);
+
void __init trap_init(void)
{
extern char except_vec3_generic;
} else {
#ifdef CONFIG_KVM_GUEST
#define KVM_GUEST_KSEG0 0x40000000
- ebase = KVM_GUEST_KSEG0;
+ ebase = KVM_GUEST_KSEG0;
#else
- ebase = CKSEG0;
+ ebase = CKSEG0;
#endif
- if (cpu_has_mips_r2)
+ if (cpu_has_mips_r2 || cpu_has_mips_r6)
ebase += (read_c0_ebase() & 0x3ffff000);
}
set_except_vector(15, handle_fpe);
set_except_vector(16, handle_ftlb);
+
+ if (cpu_has_rixi && cpu_has_rixi_except) {
+ set_except_vector(19, tlb_do_page_fault_0);
+ set_except_vector(20, tlb_do_page_fault_0);
+ }
+
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#endif
#ifdef __LITTLE_ENDIAN
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+#endif
+
+#elif defined(CONFIG_CPU_MIPSR6)
+/* non-EVA R6 variant */
+
+#ifdef __BIG_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlb\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlb\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 2(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 3(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlb\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "5:\tlbu\t$1, 4(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "6:\tlbu\t$1, 5(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "7:\tlbu\t$1, 6(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "8:\tlbu\t$1, 7(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 1(%2)\n\t" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\tsb\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 3(%2)\n\t" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\tsb\t$1, 2(%2)\n\t" \
+ "srl\t$1, $1, 0x8\n" \
+ "3:\tsb\t$1, 1(%2)\n\t" \
+ "srl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 7(%2)\n\t" \
+ "dsrl\t$1, %1, 0x8\n" \
+ "2:\tsb\t$1, 6(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "3:\tsb\t$1, 5(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 4(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "2:\tsb\t$1, 3(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "3:\tsb\t$1, 2(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 1(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlb\t%0, 1(%2)\n" \
+ "2:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlb\t%0, 3(%2)\n" \
+ "2:\tlbu\t$1, 2(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 1(%2)\n" \
+ "2:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 3(%2)\n" \
+ "2:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 0(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 7(%2)\n" \
+ "2:\tlbu\t$1, 6(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 5(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 4(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "5:\tlbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "6:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "7:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "8:\tlbu\t$1, 0(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 0(%2)\n\t" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\tsb\t$1, 1(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 0(%2)\n\t" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\tsb\t$1, 1(%2)\n\t" \
+ "srl\t$1,$1, 0x8\n" \
+ "3:\tsb\t$1, 2(%2)\n\t" \
+ "srl\t$1,$1, 0x8\n" \
+ "4:\tsb\t$1, 3(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 0(%2)\n\t" \
+ "dsrl\t$1,%1, 0x8\n" \
+ "2:\tsb\t$1, 1(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "3:\tsb\t$1, 2(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "4:\tsb\t$1, 3(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "5:\tsb\t$1, 4(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "6:\tsb\t$1, 5(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "7:\tsb\t$1, 6(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "8:\tsb\t$1, 7(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#endif
#else
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreDW(addr, value, res) \
__asm__ __volatile__ ( \
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#endif
#ifdef __LITTLE_ENDIAN
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreDW(addr, value, res) \
__asm__ __volatile__ ( \
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#endif
#endif
break;
return;
+#ifndef CONFIG_CPU_MIPSR6
/*
* COP2 is available to implementor for application specific use.
* It's up to applications to register a notifier chain and do
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
+#endif
default:
/*
* end of memory on some systems. It's also a seriously bad idea on non
* dma-coherent systems.
*/
+#ifndef CONFIG_CPU_MIPSR6
+
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_MIPS_MALTA)
#undef CONFIG_CPU_HAS_PREFETCH
#endif
END(__csum_partial_copy_touser)
#endif /* CONFIG_EVA */
+
+#endif /* !CONFIG_CPU_MIPSR6 */
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif
+#ifdef CONFIG_CPU_MIPSR6
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#ifdef USE_DOUBLE
#define LOAD ld
+#ifndef CONFIG_CPU_MIPSR6
#define LOADL ldl
#define LOADR ldr
#define STOREL sdl
#define STORER sdr
+#endif
#define STORE sd
#define ADD daddu
#define SUB dsubu
#else
#define LOAD lw
+#ifndef CONFIG_CPU_MIPSR6
#define LOADL lwl
#define LOADR lwr
#define STOREL swl
#define STORER swr
+#endif
#define STORE sw
#define ADD addu
#define SUB subu
and t0, src, ADDRMASK
PREF( 0, 2*32(src) )
PREF( 1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
bnez t1, .Ldst_unaligned
nop
bnez t0, .Lsrc_unaligned_dst_aligned
+#else
+ or t0, t0, t1
+ bnez t0, .Lcopy_unaligned_bytes
+#endif
/*
* use delay slot for fall-through
* src and dst are aligned; need to compute rem
bne rem, len, 1b
.set noreorder
+#ifndef CONFIG_CPU_MIPSR6
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
ADD dst, dst, NBYTES
bne len, rem, 1b
.set noreorder
+#endif /* !CONFIG_CPU_MIPSR6 */
.Lcopy_bytes_checklen:
beqz len, .Ldone
.Ldone:
jr ra
nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes:
+1:
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+ COPY_BYTE(6)
+ COPY_BYTE(7)
+ ADD src, src, 8
+ b 1b
+ ADD dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
+
END(memcpy)
.Ll_exc_copy:
.set at
#endif
+#ifndef CONFIG_CPU_MIPSR6
+
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_L, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */
#ifdef __MIPSEL__
EX(LONG_S_R, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */
#endif
- PTR_SUBU a0, t0 /* long align ptr */
+ PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
+#else /* CONFIG_CPU_MIPSR6 */
+
+#define STORE_BYTE(N) \
+EX( sb, a1, N(a0), .Lbyte_fixup); \
+ beqz t0, 0f; \
+ PTR_ADDU t0, 1;
+
+ PTR_ADDU a2, t0 /* correct size */
+
+ PTR_ADDU t0, 1
+ STORE_BYTE(0)
+ STORE_BYTE(1)
+#if LONGSIZE == 4
+ EX( sb, a1, 2(a0), .Lbyte_fixup)
+#else
+ STORE_BYTE(2)
+ STORE_BYTE(3)
+ STORE_BYTE(4)
+ STORE_BYTE(5)
+ EX( sb, a1, 6(a0), .Lbyte_fixup)
+#endif
+0:
+ ori a0, STORMASK
+ xori a0, STORMASK
+ PTR_ADDIU a0, STORSIZE
+
+#endif /* CONFIG_CPU_MIPSR6 */
+
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
beqz t1, .Lmemset_partial /* no block to fill */
andi a2, STORMASK /* At most one long to go */
beqz a2, 1f
- PTR_ADDU a0, a2 /* What's left */
+#ifndef CONFIG_CPU_MIPSR6
+ PTR_ADDU a0, a2 /* What's left */
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_R, a1, -1(a0), .Llast_fixup)
#ifdef __MIPSEL__
EX(LONG_S_L, a1, -1(a0), .Llast_fixup)
#endif
+#else /* CONFIG_CPU_MIPSR6 */
+ PTR_SUBU t0, $0, a2
+ PTR_ADDIU t0, 1
+ STORE_BYTE(0)
+ STORE_BYTE(1)
+#if LONGSIZE == 4
+ EX( sb, a1, 2(a0), .Lbyte_fixup)
+#else
+ STORE_BYTE(2)
+ STORE_BYTE(3)
+ STORE_BYTE(4)
+ STORE_BYTE(5)
+ EX( sb, a1, 6(a0), .Lbyte_fixup)
+#endif
+0:
+#endif /* CONFIG_CPU_MIPSR6 */
1: jr ra
move a2, zero
jr ra
andi v1, a2, STORMASK
+#ifdef CONFIG_CPU_MIPSR6
+.Lbyte_fixup:
+ PTR_SUBU a2, $0, t0
+ jr ra
+ PTR_ADDIU a2, 1
+#endif /* CONFIG_CPU_MIPSR6 */
+
#ifdef CONFIG_EVA
/* ++++++++ */
/* EVA stuff */
#include <linux/export.h>
#include <linux/stringify.h>
-#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+#if (!defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)) || defined(CONFIG_MIPS_MT_SMTC)
/*
* For cli() we have to insert nops to make sure that the new value
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* see irqflags.h for inline function */
#else
" mfc0 $1,$12 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi %[flags], %[flags], 0x400 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* see irqflags.h for inline function */
#else
" mfc0 %[flags], $12 \n"
" xori $1, 0x400 \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#elif (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* see irqflags.h for inline function */
#else
" mfc0 $1, $12 \n"
}
EXPORT_SYMBOL(__arch_local_irq_restore);
-#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
+#endif /* (!defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)) || defined(CONFIG_MIPS_MT_SMTC) */
if ((insn.mm_i_format.rt == mm_bc1f_op) ||
(insn.mm_i_format.rt == mm_bc1t_op)) {
mips32_insn.fb_format.opcode = cop1_op;
- mips32_insn.fb_format.bc = bc_op;
+ mips32_insn.fb_format.bc = rs_bc_op;
mips32_insn.fb_format.flag =
(insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
} else
* a single subroutine should be used across both
* modules.
*/
+/* Note on R6 compact branches:
+ * Compact branches do not take exceptions (besides BC1EQZ/BC1NEZ)
+ * and do not execute the instruction in the forbidden slot when the
+ * branch is taken. The return EPC for them can therefore safely be
+ * set to EPC + 8, because the only way to get a BD-precise exception
+ * is to execute the instruction in the forbidden slot while the
+ * branch is not taken.
+ *
+ * Unconditional compact jumps/branches are added for the full picture
+ * (they do not actually raise BD-precise exceptions).
+ */
static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
- unsigned int fcr31;
unsigned int bit = 0;
+#ifdef CONFIG_CPU_MIPSR6
+ int reg;
+#else
+ unsigned int fcr31;
+#endif
switch (insn.i_format.opcode) {
case spec_op:
regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
+#ifndef CONFIG_CPU_MIPSR6
case jr_op:
+#endif
*contpc = regs->regs[insn.r_format.rs];
return 1;
break;
break;
case bcond_op:
switch (insn.i_format.rt) {
+#ifdef CONFIG_CPU_MIPSR6
+ case nal_op: /* MIPSR6: nal == bltzal $0 */
+ if (insn.i_format.rs)
+ break;
+#else
case bltzal_op:
case bltzall_op:
+#endif
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case bltz_op:
+#ifndef CONFIG_CPU_MIPSR6
case bltzl_op:
+#endif
if ((long)regs->regs[insn.i_format.rs] < 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
break;
+#ifdef CONFIG_CPU_MIPSR6
+ case bal_op: /* MIPSR6: bal == bgezal $0 */
+ if (insn.i_format.rs)
+ break;
+#else
case bgezal_op:
case bgezall_op:
+#endif
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case bgez_op:
+#ifndef CONFIG_CPU_MIPSR6
case bgezl_op:
+#endif
if ((long)regs->regs[insn.i_format.rs] >= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
return 1;
break;
case beq_op:
+#ifndef CONFIG_CPU_MIPSR6
case beql_op:
+#endif
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt])
*contpc = regs->cp0_epc +
return 1;
break;
case bne_op:
+#ifndef CONFIG_CPU_MIPSR6
case bnel_op:
+#endif
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt])
*contpc = regs->cp0_epc +
return 1;
break;
case blez_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: blezalc, bgezalc, bgeuc
+ */
+ if (insn.i_format.rt) {
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* blezalc, bgezalc */
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ }
+
+ if ((long)regs->regs[insn.i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+#endif
case blezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: blezc, bgezc, bgec
+ */
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+#else
if ((long)regs->regs[insn.i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
break;
+#endif
case bgtz_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: bltzalc, bgtzalc, bltuc
+ */
+ if (insn.i_format.rt) {
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* bltzalc, bgtzalc */
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ }
+
+ if ((long)regs->regs[insn.i_format.rs] > 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+#endif
case bgtzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: bltc, bltzc, bgtzc
+ */
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+#else
if ((long)regs->regs[insn.i_format.rs] > 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
break;
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+ case cbcond0_op:
+ /*
+ * Compact branches: bovc, beqc, beqzalc
+ */
+
+ /* fall through */
+ case cbcond1_op:
+ /*
+ * Compact branches: bnvc, bnec, bnezalc
+ */
+ if (insn.i_format.rt && !insn.i_format.rs) /* beqzalc/bnezalc */
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+ case cop1_op:
+ if ((insn.i_format.rs != bc1eqz_op) &&
+ (insn.i_format.rs != bc1nez_op))
+ break;
+
+ reg = insn.i_format.rt;
+ bit = 0;
+ switch (insn.i_format.rs) {
+ case bc1eqz_op:
+ if (current->thread.fpu.fpr[reg] == (__u64)0)
+ bit = 1;
+ break;
+ case bc1nez_op:
+ if (current->thread.fpu.fpr[reg] != (__u64)0)
+ bit = 1;
+ break;
+ }
+ if (bit)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+#else
case cop0_op:
case cop1_op:
case cop2_op:
case cop1x_op:
- if (insn.i_format.rs == bc_op) {
+ if (insn.i_format.rs == rs_bc_op) {
preempt_disable();
if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
}
}
break;
+#endif
}
return 0;
}
break;
#endif
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
case mfhc_op:
/* copregister rd -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
break;
}
- case bc_op:{
+#ifdef CONFIG_CPU_MIPSR6
+ case bc1eqz_op:
+ case bc1nez_op:
+ {
+ int reg;
+
+ if (xcp->cp0_cause & CAUSEF_BD)
+ return SIGILL;
+
+ reg = MIPSInst_FT(ir);
+ cond = 0;
+ switch (MIPSInst_RS(ir)) {
+ case bc1eqz_op:
+ if (current->thread.fpu.fpr[reg] == (__u64)0)
+ cond = 1;
+ break;
+ case bc1nez_op:
+ if (current->thread.fpu.fpr[reg] != (__u64)0)
+ cond = 1;
+ break;
+ }
+#else /* !CONFIG_CPU_MIPSR6 */
+ case rs_bc_op:{
int likely = 0;
if (xcp->cp0_cause & CAUSEF_BD)
/* thats an illegal instruction */
return SIGILL;
}
+#endif /* CONFIG_CPU_MIPSR6 */
xcp->cp0_cause |= CAUSEF_BD;
if (cond) {
#endif
/* its one of ours */
goto emul;
+#ifndef CONFIG_CPU_MIPSR6
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) == movc_op)
goto emul;
break;
#endif
+#endif /* CONFIG_CPU_MIPSR6 */
}
/*
*/
return mips_dsemul(xcp, ir, contpc);
}
+#ifndef CONFIG_CPU_MIPSR6
else {
/* branch not taken */
if (likely) {
*/
}
}
+#endif /* CONFIG_CPU_MIPSR6 */
break;
}
}
#endif
+#ifndef CONFIG_CPU_MIPSR6
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) != movc_op)
xcp->regs[MIPSInst_RD(ir)] =
xcp->regs[MIPSInst_RS(ir)];
break;
+#endif
#endif
default:
* very much about what happens in that case. Usually a segmentation
* fault will dump the process later on anyway ...
*/
+#ifdef CONFIG_CPU_MIPSR6
+static void local_r4k_flush_cache_sigtramp(void * arg)
+{
+ register unsigned long addr = (unsigned long) arg;
+
+ __asm__ __volatile__(
+ "synci 0(%0) \n"
+ "sync \n"
+ ::"r"(addr):"memory");
+}
+#else
static void local_r4k_flush_cache_sigtramp(void * arg)
{
unsigned long ic_lsize = cpu_icache_line_size();
if (MIPS_CACHE_SYNC_WAR)
__asm__ __volatile__ ("sync");
}
+#endif
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
default:
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#include <asm/tlbmisc.h>
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC
else
tlb_write_indexed();
#else
- tlbidx = read_c0_wired();
+ tlbidx = read_c0_wired() & 0xffff;
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
+ wired_push(vaddr & (PAGE_MASK << 1),entrylo,entrylo,PM_DEFAULT_MASK);
tlb_write_indexed();
#endif
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
EXIT_CRITICAL(flags);
return (void*) vaddr;
ENTER_CRITICAL(flags);
old_ctx = read_c0_entryhi();
- wired = read_c0_wired() - 1;
+ wired = (read_c0_wired() & 0xffff) - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
+ wired_pop();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
EXIT_CRITICAL(flags);
#endif
dec_preempt_count();
uasm_i_addiu(buf, T9, ZERO, off);
uasm_i_daddu(buf, reg1, reg2, T9);
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ if (off > 0xff) {
+#else
if (off > 0x7fff) {
+#endif
uasm_i_lui(buf, T9, uasm_rel_hi(off));
uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
UASM_i_ADDU(buf, reg1, reg2, T9);
/* Ignore anything but MIPSxx processors */
if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
#endif
+int lowest_wired;
+int current_wired;
+static struct WiredEntry {
+ unsigned long EntryHi;
+ unsigned long EntryLo0;
+ unsigned long EntryLo1;
+ unsigned long PageMask;
+} wired_entry_array[64];
+
+
void local_flush_tlb_all(void)
{
unsigned long flags;
unsigned long old_ctx;
+ unsigned long old_pagemask;
int entry;
int ftlbhighset;
+ int wired;
ENTER_CRITICAL(flags);
/* Save old context and create impossible VPN2 value */
write_c0_entrylo0(0);
write_c0_entrylo1(0);
- entry = read_c0_wired();
+ entry = read_c0_wired() & 0xffff;
/* Blast 'em all away. */
if (cpu_has_tlbinv) {
- if (current_cpu_data.tlbsizevtlb) {
- write_c0_index(0);
- mtc0_tlbw_hazard();
- tlbinvf(); /* invalide VTLB */
+ old_pagemask = read_c0_pagemask();
+ if (cpu_has_tlbinv_full)
+ tlbinvf(); /* invalidate the whole V/FTLB, index isn't used */
+ else {
+ if (current_cpu_data.tlbsizevtlb) {
+ write_c0_index(0);
+ mtc0_tlbw_hazard();
+ tlbinvf(); /* invalidate VTLB */
+ }
+ ftlbhighset = current_cpu_data.tlbsizevtlb + current_cpu_data.tlbsizeftlbsets;
+ for (entry=current_cpu_data.tlbsizevtlb;
+ entry < ftlbhighset;
+ entry++) {
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlbinvf(); /* invalidate one FTLB set */
+ }
}
- ftlbhighset = current_cpu_data.tlbsizevtlb + current_cpu_data.tlbsizeftlbsets;
- for (entry=current_cpu_data.tlbsizevtlb;
- entry < ftlbhighset;
- entry++) {
- write_c0_index(entry);
+ /* restore wired entries */
+ for (wired = lowest_wired; wired < current_wired; wired++) {
+ write_c0_index(wired);
+ tlbw_use_hazard(); /* What is the hazard here? */
+ write_c0_pagemask(wired_entry_array[wired].PageMask);
+ write_c0_entryhi(wired_entry_array[wired].EntryHi);
+ write_c0_entrylo0(wired_entry_array[wired].EntryLo0);
+ write_c0_entrylo1(wired_entry_array[wired].EntryLo1);
mtc0_tlbw_hazard();
- tlbinvf(); /* invalide one FTLB set */
+ tlb_write_indexed();
+ tlbw_use_hazard();
}
+ write_c0_pagemask(old_pagemask);
} else
while (entry < current_cpu_data.tlbsize) {
/* Make sure all entries differ. */
}
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
FLUSH_ITLB;
EXIT_CRITICAL(flags);
}
EXIT_CRITICAL(flags);
}
+int wired_push(unsigned long entryhi, unsigned long entrylo0,
+ unsigned long entrylo1, unsigned long pagemask)
+{
+ if (current_wired >= current_cpu_data.tlbsizevtlb) {
+ printk("Attempt to push TLB into wired exceeding VTLV size\n");
+ BUG();
+ }
+
+ wired_entry_array[current_wired].EntryHi = entryhi;
+ wired_entry_array[current_wired].EntryLo0 = entrylo0;
+ wired_entry_array[current_wired].EntryLo1 = entrylo1;
+ wired_entry_array[current_wired].PageMask = pagemask;
+
+ return current_wired++;
+}
+
+int wired_pop(void)
+{
+ if (current_wired <= lowest_wired) {
+ printk("Attempt to delete a not existed wired TLB\n");
+ BUG();
+ }
+
+ return --current_wired;
+}
+
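/*
 * Bookkeeping sketch (hypothetical helper): wired_push()/wired_pop()
 * record the wired entries in [lowest_wired, current_wired) so that
 * local_flush_tlb_all() can re-install them after a full tlbinvf.
 */
static inline int wired_entries_recorded(void)
{
	return current_wired - lowest_wired;
}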
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = read_c0_wired() & 0xffff;
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
+ wired_push(entryhi, entrylo0, entrylo1, PM_DEFAULT_MASK);
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+ EXIT_CRITICAL(flags);
+}
+
+void remove_wired_entry(void)
+{
+ unsigned long flags;
+ unsigned long wired;
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ ENTER_CRITICAL(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ wired = (read_c0_wired() & 0xffff) - 1;
+ write_c0_wired(wired);
+ write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+ wired_pop();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_ctx);
write_c0_pagemask(old_pagemask);
- local_flush_tlb_all();
+ mtc0_tlbw_hazard();
EXIT_CRITICAL(flags);
}
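
[Aside, not part of the patch] A minimal sketch of how the wired-entry helpers above could be driven from platform code, assuming 4 KB base pages and the usual MIPS headers for PAGE_SIZE and the add_wired_entry() declaration. add_wired_entry(), remove_wired_entry() and PM_DEFAULT_MASK come from this file; pin_device_window() and ENTRYLO_ATTRS are made-up names for illustration, and remove_wired_entry() would still need its prototype exported in a header.

    /* Illustrative only: pin one even/odd page pair so the mapping survives
     * local_flush_tlb_all(), then release it again. */
    #define ENTRYLO_ATTRS 0x1f  /* hypothetical C=3 (cacheable), D, V, G bits */

    static void pin_device_window(unsigned long vaddr, unsigned long paddr)
    {
        /* EntryLo PFN sits at bit 6, so a 4 KB-aligned paddr >> 6 lands there. */
        unsigned long lo0 = (paddr >> 6) | ENTRYLO_ATTRS;
        unsigned long lo1 = ((paddr + PAGE_SIZE) >> 6) | ENTRYLO_ATTRS;

        /* EntryHi/VPN2 covers the even/odd pair, hence the double-page mask. */
        add_wired_entry(lo0, lo1, vaddr & ~((PAGE_SIZE << 1) - 1),
                        PM_DEFAULT_MASK);

        /* ... access the window through vaddr ... */

        remove_wired_entry();   /* drops the most recently added wired entry */
    }
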
back_to_back_c0_hazard();
mask = read_c0_pagemask();
write_c0_pagemask(PM_DEFAULT_MASK);
+ mtc0_tlbw_hazard();
EXIT_CRITICAL(flags);
#endif
write_c0_pagegrain(pg);
}
+ mtc0_tlbw_hazard();
/* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
int wired = current_cpu_data.tlbsize - ntlb;
write_c0_wired(wired);
write_c0_index(wired-1);
+ mtc0_tlbw_hazard();
printk("Restricting TLB to %d entries\n", ntlb);
+ current_wired = wired;
+ lowest_wired = wired;
} else
printk("Ignoring invalid argument ntlb=%d\n", ntlb);
}
case tlb_indexed: tlbw = uasm_i_tlbwi; break;
}
- if (cpu_has_mips_r2) {
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
/*
* The architecture spec says an ehb is required here,
* but a number of cores do not have the hazard and
uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
- if (cpu_has_mips32r2) {
+ if (cpu_has_mips32r2 || cpu_has_mips32r6) {
uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
return;
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_64BIT
- if (cpu_has_mips_r2) {
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
/* For MIPS32R2, PTE ptr offset is obtained from BadVAddr */
UASM_i_MFC0(p, tmp, C0_BADVADDR);
UASM_i_LW(p, ptr, 0, ptr);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
- if (cpu_has_rixi) {
+ if (cpu_has_rixi && !cpu_has_rixi_except) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
* have triggered it. Skip the expensive test..
uasm_i_nop(&p);
uasm_i_tlbr(&p);
+
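+ /*
+ * ehb clears the tlbr -> EntryLo read hazard on R2/R6 cores; the
+ * Octeon cases below skip it, as those cores do not have the hazard.
+ */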
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
+
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
build_tlb_probe_entry(&p);
- if (cpu_has_rixi) {
+ if (cpu_has_rixi && !cpu_has_rixi_except) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
* have triggered it. Skip the expensive test..
uasm_i_nop(&p);
uasm_i_tlbr(&p);
+
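+ /* Same as above: ehb after tlbr on R2/R6 cores, skipped on Octeon. */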
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
+
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
| (e) << RE_SH \
| (f) << FUNC_SH)
+/* This macro sets the non-variable bits of an R6 instruction that takes a 9-bit signed offset (SIMM9). */
+#define M6(a, b, c, d, f) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << SIMM9_SH \
+ | (f) << FUNC_SH)
+
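
[Aside, not part of the patch] To make the new format concrete, here is a freestanding sketch of how an R6 "ll rt, offset(base)" word would be assembled from M6() plus the SIMM9 field builder added below. The numeric values (SPECIAL3 = 0x1f, ll6_op = 0x36) are my reading of the MIPS R6 manual, not something stated in the patch, which only uses the symbolic names.

    #include <stdint.h>

    #define OP_SH      26
    #define RS_SH      21
    #define RT_SH      16
    #define SIMM9_SH    7
    #define SIMM9_MASK  0x1ff
    #define FUNC_SH     0

    /* M6(spec3_op, 0, 0, 0, ll6_op) with the register and offset fields
     * OR-ed in afterwards, the way build_insn() does it. */
    static uint32_t build_r6_ll(unsigned int base, unsigned int rt, int32_t off)
    {
        uint32_t op = (0x1fu << OP_SH) | (0x36u << FUNC_SH); /* assumed SPECIAL3/LL */

        op |= base << RS_SH;                                 /* RS field */
        op |= rt << RT_SH;                                   /* RT field */
        op |= ((uint32_t)off & SIMM9_MASK) << SIMM9_SH;      /* build_simm9() */
        return op;                                           /* bit 6 stays 0 */
    }

With SIMM9_SH = 7 the 9-bit offset occupies bits 15:7, which is what the SIMM9_SH/SIMM9_MASK definitions added further down encode.
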
/* Define these when we are not the ISA the kernel is being compiled with. */
#ifdef CONFIG_CPU_MICROMIPS
#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+#ifndef CONFIG_CPU_MIPSR6
{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#endif
{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
+#else
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS },
+#else
{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
+#endif
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 },
+ { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 },
+#else
{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 },
+#else
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 },
+ { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 },
+#else
{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
return (arg >> 2) & JIMM_MASK;
}
+static inline __uasminit u32 build_simm9(s32 arg)
+{
+ WARN(((arg > 0xff) || (arg < -0x100)),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
/*
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
op |= build_set(va_arg(ap, u32));
if (ip->fields & SCIMM)
op |= build_scimm(va_arg(ap, u32));
+ if (ip->fields & SIMM9)
+ op |= build_simm9(va_arg(ap, u32));
va_end(ap);
**buf = op;
JIMM = 0x080,
FUNC = 0x100,
SET = 0x200,
- SCIMM = 0x400
+ SCIMM = 0x400,
+ SIMM9 = 0x800
};
#define OP_MASK 0x3f
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0
+#define SIMM9_SH 7
+#define SIMM9_MASK 0x1ff
enum opcode {
insn_invalid,
{
__asm__(
" .set push \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips32 \n"
+#endif
" clz %0, %1 \n"
" .set pop \n"
: "=r" (x)
"Dont use memory",
"YAMON PROM memory",
"Free memmory",
+ "Memory in use",
};
#endif
mdesc[2].base = mdesc[0].base + 0x000f0000UL;
mdesc[2].size = 0x00010000;
- mdesc[3].type = fw_dontuse;
+ mdesc[3].type = fw_inuse;
mdesc[3].base = mdesc[0].base + 0x00100000UL;
mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - 0x00100000UL;
return BOOT_MEM_RAM;
case fw_code:
return BOOT_MEM_ROM_DATA;
+ case fw_inuse:
+ return BOOT_MEM_INUSE;
default:
return BOOT_MEM_RESERVED;
}
{
unsigned int i;
+#ifdef CONFIG_CPU_MIPSR6
+ pr_info("BEVVA = %lx\n", read_c0_bevva());
+#endif
+
#ifdef CONFIG_EVA
#ifdef CONFIG_MIPS_CMP
if (gcmp_present)
unsigned int counter;
int handled = IRQ_NONE;
- if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+ if ((cpu_has_mips_r2 || cpu_has_mips_r6) && !(read_c0_cause() & (1 << 26)))
return handled;
switch (counters) {