MIPS: First MIPS R6 architecture implementation
author Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
Thu, 20 Nov 2014 01:37:18 +0000 (17:37 -0800)
committer Raghu Gandham <raghu.gandham@imgtec.com>
Tue, 2 Dec 2014 00:57:38 +0000 (16:57 -0800)
Many files are touched, but all functional changes are guarded by the
CONFIG_CPU_MIPSR6 preprocessor macro or a related one
(CONFIG_CPU_MIPS32_R6/CONFIG_CPU_MIPS64_R6). Some definitions, however,
are unconditional.
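
For illustration, the gating throughout the series boils down to this
pattern (a minimal sketch; the ISA_LEVEL macro name is hypothetical):

#if defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_CPU_MIPS64_R6)
#define ISA_LEVEL "mips64r6"	/* R6 assembler dialect, no branch-likely */
#else
#define ISA_LEVEL "mips3"	/* pre-R6 dialect used by the LL/SC paths */
#endif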

Squashed:
b3f685f35a1b MIPS: R6 compiler may require "memory" constraint in ASM
70c9d5dafffe MIPS: R6 buildtool restriction fix
5bb1b13b4219 MIPS: MIPS R6 basic MAAR support
17446a9a8be1 MIPS: R6 MAAR bugfix
9e06287f7d4d MIPS: Support of R6 architecture CP0 changes
15c77e709963 MIPS: R6: support of separate TLB RI/XI exceptions
01c3083740e1 MIPS: R6 asm of LL/SC optimization fix
72535c6169d2 MIPS: R6 - added CM2 L2 Prefetch support
709763f2eae8 MIPS: R6 emulation of branches
115e5660aad3 MIPS: R6: fixing jumps in get_frame_info
cf733bf4aa01 MIPS: R6: removed SPRAM support
e9656337c82f MIPS: R6: added L2 cache processing
a92ea7efa46c MIPS: R6 - use SYNCI in trampoline instead of IPI
57db20cbc0b9 MIPS: Enforce using of ERETNC instead of ERET in MIPS R6
73c7d199e237 MIPS: R6 bugfix of MIPS32 save/restore on Status.FR1 mode
a7f651f0c1a6 MIPS: restore CP0_WIRED register handling
c359d52036c2 MIPS: R6 bugfix of R6 PREF instruction opcode
b1080dd82704 MIPS: Bugfix of MAAR setup for 2nd core
a869049afa9e MIPS: R6: memcpy has PREF with offset bigger 256B
0e5a04a55192 MIPS: R6 bugfix of unaligned handler store
b1242efec8a5 MIPS: R6: unaligned LWU on MIPS64 R6 should not sign-extend

Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
71 files changed:
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/include/asm/asm.h
arch/mips/include/asm/asmmacro-32.h
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/atomic.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/bootinfo.h
arch/mips/include/asm/checksum.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/dsp.h
arch/mips/include/asm/edac.h
arch/mips/include/asm/fpu.h
arch/mips/include/asm/futex.h
arch/mips/include/asm/fw/fw.h
arch/mips/include/asm/gcmpregs.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/local.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/module.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/spram.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/tlbmisc.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/swab.h
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/entry.S
arch/mips/kernel/genex.S
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/smp-cmp.c
arch/mips/kernel/smp.c
arch/mips/kernel/syscall.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/lib/csum_partial.S
arch/mips/lib/memcpy.S
arch/mips/lib/memset.S
arch/mips/lib/mips-atomic.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/c-r4k.c
arch/mips/mm/init.c
arch/mips/mm/page.c
arch/mips/mm/sc-mips.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-mips.c
arch/mips/mm/uasm.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-memory.c
arch/mips/mti-malta/malta-setup.c
arch/mips/oprofile/op_model_mipsxx.c

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 207881b632dc3780e565260e6b744cbb11c79e31..2e66da2437a41995d2700fd90781419ee7591f1e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -313,8 +313,10 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS32_R2_EVA
+       select SYS_HAS_CPU_MIPS32_R6
        select SYS_HAS_CPU_MIPS64_R1
        select SYS_HAS_CPU_MIPS64_R2
+       select SYS_HAS_CPU_MIPS64_R6
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_CPU_RM7000
        select SYS_HAS_EARLY_PRINTK
@@ -1276,6 +1278,28 @@ config CPU_MIPS64_R2
          specific type of processor in your system, choose those that one
          otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS32_R6
+       bool "MIPS32 Release 6"
+       depends on SYS_HAS_CPU_MIPS32_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS32 architecture.
+
+config CPU_MIPS64_R6
+       bool "MIPS64 Release 6"
+       depends on SYS_HAS_CPU_MIPS64_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_64BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_HUGEPAGES
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS64 architecture.
+
 config CPU_R3000
        bool "R3000"
        depends on SYS_HAS_CPU_R3000
@@ -1625,6 +1649,12 @@ config SYS_HAS_CPU_MIPS64_R1
 config SYS_HAS_CPU_MIPS64_R2
        bool
 
+config SYS_HAS_CPU_MIPS32_R6
+       bool
+
+config SYS_HAS_CPU_MIPS64_R6
+       bool
+
 config SYS_HAS_CPU_R3000
        bool
 
@@ -1711,11 +1741,11 @@ endmenu
 #
 config CPU_MIPS32
        bool
-       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
        bool
-       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1728,6 +1758,14 @@ config CPU_MIPSR2
        bool
        default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
 
+config GENERIC_CSUM
+       bool
+
+config CPU_MIPSR6
+       bool
+       default y if CPU_MIPS64_R6 || CPU_MIPS32_R6
+       select GENERIC_CSUM
+
 config EVA
        bool
 
@@ -1756,7 +1794,7 @@ config MIPS_PGD_C0_CONTEXT
 #
 config HARDWARE_WATCHPOINTS
        bool
-       default y if CPU_MIPSR1 || CPU_MIPSR2
+       default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
 
 menu "Kernel type"
 
@@ -1960,6 +1998,7 @@ config MIPS_MT_SMTC
        depends on CPU_MIPS32_R2
        #depends on CPU_MIPS64_R2               # once there is hardware ...
        depends on SYS_SUPPORTS_MULTITHREADING
+       depends on !CPU_MIPSR6
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select MIPS_MT
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index d2acf38fab04908f6fb9e2bc025b25d4d503cef7..ce02de7222c3af87d72cae40f3057a41dc34af3c 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -133,10 +133,14 @@ cflags-$(CONFIG_CPU_MIPS32_R1)    += $(call cc-option,-march=mips32,-mips32 -U_MIPS
                        -Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
                        -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += $(call cc-option,-march=mips32r6,-mips32r6 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+                       -Wa,-mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6)  += $(call cc-option,-march=mips64r6,-mips64r6 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+                       -Wa,-mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)     += -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)     += $(call cc-option,-march=r5400,-march=r5000) \
                        -Wa,--trap
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index c054d37b9cf190b7ac06025b67ef6bd4000e8d5d..04f1350c95aa429cdd350f15e7a2513fe2c922d6 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -143,7 +143,20 @@ symbol             =       value
  */
 #ifdef CONFIG_CPU_HAS_PREFETCH
 
-#define PREF(hint,addr)                                        \
+#ifdef CONFIG_CPU_MIPSR6
+
+#define PREF(hint,addr)                                 \
+               .set    push;                           \
+               .set    mips64r6;                       \
+               pref    hint, addr;                     \
+               .set    pop
+
+
+#define PREFX(hint, addr)
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
+#define PREF(hint,addr)                                 \
                .set    push;                           \
                .set    mips4;                          \
                pref    hint, addr;                     \
@@ -163,6 +176,7 @@ symbol              =       value
                .set    mips4;                          \
                prefx   hint, addr;                     \
                .set    pop
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #else /* !CONFIG_CPU_HAS_PREFETCH */
 
@@ -172,49 +186,6 @@ symbol             =       value
 
 #endif /* !CONFIG_CPU_HAS_PREFETCH */
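
R6 removed the indexed prefetch instruction (prefx), so PREFX becomes a
no-op while PREF only switches to the r6 assembler dialect. From C the
same effect is available portably; a hedged sketch:

static inline void prefetch_line(const void *p)
{
	/* rw=0 (read), locality=1; GCC emits a pref encoding valid
	 * for the selected -march, including mips32r6/mips64r6 */
	__builtin_prefetch(p, 0, 1);
}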
 
-/*
- * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
- */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt)                               \
-               .set    push;                           \
-               .set    reorder;                        \
-               beqz    rt, 9f;                         \
-               move    rd, rs;                         \
-               .set    pop;                            \
-9:
-#define MOVZ(rd, rs, rt)                               \
-               .set    push;                           \
-               .set    reorder;                        \
-               bnez    rt, 9f;                         \
-               move    rd, rs;                         \
-               .set    pop;                            \
-9:
-#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt)                               \
-               .set    push;                           \
-               .set    noreorder;                      \
-               bnezl   rt, 9f;                         \
-                move   rd, rs;                         \
-               .set    pop;                            \
-9:
-#define MOVZ(rd, rs, rt)                               \
-               .set    push;                           \
-               .set    noreorder;                      \
-               beqzl   rt, 9f;                         \
-                move   rd, rs;                         \
-               .set    pop;                            \
-9:
-#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
-    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt)                               \
-               movn    rd, rs, rt
-#define MOVZ(rd, rs, rt)                               \
-               movz    rd, rs, rt
-#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
-
 /*
  * Stack alignment
  */
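
The MOVN/MOVZ wrappers are dropped because R6 removed the movn/movz
instructions (conditional moves are now built from seleqz/selnez) and no
remaining assembly uses them. What MOVN expressed, as a hedged C
equivalent:

/* MOVN(rd, rs, rt): rd = rs if rt != 0, else rd unchanged.  On R6
 * the compiler lowers this to selnez/seleqz instead of movn. */
static inline long movn_equiv(long rd, long rs, long rt)
{
	return rt ? rs : rd;
}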
@@ -263,8 +234,10 @@ symbol             =       value
 #if (_MIPS_SZINT == 32)
 #define INT_ADD                add
 #define INT_ADDU       addu
-#define INT_ADDI       addi
-#define INT_ADDIU      addiu
+#ifndef CONFIG_CPU_MIPSR6
+#define INT_ADDI        addi
+#endif
+#define INT_ADDIU       addiu
 #define INT_SUB                sub
 #define INT_SUBU       subu
 #define INT_L          lw
@@ -280,7 +253,9 @@ symbol              =       value
 #if (_MIPS_SZINT == 64)
 #define INT_ADD                dadd
 #define INT_ADDU       daddu
-#define INT_ADDI       daddi
+#ifndef CONFIG_CPU_MIPSR6
+#define INT_ADDI        daddi
+#endif
 #define INT_ADDIU      daddiu
 #define INT_SUB                dsub
 #define INT_SUBU       dsubu
@@ -300,7 +275,9 @@ symbol              =       value
 #if (_MIPS_SZLONG == 32)
 #define LONG_ADD       add
 #define LONG_ADDU      addu
+#ifndef CONFIG_CPU_MIPSR6
 #define LONG_ADDI      addi
+#endif
 #define LONG_ADDIU     addiu
 #define LONG_SUB       sub
 #define LONG_SUBU      subu
@@ -323,7 +300,9 @@ symbol              =       value
 #if (_MIPS_SZLONG == 64)
 #define LONG_ADD       dadd
 #define LONG_ADDU      daddu
+#ifndef CONFIG_CPU_MIPSR6
 #define LONG_ADDI      daddi
+#endif
 #define LONG_ADDIU     daddiu
 #define LONG_SUB       dsub
 #define LONG_SUBU      dsubu
@@ -349,7 +328,9 @@ symbol              =       value
 #if (_MIPS_SZPTR == 32)
 #define PTR_ADD                add
 #define PTR_ADDU       addu
+#ifndef CONFIG_CPU_MIPSR6
 #define PTR_ADDI       addi
+#endif
 #define PTR_ADDIU      addiu
 #define PTR_SUB                sub
 #define PTR_SUBU       subu
@@ -374,7 +355,9 @@ symbol              =       value
 #if (_MIPS_SZPTR == 64)
 #define PTR_ADD                dadd
 #define PTR_ADDU       daddu
+#ifndef CONFIG_CPU_MIPSR6
 #define PTR_ADDI       daddi
+#endif
 #define PTR_ADDIU      daddiu
 #define PTR_SUB                dsub
 #define PTR_SUBU       dsubu
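
The INT_/LONG_/PTR_ADDI aliases are hidden on R6 because the trapping
addi/daddi encodings were reassigned to new instructions. Where the
overflow trap mattered, an explicit check is needed instead; a hedged
sketch with a GCC builtin:

static inline int add_overflows(long a, long b)
{
	long r;

	/* true if a + b would overflow, which is what addi trapped on */
	return __builtin_add_overflow(a, b, &r);
}
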
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index c0d49e66ce130203f3a2e8a29093abc6434b0f59..9060c15fef501b9e2dcf185e2a64ff5b12e53722 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -12,7 +12,7 @@
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
 
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
 
        /* copy stuff from MIPS64 */
 
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6c8342ae74db88cd3187d1964274aa03c6031165..8aab8b7ec496cb83bfb202e9f5ff760c3d8b6319 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -35,7 +35,7 @@
        mtc0    \reg, CP0_TCSTATUS
        _ehb
        .endm
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        .macro  local_irq_enable reg=t0
        ei
        irq_enable_hazard
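
R6 keeps the R2 ei/di interrupt-control instructions, so the R2 macro
path is simply extended. A hedged inline-asm equivalent of the ei path:

static inline void irq_enable_sketch(void)
{
	__asm__ __volatile__(
	"	.set	push		\n"
	"	.set	mips32r2	\n"	/* ei exists from R2 on */
	"	ei			\n"
	"	.set	pop		\n"
	: /* no outputs */ : : "memory");
}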
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 08b607969a16978055f2b5e5cd1bf833fb8a4618..6c3b03a1248edcce8ee608e8f5fff64ad916cc19 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -63,16 +63,30 @@ static __inline__ void atomic_add(int i, atomic_t * v)
                : "Ir" (i));
        } else if (kernel_uses_llsc) {
                int temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register int temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
-                       "       ll      %0, %1          # atomic_add    \n"
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %1                          \n"
+                       "       ll      %0, 0(%2)       # atomic_add    \n"
+                       "       addu    %0, %3                          \n"
+                       "       sc      %0, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+                       : "Ir" (i));
+#else
+                       "       .set    mips3                           \n"
+                       "       ll      %0, %1          # atomic_add    \n"
                        "       addu    %0, %2                          \n"
                        "       sc      %0, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (v->counter)
                        : "Ir" (i));
+#endif
                } while (unlikely(!temp));
        } else {
                unsigned long flags;
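
The extra temp2 operand exists because R6 shrank the LL/SC offset field
from 16 to 9 bits: the generic "m" constraint may expand to an offset
the R6 ll/sc cannot encode, so the address is first materialized with
dla and used with a zero offset. The same retry loop for a plain
increment, as a hedged sketch:

static inline void llsc_inc(int *p)
{
	int tmp;

	do {
		__asm__ __volatile__(
		"	.set	push		\n"
		"	.set	mips64r6	\n"
		"	ll	%0, 0(%2)	\n"	/* load-linked */
		"	addiu	%0, %0, 1	\n"
		"	sc	%0, 0(%2)	\n"	/* tmp = 1 on success */
		"	.set	pop		\n"
		: "=&r" (tmp), "+m" (*p)
		: "r" (p));
	} while (!tmp);
}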
@@ -106,16 +120,30 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
                : "Ir" (i));
        } else if (kernel_uses_llsc) {
                int temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register int temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %1                          \n"
+                       "       ll      %0, 0(%2)       # atomic_sub    \n"
+                       "       subu    %0, %3                          \n"
+                       "       sc      %0, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+                       : "Ir" (i));
+#else
                        "       .set    mips3                           \n"
-                       "       ll      %0, %1          # atomic_sub    \n"
+                       "       ll      %0, %1          # atomic_sub    \n"
                        "       subu    %0, %2                          \n"
                        "       sc      %0, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (v->counter)
                        : "Ir" (i));
+#endif
                } while (unlikely(!temp));
        } else {
                unsigned long flags;
@@ -150,16 +178,31 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                : "Ir" (i));
        } else if (kernel_uses_llsc) {
                int temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register int temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %2                          \n"
+                       "       ll      %1, 0(%3) # atomic_add_return   \n"
+                       "       addu    %0, %1, %4                      \n"
+                       "       sc      %0, 0(%3)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+                         "=&r" (temp2)
+                       : "Ir" (i));
+#else
                        "       .set    mips3                           \n"
-                       "       ll      %1, %2  # atomic_add_return     \n"
-                       "       addu    %0, %1, %3                      \n"
+                       "       ll      %1, %2  # atomic_add_return     \n"
+                       "       addu    %0, %1, %3                      \n"
                        "       sc      %0, %2                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (result), "=&r" (temp), "+m" (v->counter)
                        : "Ir" (i));
+#endif
                } while (unlikely(!result));
 
                result = temp + i;
@@ -202,16 +245,31 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                result = temp - i;
        } else if (kernel_uses_llsc) {
                int temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register int temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %2                          \n"
+                       "       ll      %1, 0(%3) # atomic_sub_return   \n"
+                       "       subu    %0, %1, %4                      \n"
+                       "       sc      %0, 0(%3)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+                         "=&r" (temp2)
+                       : "Ir" (i));
+#else
                        "       .set    mips3                           \n"
-                       "       ll      %1, %2  # atomic_sub_return     \n"
+                       "       ll      %1, %2  # atomic_sub_return     \n"
                        "       subu    %0, %1, %3                      \n"
                        "       sc      %0, %2                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (result), "=&r" (temp), "+m" (v->counter)
                        : "Ir" (i));
+#endif
                } while (unlikely(!result));
 
                result = temp - i;
@@ -264,10 +322,30 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                : "memory");
        } else if (kernel_uses_llsc) {
                int temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register int temp2;
+#endif
 
                __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+               "       dla     %3, %2                                  \n"
+               "1:     ll      %1, 0(%3)      # atomic_sub_if_positive \n"
+               "       subu    %0, %1, %4                              \n"
+               "       bltz    %0, 1f                                  \n"
+               "       sc      %0, 0(%3)                               \n"
+               "       .set    noreorder                               \n"
+               "       beqz    %0, 1b                                  \n"
+               "        subu   %0, %1, %4                              \n"
+               "       .set    reorder                                 \n"
+               "1:                                                     \n"
+               "       .set    mips0                                   \n"
+               : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+                 "=&r" (temp2)
+               : "Ir" (i));
+#else
                "       .set    mips3                                   \n"
-               "1:     ll      %1, %2          # atomic_sub_if_positive\n"
+               "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       sc      %0, %2                                  \n"
@@ -279,6 +357,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "+m" (v->counter)
                : "Ir" (i));
+#endif
        } else {
                unsigned long flags;
 
@@ -430,16 +509,30 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                : "Ir" (i));
        } else if (kernel_uses_llsc) {
                long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %1                          \n"
+                       "       lld     %0, 0(%2)       # atomic64_add  \n"
+                       "       daddu   %0, %3                          \n"
+                       "       scd     %0, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+                       : "Ir" (i));
+#else
                        "       .set    mips3                           \n"
-                       "       lld     %0, %1          # atomic64_add  \n"
+                       "       lld     %0, %1          # atomic64_add  \n"
                        "       daddu   %0, %2                          \n"
                        "       scd     %0, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (v->counter)
                        : "Ir" (i));
+#endif
                } while (unlikely(!temp));
        } else {
                unsigned long flags;
@@ -473,9 +566,22 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                : "Ir" (i));
        } else if (kernel_uses_llsc) {
                long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %1                          \n"
+                       "       lld     %0, 0(%2)       # atomic64_sub  \n"
+                       "       dsubu   %0, %3                          \n"
+                       "       scd     %0, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+                       : "Ir" (i));
+#else
                        "       .set    mips3                           \n"
                        "       lld     %0, %1          # atomic64_sub  \n"
                        "       dsubu   %0, %2                          \n"
@@ -483,6 +589,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (v->counter)
                        : "Ir" (i));
+#endif
                } while (unlikely(!temp));
        } else {
                unsigned long flags;
@@ -517,9 +624,23 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                : "Ir" (i));
        } else if (kernel_uses_llsc) {
                long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %2                          \n"
+                       "       lld     %1, 0(%3) # atomic64_add_return \n"
+                       "       daddu   %0, %1, %4                      \n"
+                       "       scd     %0, 0(%3)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+                         "=&r" (temp2)
+                       : "Ir" (i));
+#else
                        "       .set    mips3                           \n"
                        "       lld     %1, %2  # atomic64_add_return   \n"
                        "       daddu   %0, %1, %3                      \n"
@@ -528,6 +649,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                        : "Ir" (i), "m" (v->counter)
                        : "memory");
+#endif
                } while (unlikely(!result));
 
                result = temp + i;
@@ -568,17 +690,32 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                : "memory");
        } else if (kernel_uses_llsc) {
                long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %2                          \n"
+                       "       lld     %1, 0(%3) # atomic64_sub_return \n"
+                       "       dsubu   %0, %1, %4                      \n"
+                       "       scd     %0, 0(%3)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+                         "=&r" (temp2)
+                       : "Ir" (i));
+#else
                        "       .set    mips3                           \n"
-                       "       lld     %1, %2  # atomic64_sub_return   \n"
+                       "       lld     %1, %2  # atomic64_sub_return   \n"
                        "       dsubu   %0, %1, %3                      \n"
                        "       scd     %0, %2                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                        : "Ir" (i), "m" (v->counter)
                        : "memory");
+#endif
                } while (unlikely(!result));
 
                result = temp - i;
@@ -631,10 +768,30 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                : "memory");
        } else if (kernel_uses_llsc) {
                long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register long temp2;
+#endif
 
                __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+               "       dla     %3, %2                                  \n"
+               "1:     lld     %1, 0(%3)    # atomic64_sub_if_positive \n"
+               "       dsubu   %0, %1, %4                              \n"
+               "       bltz    %0, 1f                                  \n"
+               "       scd     %0, 0(%3)                               \n"
+               "       .set    noreorder                               \n"
+               "       beqz    %0, 1b                                  \n"
+               "        dsubu  %0, %1, %4                              \n"
+               "       .set    reorder                                 \n"
+               "1:                                                     \n"
+               "       .set    mips0                                   \n"
+               : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+                 "=&r" (temp2)
+               : "Ir" (i));
+#else
                "       .set    mips3                                   \n"
-               "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
+               "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       scd     %0, %2                                  \n"
@@ -646,6 +803,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "+m" (v->counter)
                : "Ir" (i));
+#endif
        } else {
                unsigned long flags;
 
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index a548a3374483dbbc4242ab304fdb5d6ba86c9b8f..96954edb149509243d328a7919f284111304a839 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -80,6 +80,9 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        int bit = nr & SZLONG_MASK;
        unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+       register unsigned long temp2;
+#endif
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
@@ -91,7 +94,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << bit), "m" (*m));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
@@ -101,17 +104,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (bit), "r" (~0));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %1                          \n"
+                       "       " __LL "%0, 0(%2)       # set_bit       \n"
+                       "       or      %0, %3                          \n"
+                       "       " __SC "%0, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+                       : "ir" (1UL << bit));
+#else
                        "       .set    mips3                           \n"
-                       "       " __LL "%0, %1          # set_bit       \n"
+                       "       " __LL "%0, %1          # set_bit       \n"
                        "       or      %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (1UL << bit));
+#endif
                } while (unlikely(!temp));
        } else
                __mips_set_bit(nr, addr);
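
Semantically the loop above is an atomic OR of the bit mask; a hedged
sketch with the GCC builtin (illustrative only, since the kernel must
pick the ISA level itself):

static inline void set_bit_sketch(unsigned long nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % (8 * sizeof(long)));

	/* set_bit() gives no ordering guarantee, hence relaxed */
	__atomic_fetch_or(&addr[nr / (8 * sizeof(long))], mask,
			  __ATOMIC_RELAXED);
}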
@@ -132,6 +146,9 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        int bit = nr & SZLONG_MASK;
        unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+       register unsigned long temp2;
+#endif
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
@@ -143,7 +160,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                "       .set    mips0                                   \n"
                : "=&r" (temp), "+m" (*m)
                : "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
@@ -153,17 +170,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (bit));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %1                          \n"
+                       "       " __LL "%0, 0(%2)       # clear_bit     \n"
+                       "       and     %0, %3                          \n"
+                       "       " __SC "%0, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+                       : "ir" (~(1UL << bit)));
+#else
                        "       .set    mips3                           \n"
-                       "       " __LL "%0, %1          # clear_bit     \n"
+                       "       " __LL "%0, %1          # clear_bit     \n"
                        "       and     %0, %2                          \n"
                        "       " __SC "%0, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (~(1UL << bit)));
+#endif
                } while (unlikely(!temp));
        } else
                __mips_clear_bit(nr, addr);
@@ -212,16 +240,30 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register unsigned long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %1                          \n"
+                       "       " __LL "%0, 0(%2)       # change_bit    \n"
+                       "       xor     %0, %3                          \n"
+                       "       " __SC "%0, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+                       : "ir" (1UL << bit));
+#else
                        "       .set    mips3                           \n"
-                       "       " __LL "%0, %1          # change_bit    \n"
+                       "       " __LL "%0, %1          # change_bit    \n"
                        "       xor     %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (1UL << bit));
+#endif
                } while (unlikely(!temp));
        } else
                __mips_change_bit(nr, addr);
@@ -261,17 +303,32 @@ static inline int test_and_set_bit(unsigned long nr,
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register unsigned long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %1                          \n"
+                       "       " __LL "%0, 0(%3) # test_and_set_bit    \n"
+                       "       or      %2, %0, %4                      \n"
+                       "       " __SC "%2, 0(%3)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (*m), "=&r" (res),
+                         "=&r" (temp2)
+                       : "r" (1UL << bit));
+#else
                        "       .set    mips3                           \n"
-                       "       " __LL "%0, %1  # test_and_set_bit      \n"
+                       "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
+#endif
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
@@ -315,17 +372,32 @@ static inline int test_and_set_bit_lock(unsigned long nr,
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register unsigned long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %1                          \n"
+                       "       " __LL "%0, 0(%3) # test_and_set_bit    \n"
+                       "       or      %2, %0, %4                      \n"
+                       "       " __SC "%2, 0(%3)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (*m), "=&r" (res),
+                         "=&r" (temp2)
+                       : "r" (1UL << bit));
+#else
                        "       .set    mips3                           \n"
-                       "       " __LL "%0, %1  # test_and_set_bit      \n"
+                       "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
+#endif
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
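
The test_and_* variants additionally return the bit's old value; a
hedged builtin-based sketch of the semantics:

static inline int test_and_set_bit_sketch(unsigned long nr,
					  unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % (8 * sizeof(long)));
	unsigned long old;

	/* fully ordered, matching the barriers in the real code */
	old = __atomic_fetch_or(&addr[nr / (8 * sizeof(long))], mask,
				__ATOMIC_SEQ_CST);
	return (old & mask) != 0;
}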
@@ -349,6 +421,9 @@ static inline int test_and_clear_bit(unsigned long nr,
 {
        int bit = nr & SZLONG_MASK;
        unsigned long res;
+#ifdef CONFIG_CPU_MIPSR6
+       register unsigned long temp2;
+#endif
 
        smp_mb__before_llsc();
 
@@ -368,7 +443,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                : "=&r" (temp), "+m" (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
        } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
@@ -390,8 +465,20 @@ static inline int test_and_clear_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %1                          \n"
+                       "       " __LL  "%0, 0(%3) # test_and_clear_bit \n"
+                       "       or      %2, %0, %4                      \n"
+                       "       xor     %2, %4                          \n"
+                       "       " __SC  "%2, 0(%3)                      \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (*m), "=&r" (res),
+                         "=&r" (temp2)
+                       : "r" (1UL << bit));
+#else
                        "       .set    mips3                           \n"
-                       "       " __LL  "%0, %1 # test_and_clear_bit    \n"
+                       "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       or      %2, %0, %3                      \n"
                        "       xor     %2, %3                          \n"
                        "       " __SC  "%2, %1                         \n"
@@ -399,6 +486,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                        : "=&r" (temp), "+m" (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
+#endif
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
@@ -444,17 +532,32 @@ static inline int test_and_change_bit(unsigned long nr,
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+               register unsigned long temp2;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %3, %1                          \n"
+                       "       " __LL  "%0, 0(%3) # test_and_change_bit \n"
+                       "       xor     %2, %0, %4                      \n"
+                       "       " __SC  "%2, 0(%3)                      \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (temp), "+m" (*m), "=&r" (res),
+                         "=&r" (temp2)
+                       : "r" (1UL << bit));
+#else
                        "       .set    mips3                           \n"
-                       "       " __LL  "%0, %1 # test_and_change_bit   \n"
+                       "       " __LL  "%0, %1 # test_and_change_bit   \n"
                        "       xor     %2, %0, %3                      \n"
                        "       " __SC  "\t%2, %1                       \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
+#endif
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
@@ -495,8 +598,12 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
                "       .set    mips32                                  \n"
-               "       clz     %0, %1                                  \n"
+#endif
+               "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
                : "r" (word));
@@ -508,8 +615,12 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
                __asm__(
                "       .set    push                                    \n"
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
                "       .set    mips64                                  \n"
-               "       dclz    %0, %1                                  \n"
+#endif
+               "       dclz    %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
                : "r" (word));
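
Only the .set dialect changes here; clz/dclz survive into R6. What the
clz path computes, as a hedged portable equivalent (valid for
word != 0):

static inline unsigned long fls_sketch(unsigned long word)
{
	/* index of the most significant set bit */
	return 8 * sizeof(word) - 1 - __builtin_clzl(word);
}
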
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 4d2cdea5aa37f46e05e1e06e7f8d378bf9331c13..6b1ebeeab558ce12e8c6e009f98234e17814c522 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -87,6 +87,7 @@ extern unsigned long mips_machtype;
 #define BOOT_MEM_ROM_DATA      2
 #define BOOT_MEM_RESERVED      3
 #define BOOT_MEM_INIT_RAM      4
+#define BOOT_MEM_INUSE          5
 
 /*
  * A memory map that's built upon what was determined
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 19d4fc841daaf0a7c995e0640cf6a514578b36cf..f0df06b70ce37cd6531e1fcba27e7829c25a7cc0 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
 
 #include <asm/uaccess.h>
 
+/*
+ *     Fold a partial checksum without adding pseudo headers
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+       __asm__(
+       "       .set    push            # csum_fold\n"
+       "       .set    noat            \n"
+       "       sll     $1, %0, 16      \n"
+       "       addu    %0, $1          \n"
+       "       sltu    $1, %0, $1      \n"
+       "       srl     %0, %0, 16      \n"
+       "       addu    %0, $1          \n"
+       "       xori    %0, 0xffff      \n"
+       "       .set    pop"
+       : "=r" (sum)
+       : "0" (sum));
+
+       return (__force __sum16)sum;
+}
+
+#ifdef CONFIG_CPU_MIPSR6
+
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#ifndef csum_tcpudp_nofold
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+               unsigned short proto, __wsum sum);
+#endif
+
+#ifndef csum_tcpudp_magic
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+                 unsigned short proto, __wsum sum)
+{
+       return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+#endif
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+                                       int len, __wsum sum, int *csum_err);
+
+#ifndef csum_partial_copy_nocheck
+#define csum_partial_copy_nocheck(src, dst, len, sum)  \
+       csum_partial_copy((src), (dst), (len), (sum))
+#endif
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
 /*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
@@ -114,24 +198,13 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
                                       int len, __wsum sum);
 
 /*
- *     Fold a partial checksum without adding pseudo headers
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
  */
-static inline __sum16 csum_fold(__wsum sum)
-{
-       __asm__(
-       "       .set    push            # csum_fold\n"
-       "       .set    noat            \n"
-       "       sll     $1, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       sltu    $1, %0, $1      \n"
-       "       srl     %0, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       xori    %0, 0xffff      \n"
-       "       .set    pop"
-       : "=r" (sum)
-       : "0" (sum));
 
-       return (__force __sum16)sum;
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+       return csum_fold(csum_partial(buff, len, 0));
 }
 
 /*
@@ -226,14 +299,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
 
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
-       return csum_fold(csum_partial(buff, len, 0));
-}
+#endif /* !CONFIG_CPU_MIPSR6 */
 
 #define _HAVE_ARCH_IPV6_CSUM
 static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
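
With GENERIC_CSUM selected for R6, the optimized asm helpers give way to
extern generic C versions and only csum_fold stays inline. A hedged
portable equivalent of its asm:

static inline unsigned short csum_fold_sketch(unsigned int sum)
{
	/* fold twice: the second fold absorbs a possible carry */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;	/* one's-complement result */
}
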
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 466069bd846596c4e2940be2f42a5aaa82830bec..f57e76bf82149f56fdfb3096aa3f4cb9d02859d6 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -35,19 +35,36 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
+#ifdef CONFIG_CPU_MIPSR6
+               register unsigned long tmp;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %3                          \n"
+                       "       ll      %0, 0(%2)       # xchg_u32      \n"
+                       "       .set    mips0                           \n"
+                       "       move    %1, %z4                         \n"
+                       "       .set    mips64r6                        \n"
+                       "       sc      %1, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (retval), "=&r" (dummy), "=&r" (tmp),
+                         "+m" (*m)
+                       : "Jr" (val));
+#else
                        "       .set    mips3                           \n"
-                       "       ll      %0, %3          # xchg_u32      \n"
+                       "       ll      %0, %3          # xchg_u32      \n"
                        "       .set    mips0                           \n"
                        "       move    %2, %z4                         \n"
                        "       .set    mips3                           \n"
-                       "       sc      %2, %1                          \n"
+                       "       sc      %2, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                        : "R" (*m), "Jr" (val)
                        : "memory");
+#endif
                } while (unlikely(!dummy));
        } else {
                unsigned long flags;
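
The dummy/retry loop implements an atomic exchange; a hedged
builtin-based sketch of the same semantics:

static inline unsigned int xchg_sketch(unsigned int *p, unsigned int val)
{
	/* atomically store val and return the previous value */
	return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
}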
@@ -85,17 +102,32 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
+#ifdef CONFIG_CPU_MIPSR6
+               register unsigned long tmp;
+#endif
 
                do {
                        __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+                       "       .set    mips64r6                        \n"
+                       "       dla     %2, %3                          \n"
+                       "       lld     %0, 0(%2)       # xchg_u64      \n"
+                       "       move    %1, %z4                         \n"
+                       "       scd     %1, 0(%2)                       \n"
+                       "       .set    mips0                           \n"
+                       : "=&r" (retval), "=&r" (dummy), "=&r" (tmp),
+                         "+m" (*m)
+                       : "Jr" (val));
+#else
                        "       .set    mips3                           \n"
-                       "       lld     %0, %3          # xchg_u64      \n"
+                       "       lld     %0, %3          # xchg_u64      \n"
                        "       move    %2, %z4                         \n"
                        "       scd     %2, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                        : "R" (*m), "Jr" (val)
                        : "memory");
+#endif
                } while (unlikely(!dummy));
        } else {
                unsigned long flags;
@@ -137,6 +169,42 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #define __HAVE_ARCH_CMPXCHG 1
 
+#ifdef CONFIG_CPU_MIPSR6
+#define __cmpxchg_asm(ld, st, m, old, new)                             \
+({                                                                     \
+       __typeof(*(m)) __ret;                                           \
+       register unsigned long tmp;                                     \
+                                                                       \
+       if (kernel_uses_llsc) {                                         \
+               __asm__ __volatile__(                                   \
+               "       .set    push                            \n"     \
+               "       .set    noat                            \n"     \
+               "       .set    mips64r6                        \n"     \
+               "1:     dla     %1, %2                          \n"     \
+                       ld   "  %0, 0(%1)       # __cmpxchg_asm \n"     \
+               "       bne     %0, %z3, 2f                     \n"     \
+               "       .set    mips0                           \n"     \
+               "       move    $1, %z4                         \n"     \
+               "       .set    mips64r6                        \n"     \
+               "       " st "  $1, 0(%1)                       \n"     \
+               "       beqz    $1, 1b                          \n"     \
+               "       .set    pop                             \n"     \
+               "2:                                             \n"     \
+               : "=&r" (__ret), "=&r" (tmp), "+m" (*m)                 \
+               : "Jr" (old), "Jr" (new));                              \
+       } else {                                                        \
+               unsigned long __flags;                                  \
+                                                                       \
+               raw_local_irq_save(__flags);                            \
+               __ret = *m;                                             \
+               if (__ret == old)                                       \
+                       *m = new;                                       \
+               raw_local_irq_restore(__flags);                         \
+       }                                                               \
+                                                                       \
+       __ret;                                                          \
+})
+#else /* !CONFIG_CPU_MIPSR6 */
 #define __cmpxchg_asm(ld, st, m, old, new)                             \
 ({                                                                     \
        __typeof(*(m)) __ret;                                           \
@@ -146,12 +214,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
                "       .set    mips3                           \n"     \
-               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
+               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
                "       .set    mips3                           \n"     \
-               "       " st "  $1, %1                          \n"     \
+               "       " st "  $1, %1                          \n"     \
                "       beqzl   $1, 1b                          \n"     \
                "2:                                             \n"     \
                "       .set    pop                             \n"     \
@@ -163,12 +231,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
                "       .set    mips3                           \n"     \
-               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
+               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
                "       .set    mips3                           \n"     \
-               "       " st "  $1, %1                          \n"     \
+               "       " st "  $1, %1                          \n"     \
                "       beqz    $1, 1b                          \n"     \
                "       .set    pop                             \n"     \
                "2:                                             \n"     \
@@ -187,6 +255,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                                                                        \
        __ret;                                                          \
 })
+#endif /* CONFIG_CPU_MIPSR6 */
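For context, a sketch of how this header's __cmpxchg dispatcher (not shown in these hunks) instantiates the macro above; the switch is abridged and the local names are illustrative:

	/* Illustrative dispatch: 32-bit words use LL/SC, 64-bit use LLD/SCD. */
	switch (sizeof(*(ptr))) {
	case 4:
		__res = __cmpxchg_asm("ll", "sc", (volatile int *)ptr, old, new);
		break;
	case 8:
		__res = __cmpxchg_asm("lld", "scd", (volatile long *)ptr, old, new);
		break;
	}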
 
 /*
  * This function doesn't exist, so you'll get a linker error
index f5e104899538d822c390c83302688ce3a44bae36..da371532a1b1de5c260c985cff3224ef3c55dd3c 100644 (file)
@@ -27,6 +27,9 @@
 #ifndef cpu_has_tlbinv
 #define cpu_has_tlbinv          (cpu_data[0].options & MIPS_CPU_TLBINV)
 #endif
+#ifndef cpu_has_tlbinv_full
+#define cpu_has_tlbinv_full     (cpu_data[0].options & MIPS_CPU_TLBINV_FULL)
+#endif
 #ifndef cpu_has_4kex
 #define cpu_has_4kex           (cpu_data[0].options & MIPS_CPU_4KEX)
 #endif
 #define cpu_has_cache_cdex_s   (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
 #endif
 #ifndef cpu_has_prefetch
-#define cpu_has_prefetch       (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#ifdef CONFIG_CPU_MIPSR6
+#define cpu_has_prefetch        (0)
+#else
+#define cpu_has_prefetch        (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#endif
 #endif
 #ifndef cpu_has_mcheck
 #define cpu_has_mcheck         (cpu_data[0].options & MIPS_CPU_MCHECK)
 #ifndef cpu_has_rixi
 #define cpu_has_rixi           (cpu_data[0].options & MIPS_CPU_RIXI)
 #endif
+#ifndef cpu_has_rixi_except
+#define cpu_has_rixi_except     (cpu_data[0].options & MIPS_CPU_RIXI_EXCEPT)
+#endif
 #ifndef cpu_has_mmips
 # ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
 #  define cpu_has_mmips                (cpu_data[0].options & MIPS_CPU_MICROMIPS)
 #define cpu_has_cm2             (0)
 #define cpu_has_cm2_l2sync      (0)
 #endif
+#ifndef cpu_has_maar
+#define cpu_has_maar            (cpu_data[0].options2 & MIPS_CPU_MAAR)
+#endif
 
 /*
  * I-Cache snoops remote store.         This only matters on SMP.  Some multiprocessors
 # ifndef cpu_has_mips32r2
 # define cpu_has_mips32r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
 # endif
+# ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6       (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+# endif
 # ifndef cpu_has_mips64r1
 # define cpu_has_mips64r1      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
 # endif
 # ifndef cpu_has_mips64r2
 # define cpu_has_mips64r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
 # endif
+# ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6       (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+# endif
 
 /*
  * Shortcuts ...
  */
-#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32  (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64  (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6)
 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
-                        cpu_has_mips64r1 | cpu_has_mips64r2)
+                        cpu_has_mips64r1 | cpu_has_mips64r2 | \
+                        cpu_has_mips32r6 | cpu_has_mips64r6)
 
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
 #endif
 
 /*
 # define cpu_has_clo_clz       cpu_has_mips_r
 # endif
 
+#ifdef CONFIG_CPU_MIPSR6
+
+#define cpu_has_dsp     0
+#define cpu_has_dsp2    0
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
 #ifndef cpu_has_dsp
 #define cpu_has_dsp            (cpu_data[0].ases & MIPS_ASE_DSP)
 #endif
 #define cpu_has_dsp2           (cpu_data[0].ases & MIPS_ASE_DSP2P)
 #endif
 
+#endif /* CONFIG_CPU_MIPSR6 */
+
 #ifndef cpu_has_mipsmt
 #define cpu_has_mipsmt         (cpu_data[0].ases & MIPS_ASE_MIPSMT)
 #endif
index aa7e22c6bd1407acdcfbba356e9a2d4d5b0ff8cd..7fe98c35c05320f0e1ea1d81070d665632b87860 100644 (file)
@@ -46,6 +46,7 @@ struct cpuinfo_mips {
         * Capability and feature descriptor structure for MIPS CPU
         */
        unsigned long           options;
+       unsigned long           options2;
        unsigned long           ases;
        unsigned int            processor_id;
        unsigned int            fpu_id;
index 87c02db51dc28543d3ac429f38a45b092c03abb8..eeca04a151dd912c7487c4be26f1f4cd13c29700 100644 (file)
@@ -301,11 +301,14 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M32R2     0x00000040
 #define MIPS_CPU_ISA_M64R1     0x00000080
 #define MIPS_CPU_ISA_M64R2     0x00000100
+#define MIPS_CPU_ISA_M32R6      0x00000200
+#define MIPS_CPU_ISA_M64R6      0x00000400
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
-       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
+       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
-       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+       MIPS_CPU_ISA_M64R6)
 
 /*
  * CPU Option encodings
@@ -340,6 +343,13 @@ enum cpu_type_enum {
 #define MIPS_CPU_TLBINV         0x08000000 /* CPU supports TLBINV/F */
 #define MIPS_CPU_CM2            0x10000000 /* CPU has CM2 */
 #define MIPS_CPU_CM2_L2SYNC     0x20000000 /* CPU has CM2 L2-only SYNC feature */
+#define MIPS_CPU_TLBINV_FULL    0x40000000 /* CPU supports single TLBINV/F for full V/FTLB */
+#define MIPS_CPU_RIXI_EXCEPT    0x80000000 /* CPU has TLB Read/eXec Inhibit exceptions */
+
+/*
+ * CPU Option2 encodings
+ */
+#define MIPS_CPU_MAAR           0x00000001      /* MAAR exists */
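A hedged sketch of how a CPU-probe routine might set this new options2 flag, assuming the existing read_c0_config5() accessor and the Config5.MRP bit (MIPS_CONF5_MRP, defined in the mipsregs.h hunk below); the helper name is illustrative:

	static inline void probe_maar(struct cpuinfo_mips *c)
	{
		/* Config5.MRP set => MAAR register pairs are implemented. */
		if (read_c0_config5() & MIPS_CONF5_MRP)
			c->options2 |= MIPS_CPU_MAAR;
	}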
 
 /*
  * CPU ASE encodings
index 7bfad0520e25731c548c949ccecc045e91bba002..175517e1b598ff9201639adfa4228b08fea826e3 100644 (file)
@@ -15,6 +15,8 @@
 #include <asm/hazards.h>
 #include <asm/mipsregs.h>
 
+#ifndef CONFIG_CPU_MIPSR6
+
 #define DSP_DEFAULT    0x00000000
 #define DSP_MASK       0x3f
 
@@ -82,4 +84,15 @@ do {                                                                 \
        tsk->thread.dsp.dspr;                                           \
 })
 
+#else
+
+#define __init_dsp()           do { } while (0)
+#define init_dsp()             do { } while (0)
+#define save_dsp(tsk)          do { } while (0)
+#define restore_dsp(tsk)       do { } while (0)
+#define __save_dsp(tsk)        do { } while (0)
+#define __restore_dsp(tsk)     do { } while (0)
+
+#endif /* CONFIG_CPU_MIPSR6 */
+
 #endif /* _ASM_DSP_H */
index 4da0c1fe30d9fedcbfa1641d35f358ca25d9f8ce..5c5cbec1294e96b37eaa081c1d430fa2ed5a7358 100644 (file)
@@ -18,14 +18,17 @@ static inline void atomic_scrub(void *va, u32 size)
                 */
 
                __asm__ __volatile__ (
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
                "       .set    mips2                                   \n"
+#endif
                "1:     ll      %0, %1          # atomic_scrub          \n"
                "       addu    %0, $0                                  \n"
                "       sc      %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "=m" (*virt_addr)
-               : "m" (*virt_addr));
+               : "=&r" (temp), "+m" (*virt_addr));
 
                virt_addr++;
        }
index fe42767ba47ec6a1549cdbff72d51856036a7bb1..134ee8ee293a45d6332e86276f21c7111cf91018 100644 (file)
@@ -59,7 +59,7 @@ static inline int __own_fpu(void)
 {
        int ret = 0;
 
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_CPU_MIPS64)
        if (test_thread_flag(TIF_32BIT_REGS)) {
                change_c0_status(ST0_CU1|ST0_FR,ST0_CU1);
                KSTK_STATUS(current) |= ST0_CU1;
index f8c3a095871d7d3dc19eafdabdb6d7ee123346c1..5660a6e4d066ab496325c4a70587ec6206146d07 100644 (file)
 #include <asm/war.h>
 
 #ifndef CONFIG_EVA
+#ifdef CONFIG_CPU_MIPSR6
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)              \
+{                                                                      \
+       if (cpu_has_llsc) {                                             \
+               __asm__ __volatile__(                                   \
+               "       .set    push                            \n"     \
+               "       .set    noat                            \n"     \
+               "       .set    mips64r6                        \n"     \
+               "1:     ll      %1, %4  # __futex_atomic_op     \n"     \
+               "       .set    mips0                           \n"     \
+               "       " insn  "                               \n"     \
+               "       .set    mips64r6                        \n"     \
+               "2:     sc      $1, %2                          \n"     \
+               "       beqz    $1, 1b                          \n"     \
+               __WEAK_LLSC_MB                                          \
+               "3:                                             \n"     \
+               "       .set    pop                             \n"     \
+               "       .set    mips0                           \n"     \
+               "       .section .fixup,\"ax\"                  \n"     \
+               "4:     li      %0, %6                          \n"     \
+               "       j       3b                              \n"     \
+               "       .previous                               \n"     \
+               "       .section __ex_table,\"a\"               \n"     \
+               "       "__UA_ADDR "\t1b, 4b                    \n"     \
+               "       "__UA_ADDR "\t2b, 4b                    \n"     \
+               "       .previous                               \n"     \
+               : "=r" (ret), "=&r" (oldval), "=R" (*uaddr)             \
+               : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT)    \
+               : "memory");                                            \
+       } else                                                          \
+               ret = -ENOSYS;                                          \
+}
+#else /* !CONFIG_CPU_MIPSR6 */
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
 {                                                                      \
        if (cpu_has_llsc && R10000_LLSC_WAR) {                          \
        } else                                                          \
                ret = -ENOSYS;                                          \
 }
+#endif /* CONFIG_CPU_MIPSR6 */
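The insn argument is the arithmetic fragment spliced between the LL and the SC; for reference, callers elsewhere in this file pass fragments such as these (operand numbers match the constraints above):

	case FUTEX_OP_SET:
		__futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("addu $1, %1, %z5", ret, oldval, uaddr, oparg);
		break;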
 #else
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
 {                                                                      \
@@ -236,12 +270,20 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "# futex_atomic_cmpxchg_inatomic                        \n"
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
                "       .set    mips3                                   \n"
+#endif
                "1:     ll      %1, %3                                  \n"
                "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z5                                 \n"
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
                "       .set    mips3                                   \n"
+#endif
                "2:     sc      $1, %2                                  \n"
                "       beqz    $1, 1b                                  \n"
                __WEAK_LLSC_MB
index 3b194c78e79e2a02c56c3d2812110d5f8d0cba9e..c632702c42a4875005f702fcb4bdbcfccb09fba9 100644 (file)
@@ -14,6 +14,7 @@ enum fw_memtypes {
        fw_dontuse,
        fw_code,
        fw_free,
+       fw_inuse,
 };
 
 typedef struct {
index 3717da70e891fe649076f84e719ab9546b6963d6..becc6ecb7c2d4a879c68fe3b5f034c6bf2b0634b 100644 (file)
 #define GCMP_GCB_GAOR2MASK_OFS            0x0218  /* Attribute-Only Region2 Mask */
 #define GCMP_GCB_GAOR3BA_OFS              0x0220  /* Attribute-Only Region3 Base Address */
 #define GCMP_GCB_GAOR3MASK_OFS            0x0228  /* Attribute-Only Region3 Mask */
+#define GCMP_GCB_GCML2P_OFS               0x0300  /* L2 Prefetch Control */
+#define  GCMP_GCB_GCML2P_PAGE_MASK          0xfffff000  /* ... page mask */
+#define  GCMP_GCB_GCML2P_PFTEN              0x00000100  /* L2 Prefetch Enable */
+#define  GCMP_GCB_GCML2P_NPFT               0x000000ff  /* Number of L2 prefetches */
+#define GCMP_GCB_GCML2PB_OFS              0x0308  /* L2 Prefetch Control B */
+#define  GCMP_GCB_GCML2PB_CODE_PFTEN        0x00000100  /* L2 Code Prefetch Enable */
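A hedged sketch of enabling the CM2 L2 prefetcher via these fields, assuming the file's existing GCMPGCB() register accessor; the npft count and write ordering are illustrative, not taken from this patch:

	static inline void cm2_l2_prefetch_enable(unsigned int npft)
	{
		u32 l2p = GCMPGCB(GCML2P);

		l2p &= ~GCMP_GCB_GCML2P_NPFT;           /* reset prefetch count */
		l2p |= (npft & GCMP_GCB_GCML2P_NPFT) | GCMP_GCB_GCML2P_PFTEN;
		GCMPGCB(GCML2P) = l2p;                  /* data-side prefetch   */
		GCMPGCB(GCML2PB) |= GCMP_GCB_GCML2PB_CODE_PFTEN; /* code side   */
	}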
 
 /* Core local/Core other control block registers */
 #define GCMP_CCB_RESETR_OFS            0x0000                  /* Reset Release */
index e3ee92d4dbe750c7aa05a5488f7443cdd64fb387..51f364b182b59c0896c64a4a7a8b1ccc44700f30 100644 (file)
@@ -21,7 +21,7 @@
 /*
  * TLB hazards
  */
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if defined(CONFIG_CPU_MIPSR6) || (defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON))
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
  * The alternative is switching the assembler to 64-bit code which happens
  * to work right even for 32-bit code ...
  */
+#ifdef CONFIG_CPU_MIPSR6
+#define instruction_hazard()                                           \
+do {                                                                   \
+       unsigned long tmp;                                              \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "       .set    push                                    \n"     \
+       "       .set    mips64r6                                \n"     \
+       "       dla     %0, 1f                                  \n"     \
+       "       jr.hb   %0                                      \n"     \
+       "       .set    pop                                     \n"     \
+       "1:                                                     \n"     \
+       : "=r" (tmp));                                                  \
+} while (0)
+#else /* !CONFIG_CPU_MIPSR6 */
 #define instruction_hazard()                                           \
 do {                                                                   \
        unsigned long tmp;                                              \
@@ -65,6 +80,7 @@ do {                                                                  \
        "1:                                                     \n"     \
        : "=r" (tmp));                                                  \
 } while (0)
+#endif
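A usage sketch (the call site and the tramp names are illustrative, not from this patch): after new instructions are written, caches are synchronized and the hazard barrier runs so the core cannot execute stale fetches.

	/* Illustrative only: publish freshly written code before jumping to it. */
	memcpy((void *)tramp, tramp_template, tramp_size);      /* hypothetical */
	local_flush_icache_range(tramp, tramp + tramp_size);
	instruction_hazard();   /* jr.hb on R6, per the macro above */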
 
 #elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
        defined(CONFIG_CPU_BMIPS)
@@ -132,7 +148,7 @@ do {                                                                        \
 
 #define instruction_hazard()                                           \
 do {                                                                   \
-       if (cpu_has_mips_r2)                                            \
+       if (cpu_has_mips_r2 || cpu_has_mips_r6)                         \
                __instruction_hazard();                                 \
 } while (0)
 
@@ -240,7 +256,7 @@ do {                                                                        \
 
 #define __disable_fpu_hazard
 
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 #define __enable_fpu_hazard                                            \
        ___ehb
index 45c00951888b4661ecbcf93bcc46fff33f9a9555..73df9bc5f501fb110cc77eb66d371caa11c8a0ea 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/stringify.h>
 #include <asm/hazards.h>
 
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+#if (defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_CPU_MIPSR2)) && !defined(CONFIG_MIPS_MT_SMTC)
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,7 +118,7 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+#endif /* if (defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_CPU_MIPSR2)) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
 extern void smtc_ipi_replay(void);
@@ -141,7 +141,7 @@ static inline void arch_local_irq_enable(void)
        "       ori     $1, 0x400                                       \n"
        "       xori    $1, 0x400                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        "       ei                                                      \n"
 #else
        "       mfc0    $1,$12                                          \n"
index 00c03451e757b267420b39a5d470b747be5860be..c74b048be9067539edfa9189af28f481b21e7207 100644 (file)
@@ -47,8 +47,12 @@ static __inline__ long local_add_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
-               "1:"    __LL    "%1, %2         # local_add_return      \n"
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
+               "       .set    mips3                                   \n"
+#endif
+               "1:"    __LL    "%1, %2         # local_add_return      \n"
                        __ADDU  "%0, %1, %3                             \n"
                        __SC    "%0, %2                                 \n"
                "       beqz    %0, 1b                                  \n"
@@ -92,8 +96,12 @@ static __inline__ long local_sub_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
                "       .set    mips3                                   \n"
-               "1:"    __LL    "%1, %2         # local_sub_return      \n"
+#endif
+               "1:"    __LL    "%1, %2         # local_sub_return      \n"
                        __SUBU  "%0, %1, %3                             \n"
                        __SC    "%0, %2                                 \n"
                "       beqz    %0, 1b                                  \n"
index f4f3fe3bd81831685ffd1a76122ab387341d0627..69f42b5911651311f8978785edd24af105c6df42 100644 (file)
 #define PG_XIE         (_ULCAST_(1) <<  30)
 #define PG_ELPA                (_ULCAST_(1) <<  29)
 #define PG_ESP         (_ULCAST_(1) <<  28)
+#define PG_IEC          (_ULCAST_(1) <<  27)
+#define PG_MCCAUSE      (_ULCAST_(0x1f) << 0)
 
 /*
  * R4x00 interrupt enable / cause bits
 #define MIPS_CONF4_VTLBSIZEEXT_SHIFT   (24)
 #define MIPS_CONF4_VTLBSIZEEXT (_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
 #define MIPS_CONF4_AE          (_ULCAST_(1) << 28)
-#define MIPS_CONF4_IE          (_ULCAST_(3) << 29)
-#define MIPS_CONF4_TLBINV      (_ULCAST_(2) << 29)
+#define MIPS_CONF4_IE           (_ULCAST_(3) << 29)
+#define MIPS_CONF4_TLBINV       (_ULCAST_(2) << 29)
+#define MIPS_CONF4_TLBINV_FULL  (_ULCAST_(1) << 29)
 
+#define MIPS_CONF5_MRP          (_ULCAST_(1) << 3)
 #define MIPS_CONF5_EVA         (_ULCAST_(1) << 28)
 #define MIPS_CONF5_CV          (_ULCAST_(1) << 29)
 #define MIPS_CONF5_K           (_ULCAST_(1) << 30)
 /* ebase register bit definition */
 #define MIPS_EBASE_WG           (_ULCAST_(1) << 11)
 
+/* MAAR bits definitions */
+#define MIPS_MAAR_V             (_ULCAST_(1))
+#define MIPS_MAAR_S             (_ULCAST_(1) << 1)
+#define MIPS_MAAR_HI_V          (_ULCAST_(1) << 31)
+
+#define MIPS_MAAR_MAX           64
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -1006,6 +1017,7 @@ do {                                                                      \
 
 #define read_c0_prid()         __read_32bit_c0_register($15, 0)
 #define read_c0_cmgcrbase()     __read_ulong_c0_register($15, 3)
+#define read_c0_bevva()         __read_ulong_c0_register($15, 4)
 
 #define read_c0_config()       __read_32bit_c0_register($16, 0)
 #define read_c0_config1()      __read_32bit_c0_register($16, 1)
@@ -1024,6 +1036,16 @@ do {                                                                     \
 #define write_c0_config6(val)  __write_32bit_c0_register($16, 6, val)
 #define write_c0_config7(val)  __write_32bit_c0_register($16, 7, val)
 
+#define read_c0_lladdr()        __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val)    __write_ulong_c0_register($17, 0, val)
+/*
+ * MAAR registers
+ */
+#define read_c0_maar()          __read_ulong_c0_register($17, 1)
+#define write_c0_maar(val)      __write_ulong_c0_register($17, 1, val)
+#define read_c0_maarindex()     __read_32bit_c0_register($17, 2)
+#define write_c0_maarindex(val) __write_32bit_c0_register($17, 2, val)
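A hedged sketch of programming one MAAR pair with the accessors above: by convention the even index holds a region's upper bound and the odd index its lower bound; the >> 4 packing mirrors the MAAR ADDR field placement, but treat the whole routine as illustrative.

	static inline void maar_mark_speculative(unsigned long lower,
						 unsigned long upper)
	{
		/* MIPS_MAAR_S permits speculation, MIPS_MAAR_V marks the
		 * entry valid; addresses are assumed suitably aligned.
		 */
		write_c0_maarindex(0);                  /* even: upper bound */
		back_to_back_c0_hazard();
		write_c0_maar((upper >> 4) | MIPS_MAAR_S | MIPS_MAAR_V);
		write_c0_maarindex(1);                  /* odd: lower bound  */
		back_to_back_c0_hazard();
		write_c0_maar((lower >> 4) | MIPS_MAAR_S | MIPS_MAAR_V);
	}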
+
 /*
  * The WatchLo register.  There may be up to 8 of them.
  */
@@ -1277,6 +1299,7 @@ do {                                                                      \
        :: "r" (value));                                                \
 })
 
+#ifndef CONFIG_CPU_MIPSR6
 /*
  * Macros to access the DSP ASE registers
  */
@@ -1665,6 +1688,7 @@ do {                                                                      \
 
 #endif /* CONFIG_CPU_MICROMIPS */
 #endif
+#endif /* CONFIG_CPU_MIPSR6 */
 
 /*
  * TLB operations.
@@ -1735,10 +1759,7 @@ static inline void tlb_write_random(void)
 static inline void tlbinvf(void)
 {
        __asm__ __volatile__(
-               ".set push\n\t"
-               ".set noreorder\n\t"
-               ".word 0x42000004\n\t"
-               ".set pop");
+               ".word 0x42000004");
 }
 
 /*
@@ -1833,7 +1854,8 @@ static inline void __ehb(void)
 {
        __asm__ __volatile__(
        "       .set    mips32r2                                        \n"
-       "       ehb                                                     \n"             "       .set    mips0                                           \n");
+       "       ehb                                                     \n"
+       "       .set    mips0                                           \n");
 }
 
 /*
index 44b705d0826218a47ac6f78d5b068165b02732b9..6202276c85d17cab159b1bc882565e2a24f9f86a 100644 (file)
@@ -92,6 +92,10 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_PROC_FAMILY "MIPS64_R1 "
 #elif defined CONFIG_CPU_MIPS64_R2
 #define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
 #elif defined CONFIG_CPU_R3000
 #define MODULE_PROC_FAMILY "R3000 "
 #elif defined CONFIG_CPU_TX39XX
index 1a5cebabd0ae71a962958470641f795769955bb9..bdb1711e42b8d44b0b2751787917564db1a577e1 100644 (file)
 
 #else /* 'Normal' r4K case */
 
-#ifndef CONFIG_CPU_MIPSR2
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
 /*
  * When using the RI/XI bit support, we have 13 bits of flags below
  * the physical address. The RI/XI bits are placed such that a SRL 5
 #define _PAGE_NO_READ_SHIFT    (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
 #define _PAGE_NO_READ          ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })
 
-#else /* CONFIG_CPU_MIPSR2 */
+#else /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 /* static bits allocation in MIPS R2, two variants -
    HUGE TLB in 64BIT kernel support or not.
 
 #endif /* CONFIG_64BIT */
 
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
 
 
 #define _PAGE_GLOBAL_SHIFT      (_PAGE_NO_READ_SHIFT + 1)
index 19661115ec644f833b84ccddbca90f29b573a1e1..bfb66e1860318a8422d6497ccb45ba445d0cb14d 100644 (file)
 #define INDEX_BASE      CKSEG0
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
 #define cache_op(op,addr)                                              \
+       __asm__ __volatile__(                                           \
+       "       .set    push                                    \n"     \
+       "       .set    noreorder                               \n"     \
+       "       .set    mips64r6\n\t                            \n"     \
+       "       cache   %0, %1                                  \n"     \
+       "       .set    pop                                     \n"     \
+       :                                                               \
+       : "i" (op), "R" (*(unsigned char *)(addr)))
+#else
+#define cache_op(op,addr)                                               \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
@@ -42,6 +53,7 @@
        "       .set    pop                                     \n"     \
        :                                                               \
        : "i" (op), "R" (*(unsigned char *)(addr)))
+#endif
 
 #ifdef CONFIG_MIPS_MT
 /*
@@ -191,7 +203,21 @@ static inline void flush_scache_line(unsigned long addr)
        cache_op(Hit_Writeback_Inv_SD, addr);
 }
 
+#ifdef CONFIG_CPU_MIPSR6
 #define protected_cache_op(op,addr)                            \
+       __asm__ __volatile__(                                   \
+       "       .set    push                    \n"             \
+       "       .set    noreorder               \n"             \
+       "       .set    mips64r6                \n"             \
+       "1:     cache   %0, (%1)                \n"             \
+       "2:     .set    pop                     \n"             \
+       "       .section __ex_table,\"a\"       \n"             \
+       "       "STR(PTR)" 1b, 2b               \n"             \
+       "       .previous"                                      \
+       :                                                       \
+       : "i" (op), "r" (addr))
+#else
+#define protected_cache_op(op,addr)                             \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
@@ -203,6 +229,7 @@ static inline void flush_scache_line(unsigned long addr)
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))
+#endif
 
 #ifdef CONFIG_EVA
 #define protected_cachee_op(op,addr)                            \
@@ -259,6 +286,144 @@ static inline void invalidate_tcache_page(unsigned long addr)
        cache_op(Page_Invalidate_T, addr);
 }
 
+#ifdef CONFIG_CPU_MIPSR6
+
+#define cache16_unroll32(base,op)                                       \
+       __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set noreorder                                  \n"     \
+       "       .set mips64r6                                   \n"     \
+       "       .set noat                                       \n"     \
+       "       cache %1, 0x000(%0); cache %1, 0x010(%0)        \n"     \
+       "       cache %1, 0x020(%0); cache %1, 0x030(%0)        \n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x050(%0)        \n"     \
+       "       cache %1, 0x060(%0); cache %1, 0x070(%0)        \n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x090(%0)        \n"     \
+       "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)        \n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)        \n"     \
+       "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)        \n"     \
+       "       addiu $1, %0, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x010($1)        \n"     \
+       "       cache %1, 0x020($1); cache %1, 0x030($1)        \n"     \
+       "       cache %1, 0x040($1); cache %1, 0x050($1)        \n"     \
+       "       cache %1, 0x060($1); cache %1, 0x070($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x090($1)        \n"     \
+       "       cache %1, 0x0a0($1); cache %1, 0x0b0($1)        \n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0d0($1)        \n"     \
+       "       cache %1, 0x0e0($1); cache %1, 0x0f0($1)        \n"     \
+       "       .set pop                                        \n"     \
+               :                                                       \
+               : "r" (base),                                           \
+                 "i" (op));
+
+#define cache32_unroll32(base,op)                                      \
+       __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set noreorder                                  \n"     \
+       "       .set mips64r6                                   \n"     \
+       "       .set noat                                       \n"     \
+       "       cache %1, 0x000(%0); cache %1, 0x020(%0)        \n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x060(%0)        \n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)        \n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)        \n"     \
+       "       addiu $1, %0, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)        \n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)        \n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)        \n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)        \n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)        \n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)        \n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)        \n"     \
+       "       .set pop                                        \n"     \
+               :                                                       \
+               : "r" (base),                                           \
+                 "i" (op));
+
+#define cache64_unroll32(base,op)                                      \
+       __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set noreorder                                  \n"     \
+       "       .set mips64r6                                   \n"     \
+       "       .set noat                                       \n"     \
+       "       cache %1, 0x000(%0); cache %1, 0x040(%0)        \n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)        \n"     \
+       "       addiu $1, %0, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)        \n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)        \n"     \
+       "       .set pop                                        \n"     \
+               :                                                       \
+               : "r" (base),                                           \
+                 "i" (op));
+
+#define cache128_unroll32(base,op)                                     \
+       __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set noreorder                                  \n"     \
+       "       .set mips64r6                                   \n"     \
+       "       .set noat                                       \n"     \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)        \n"     \
+       "       addiu $1, %0, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       addiu $1, $1, 0x100                             \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x080($1)        \n"     \
+       "       .set pop                                        \n"     \
+               :                                                       \
+               : "r" (base),                                           \
+                 "i" (op));
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
 #define cache16_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
@@ -362,6 +527,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #ifdef CONFIG_EVA
 #define cache16_unroll32_user(base,op)                                  \
index 78d201fb6c87c93608b8295327277a6e8804818e..fe768aec229461b9955270af5790ca574bbc5356 100644 (file)
@@ -238,6 +238,23 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
+#ifdef CONFIG_CPU_MIPSR6
+               unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+               do {
+                       __asm__ __volatile__(
+                       "       .set    push                            \n"
+                       "       .set    noreorder   # arch_read_lock    \n"
+                       "1:     ll      %0, 0(%1)                       \n"
+                       "       bltz    %0, 1b                          \n"
+                       "        addu   %0, 1                           \n"
+                       "2:     sc      %0, 0(%1)                       \n"
+                       "       .set    pop                             \n"
+                       : "=&r" (tmp)
+                       : "r" (tmp2)
+                       : "memory");
+               } while (unlikely(!tmp));
+#else
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_read_lock        \n"
@@ -248,6 +265,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                        : "m" (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
+#endif
        }
 
        smp_llsc_mb();
@@ -272,6 +290,23 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
+#ifdef CONFIG_CPU_MIPSR6
+               unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+               do {
+                       __asm__ __volatile__(
+                       "       .set    push                            \n"
+                       "       .set    noat                            \n"
+                       "1:     ll      %0, 0(%1)   # arch_read_unlock  \n"
+                       "       li      $1, 1                           \n"
+                       "       sub     %0, %0, $1                      \n"
+                       "       sc      %0, 0(%1)                       \n"
+                       "       .set    pop                             \n"
+                       : "=&r" (tmp)
+                       : "r" (tmp2)
+                       : "memory");
+               } while (unlikely(!tmp));
+#else
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_read_unlock      \n"
@@ -281,6 +316,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
                        : "m" (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
+#endif
        }
 }
 
@@ -302,6 +338,23 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
+#ifdef CONFIG_CPU_MIPSR6
+               unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+               do {
+                       __asm__ __volatile__(
+                       "       .set    push                            \n"
+                       "       .set    noreorder   # arch_write_lock   \n"
+                       "1:     ll      %0, 0(%1)                       \n"
+                       "       bnez    %0, 1b                          \n"
+                       "        lui    %0, 0x8000                      \n"
+                       "2:     sc      %0, 0(%1)                       \n"
+                       "       .set    pop                             \n"
+                       : "=&r" (tmp)
+                       : "r" (tmp2)
+                       : "memory");
+               } while (unlikely(!tmp));
+#else
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_write_lock       \n"
@@ -312,6 +365,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                        : "m" (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
+#endif
        }
 
        smp_llsc_mb();
@@ -352,6 +406,26 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
+#ifdef CONFIG_CPU_MIPSR6
+               unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+               __asm__ __volatile__(
+               "       .set    noreorder       # arch_read_trylock     \n"
+               "       li      %1, 0                                   \n"
+               "1:     ll      %0, 0(%2)                               \n"
+               "       bltz    %0, 2f                                  \n"
+               "        addu   %0, 1                                   \n"
+               "       sc      %0, 0(%2)                               \n"
+               "       beqz    %0, 1b                                  \n"
+               "        nop                                            \n"
+               "       .set    reorder                                 \n"
+               __WEAK_LLSC_MB
+               "       li      %2, 1                                   \n"
+               "2:                                                     \n"
+               : "=&r" (tmp), "=&r" (ret)
+               : "r" (tmp2)
+               : "memory");
+#else
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_trylock     \n"
                "       li      %2, 0                                   \n"
@@ -368,6 +442,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
                : "m" (rw->lock)
                : "memory");
+#endif
        }
 
        return ret;
@@ -396,6 +471,26 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
+#ifdef CONFIG_CPU_MIPSR6
+               unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+               do {
+                       __asm__ __volatile__(
+                       "       .set    push                            \n"
+                       "       .set    reorder                         \n"
+                       "1:     ll      %0, 0(%2)                       \n"
+                       "       li      %1, 0   # arch_write_trylock    \n"
+                       "       bnez    %0, 2f                          \n"
+                       "       lui     %0, 0x8000                      \n"
+                       "       sc      %0, 0(%2)                       \n"
+                       "       li      %1, 1                           \n"
+                       "2:                                             \n"
+                       "       .set    pop                             \n"
+                       : "=&r" (tmp), "=&r" (ret)
+                       : "r" (tmp2)
+                       : "memory");
+               } while (unlikely(!tmp));
+#else
                do {
                        __asm__ __volatile__(
                        "       ll      %1, %3  # arch_write_trylock    \n"
@@ -409,6 +504,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                        : "m" (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
+#endif
 
                smp_llsc_mb();
        }
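For readability, the lock-word encoding these sequences rely on, restated as a hedged C sketch of the read-trylock fast path (the helper name and the use of the generic cmpxchg() are ours, not the patch's):

	/* rw->lock: 0 = free, N > 0 = N readers, sign bit (the "lui 0x8000"
	 * above) = writer held.
	 */
	static inline int read_trylock_sketch(arch_rwlock_t *rw)
	{
		int v = rw->lock;                           /* ll            */

		if (v < 0)                                  /* bltz: writer  */
			return 0;
		return cmpxchg(&rw->lock, v, v + 1) == v;   /* addu + sc     */
	}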
index 0b89006e490788ffcc26a9225762c66f4bd4ea8b..58882ef7f003ea2c59a95a6b310d1c927836e4bd 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _MIPS_SPRAM_H
 #define _MIPS_SPRAM_H
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
 extern __init void spram_config(void);
 #else
 static inline void spram_config(void) { };
index a89d1b10d027ce65d83eecb9dd57bfda7859400e..cfa81c69bf6f2fff4a0c1afe5df9f781bc63fccf 100644 (file)
@@ -49,7 +49,7 @@
                LONG_S  v1, PT_HI(sp)
                mflhxu  v1
                LONG_S  v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                mfhi    v1
 #endif
 #ifdef CONFIG_32BIT
@@ -59,7 +59,7 @@
                LONG_S  $10, PT_R10(sp)
                LONG_S  $11, PT_R11(sp)
                LONG_S  $12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_HI(sp)
                mflo    v1
 #endif
@@ -67,7 +67,7 @@
                LONG_S  $14, PT_R14(sp)
                LONG_S  $15, PT_R15(sp)
                LONG_S  $24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_LO(sp)
 #endif
                .endm
                mtlhx   $24
                LONG_L  $24, PT_LO(sp)
                mtlhx   $24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                LONG_L  $24, PT_LO(sp)
                mtlo    $24
                LONG_L  $24, PT_HI(sp)
 
                .macro  RESTORE_SP_AND_RET
                LONG_L  sp, PT_R29(sp)
+#ifdef CONFIG_CPU_MIPSR6
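+               /* ERETNC: return from exception without clearing the LLbit */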
+               .set    mips64r6
+               eretnc
+#else
                .set    mips3
                eret
+#endif
                .set    mips0
                .endm
 
index fd16bcb6c31111d34009b60fce8db7a1edff472a..271126f529cb757b1caa95150492293cca76f38a 100644 (file)
@@ -58,11 +58,18 @@ do {                                                                        \
 #define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
 #endif
 
-#define __clear_software_ll_bit()                                      \
+#ifdef CONFIG_CPU_MIPSR6
+#define __clear_ll_bit()                                                \
+do {                                                                   \
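+       /* R6 uses ERETNC, so drop the LLbit explicitly via CP0 LLAddr */      \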
+       write_c0_lladdr(0);                                             \
+} while (0)
+#else
+#define __clear_ll_bit()                                                \
 do {                                                                   \
        if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)       \
                ll_bit = 0;                                             \
 } while (0)
+#endif
 
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
@@ -70,7 +77,7 @@ do {                                                                  \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
-       __clear_software_ll_bit();                                      \
+       __clear_ll_bit();                                               \
        __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);  \
        (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
 } while (0)
index 3a452282cba0a1d2d02e635c1c35123f6d751f83..6494a51c6adbcb12f19286c0c2721a4e8a05446e 100644 (file)
@@ -6,5 +6,9 @@
  */
 extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask);
+extern void remove_wired_entry(void);
+extern int wired_push(unsigned long entryhi, unsigned long entrylo0,
+                     unsigned long entrylo1, unsigned long pagemask);
+extern int wired_pop(void);
 
 #endif /* __ASM_TLBMISC_H */
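A hedged usage sketch of the new wired-entry stack API; the return-value convention (negative on failure) is an assumption, not specified by the prototypes above.

	/* Illustrative: temporarily pin a mapping, then release it. */
	if (wired_push(entryhi, entrylo0, entrylo1, pagemask) < 0)
		return -ENOMEM;         /* assumed: no free wired slot */
	/* ... use the pinned mapping ... */
	wired_pop();                    /* drop the most recent wired entry */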
index 67b878a42759a0c73d9c2e6aa84e56de8d87f294..871344419e3a6e9e2e2a2ddba07aacda08531fb0 100644 (file)
 enum major_op {
        spec_op, bcond_op, j_op, jal_op,
        beq_op, bne_op, blez_op, bgtz_op,
+#ifndef CONFIG_CPU_MIPSR6
        addi_op, addiu_op, slti_op, sltiu_op,
+#else
+       cbcond0_op, addiu_op, slti_op, sltiu_op,
+#endif
        andi_op, ori_op, xori_op, lui_op,
        cop0_op, cop1_op, cop2_op, cop1x_op,
        beql_op, bnel_op, blezl_op, bgtzl_op,
+#ifndef CONFIG_CPU_MIPSR6
        daddi_op, daddiu_op, ldl_op, ldr_op,
+#else
+       cbcond1_op, daddiu_op, ldl_op, ldr_op,
+#endif
        spec2_op, jalx_op, mdmx_op, spec3_op,
        lb_op, lh_op, lwl_op, lw_op,
        lbu_op, lhu_op, lwr_op, lwu_op,
        sb_op, sh_op, swl_op, sw_op,
        sdl_op, sdr_op, swr_op, cache_op,
+#ifndef CONFIG_CPU_MIPSR6
        ll_op, lwc1_op, lwc2_op, pref_op,
        lld_op, ldc1_op, ldc2_op, ld_op,
        sc_op, swc1_op, swc2_op, major_3b_op,
        scd_op, sdc1_op, sdc2_op, sd_op
+#else
+       ll_op, lwc1_op, bc_op, pref_op,
+       lld_op, ldc1_op, jump_op, ld_op,
+       sc_op, swc1_op, balc_op, major_3b_op,
+       scd_op, sdc1_op, jump2_op, sd_op
+#endif
 };
 
 /*
@@ -80,10 +95,13 @@ enum spec3_op {
        sce_op = 0x1e, swe_op = 0x1f,
        bshfl_op = 0x20, swle_op = 0x21,
        swre_op = 0x22, prefe_op = 0x23,
-       dbshfl_op = 0x24,
+       dbshfl_op = 0x24, cache6_op = 0x25,
+       sc6_op = 0x26, scd6_op = 0x27,
        lbue_op = 0x28, lhue_op = 0x29,
        lbe_op = 0x2c, lhe_op = 0x2d,
        lle_op = 0x2e, lwe_op = 0x2f,
+       pref6_op = 0x35, ll6_op = 0x36,
+       lld6_op = 0x37,
        rdhwr_op = 0x3b
 };
 
@@ -95,7 +113,11 @@ enum rt_op {
        spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
        tgei_op, tgeiu_op, tlti_op, tltiu_op,
        teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
+#ifndef CONFIG_CPU_MIPSR6
        bltzal_op, bgezal_op, bltzall_op, bgezall_op,
+#else
+       nal_op, bal_op, rt_op_0x12_op, rt_op_0x13_op,
+#endif
        rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
        rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
        bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
@@ -109,12 +131,13 @@ enum cop_op {
        cfc_op        = 0x02, mfhc_op       = 0x03,
        mtc_op        = 0x04, dmtc_op       = 0x05,
        ctc_op        = 0x06, mthc_op       = 0x07,
-       bc_op         = 0x08, cop_op        = 0x10,
+       rs_bc_op      = 0x08, bc1eqz_op     = 0x09,
+       bc1nez_op     = 0x0d, cop_op        = 0x10,
        copm_op       = 0x18
 };
 
 /*
- * rt field of cop.bc_op opcodes
+ * rt field of cop.rs_bc_op opcodes
  */
 enum bcop_op {
        bcf_op, bct_op, bcfl_op, bctl_op
index 97c2f81b4b43af66d766778ea2c17dcca6d8303b..7c05e93ce8f677b5533b8dfa260428c93b1e8d81 100644 (file)
@@ -13,7 +13,7 @@
 
 #define __SWAB_64_THRU_32__
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
@@ -39,8 +39,8 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 #define __arch_swab32 __arch_swab32
 
 /*
- * Having already checked for CONFIG_CPU_MIPSR2, enable the
- * optimized version for 64-bit kernel on r2 CPUs.
+ * Having already checked for CONFIG_CPU_MIPSR2/R6, enable the
+ * optimized version for 64-bit kernel on R2 & R6 CPUs.
  */
 #ifdef CONFIG_64BIT
 static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
@@ -55,5 +55,5 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 }
 #define __arch_swab64 __arch_swab64
 #endif /* CONFIG_64BIT */
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 #endif /* _ASM_SWAB_H */
index f1b5bd182509332bd0d3db9c302dd95c926544d6..ea5764106683c5624c57b9c68c4e05db22ba0ad6 100644 (file)
@@ -204,12 +204,28 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
  *             returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *             evaluating the branch.
  */
+/* Note on R6 compact branches:
+ *      Compact branches do not signal exceptions themselves (besides
+ *      BC1EQZ/BC1NEZ) and do not execute the instruction in the
+ *      Forbidden Slot when the branch is taken. The return EPC for them
+ *      can therefore be safely set to EPC + 8, because the only way to
+ *      get a BD precise exception here is executing the instruction in
+ *      the Forbidden Slot while the branch is not taken.
+ *
+ *      Unconditional compact jumps/branches are handled here too for
+ *      completeness (they never raise a BD precise exception).
+ */
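+/*      For example, for a not-taken compact branch
+ *
+ *              EPC + 0:  beqc  rs, rt, offset   (no delay slot)
+ *              EPC + 4:  <instruction in Forbidden Slot>
+ *
+ *      a BD precise exception in the Forbidden Slot means the branch was
+ *      not taken; the caller emulates the faulting slot instruction and
+ *      execution resumes at EPC + 8.
+ */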
 int __compute_return_epc_for_insn(struct pt_regs *regs,
                                   union mips_instruction insn)
 {
-       unsigned int bit, fcr31, dspcontrol;
+       unsigned int bit;
        long epc = regs->cp0_epc;
        int ret = 0;
+#ifdef CONFIG_CPU_MIPSR6
+       int reg;
+#else
+       unsigned int fcr31;
+       unsigned int dspcontrol;
+#endif
 
        switch (insn.i_format.opcode) {
        /*
@@ -220,7 +236,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                case jalr_op:
                        regs->regs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
+#ifndef CONFIG_CPU_MIPSR6
                case jr_op:
+#endif
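+                       /* R6 has no separate jr: it is encoded as
+                          jalr $0, rs, so the jalr_op case covers both */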
                        regs->cp0_epc = regs->regs[insn.r_format.rs];
                        break;
                }
@@ -234,27 +252,52 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
+#ifndef CONFIG_CPU_MIPSR6
                case bltzl_op:
+#endif
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
                                if (insn.i_format.rt == bltzl_op)
                                        ret = BRANCH_LIKELY_TAKEN;
+#endif
                        } else
                                epc += 8;
                        regs->cp0_epc = epc;
                        break;
 
                case bgez_op:
+#ifndef CONFIG_CPU_MIPSR6
                case bgezl_op:
+#endif
                        if ((long)regs->regs[insn.i_format.rs] >= 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
                                if (insn.i_format.rt == bgezl_op)
                                        ret = BRANCH_LIKELY_TAKEN;
+#endif
                        } else
                                epc += 8;
                        regs->cp0_epc = epc;
                        break;
 
+#ifdef CONFIG_CPU_MIPSR6
+               case nal_op:    /* MIPSR6: nal == bltzal $0 */
+                       if (insn.i_format.rs)
+                               break;
+                       regs->regs[31] = epc + 8;
+                       epc += 4;
+                       regs->cp0_epc = epc;
+                       break;
+
+               case bal_op:    /* MIPSR6: bal == bgezal $0 */
+                       if (insn.i_format.rs)
+                               break;
+                       regs->regs[31] = epc + 8;
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       regs->cp0_epc = epc;
+                       break;
+#else
                case bltzal_op:
                case bltzall_op:
                        regs->regs[31] = epc + 8;
@@ -291,6 +334,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                                epc += 8;
                        regs->cp0_epc = epc;
                        break;
+#endif
                }
                break;
 
@@ -313,31 +357,68 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         * These are conditional and in i_format.
         */
        case beq_op:
+#ifndef CONFIG_CPU_MIPSR6
        case beql_op:
+#endif
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
                        if (insn.i_format.opcode == beql_op)
                                ret = BRANCH_LIKELY_TAKEN;
+#endif
                } else
                        epc += 8;
                regs->cp0_epc = epc;
                break;
 
        case bne_op:
+#ifndef CONFIG_CPU_MIPSR6
        case bnel_op:
+#endif
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
+#ifndef CONFIG_CPU_MIPSR6
                        if (insn.i_format.opcode == bnel_op)
                                ret = BRANCH_LIKELY_TAKEN;
+#endif
                } else
                        epc += 8;
                regs->cp0_epc = epc;
                break;
 
        case blez_op: /* not really i_format */
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: blezalc, bgezalc, bgeuc
+                */
+               if (insn.i_format.rt) {
+                       if ((insn.i_format.rs == insn.i_format.rt) ||
+                           !insn.i_format.rs)   /* blezalc, bgezalc */
+                               regs->regs[31] = epc + 4;
+                       epc += 8;
+                       regs->cp0_epc = epc;
+                       break;
+               }
+
+               if ((long)regs->regs[insn.i_format.rs] <= 0) {
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               } else
+                       epc += 8;
+               regs->cp0_epc = epc;
+               break;
+#endif
        case blezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: blezc, bgezc, bgec
+                */
+               epc += 8;
+               regs->cp0_epc = epc;
+               break;
+#else
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] <= 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -347,9 +428,39 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        epc += 8;
                regs->cp0_epc = epc;
                break;
+#endif
 
        case bgtz_op:
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: bltzalc, bgtzalc, bltuc
+                */
+               if (insn.i_format.rt) {
+                       if ((insn.i_format.rs == insn.i_format.rt) ||
+                           !insn.i_format.rs)   /* bltzalc, bgtzalc */
+                               regs->regs[31] = epc + 4;
+                       epc += 8;
+                       regs->cp0_epc = epc;
+                       break;
+               }
+
+               if ((long)regs->regs[insn.i_format.rs] > 0) {
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               } else
+                       epc += 8;
+               regs->cp0_epc = epc;
+               break;
+#endif
        case bgtzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: bltc, bltzc, bgtzc
+                */
+               epc += 8;
+               regs->cp0_epc = epc;
+               break;
+#else
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] > 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -360,10 +471,58 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+       case cbcond0_op:
+               /*
+                *  Compact branches: bovc, beqc, beqzalc
+                */
+
+               /* fall through */
+       case cbcond1_op:
+               /*
+                *  Compact branches: bnvc, bnec, bnezalc
+                */
+               if (insn.i_format.rt && !insn.i_format.rs)  /* beqzalc/bnezalc */
+                       regs->regs[31] = epc + 4;
+               epc += 8;
+               regs->cp0_epc = epc;
+               break;
+#endif
+
        /*
         * And now the FPA/cp1 branch instructions.
         */
        case cop1_op:
+#ifdef CONFIG_CPU_MIPSR6
+               if ((insn.i_format.rs != bc1eqz_op) &&
+                   (insn.i_format.rs != bc1nez_op))
+                       break;
+
+               lose_fpu(1);    /* Save FPU state for the emulator. */
+               reg = insn.i_format.rt;
+               bit = 0;
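+               /* BC1EQZ/BC1NEZ test only bit 0 of the FP register */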
+               switch (insn.i_format.rs) {
+               case bc1eqz_op:
+                       if (!(current->thread.fpu.fpr[reg] & (__u64)1))
+                               bit = 1;
+                       break;
+               case bc1nez_op:
+                       if (current->thread.fpu.fpr[reg] & (__u64)1)
+                               bit = 1;
+                       break;
+               }
+               own_fpu(1);     /* Restore FPU state. */
+               if (bit)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               regs->cp0_epc = epc;
+               break;
+#else
                preempt_disable();
                if (is_fpu_owner())
                        asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
@@ -398,6 +557,39 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        break;
                }
                break;
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+       case bc_op:
+               epc += 8;
+               regs->cp0_epc = epc;
+               break;
+
+       case jump_op:
+               if (insn.i_format.rs)   /* beqzc */
+                       epc = epc + 8;
+               else                    /* jic, no offset shift */
+                       epc = regs->regs[insn.i_format.rt] + insn.i_format.simmediate;
+               regs->cp0_epc = epc;
+               break;
+
+       case balc_op:
+               regs->regs[31] = epc + 4;
+               epc = epc + 4 + (insn.i_format.simmediate << 2);
+               regs->cp0_epc = epc;
+               break;
+
+       case jump2_op:
+               if (insn.i_format.rs)   /* bnezc */
+                       epc = epc + 8;
+               else {                  /* jialc, no offset shift */
+                       regs->regs[31] = epc + 4;
+                       epc = regs->regs[insn.i_format.rt] + insn.i_format.simmediate;
+               }
+               regs->cp0_epc = epc;
+               break;
+#endif
+
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
                if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -435,10 +627,12 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
        return ret;
 
+#ifndef CONFIG_CPU_MIPSR6
 sigill:
        printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
        force_sig(SIGBUS, current);
        return -EFAULT;
+#endif
 }
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
 
index fb1cb727099aa3abc115e26874b130057a271654..52a3a43b48a5789caa5fa8c3d883f97ccf4b9526 100644 (file)
@@ -51,7 +51,7 @@ int cp0_timer_irq_installed;
 #ifndef CONFIG_MIPS_MT_SMTC
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
-       const int r2 = cpu_has_mips_r2;
+       const int r2 = cpu_has_mips_r2 || cpu_has_mips_r6;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();
 
index de3c25ffd9f91e38487303696402b8c97f46bf45..942a65cc6714ccf949dd134830dee41373bec426 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/mipsregs.h>
 #include <asm/setup.h>
 
+#ifndef CONFIG_CPU_MIPSR6
+
 static char bug64hit[] __initdata =
        "reliable operation impossible!\n%s";
 static char nowar[] __initdata =
@@ -317,3 +319,14 @@ void __init check_bugs64(void)
 {
        check_daddi();
 }
+
+#else /* CONFIG_CPU_MIPSR6 */
+
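+/*
+ * DADDI was removed in R6 and R6 cores are not affected by the old
+ * R4000-era daddi/daddiu errata, so the checks become no-ops.
+ */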
+static volatile int daddi_ov __cpuinitdata;
+int daddiu_bug = 0;
+
+void __init check_bugs64_early(void) {}
+
+void __init check_bugs64(void) {}
+
+#endif /* CONFIG_CPU_MIPSR6 */
index 4e678db70bb69fcad5a4eced9c4d0d31b4e08b61..bc00bc440ff01e9fafe14c156ece6aedc4999a32 100644 (file)
@@ -204,6 +204,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M32R2);
                        break;
+               case 2:
+                       c->isa_level = MIPS_CPU_ISA_M32R6;
+                       break;
                default:
                        goto unknown;
                }
@@ -216,6 +219,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M64R2);
                        break;
+               case 2:
+                       c->isa_level = MIPS_CPU_ISA_M64R6;
+                       break;
                }
                break;
        default:
@@ -324,9 +330,44 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c, int pass,
                        if (config4 & MIPS_CONF4_TLBINV) {
                                c->options |= MIPS_CPU_TLBINV;
                                printk("TLBINV/F supported, config4=0x%0x\n",config4);
+                               if (config4 & MIPS_CONF4_TLBINV_FULL)
+                                       c->options |= MIPS_CPU_TLBINV_FULL;
                        }
-                       /* TBW: page walker support starts here */
                }
+#ifdef CONFIG_CPU_MIPSR6
+               c->tlbsizevtlb = ((c->tlbsizevtlb - 1) |
+                       (((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+                         MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
+                        MIPS_CONF1_TLBS_SIZE)) + 1;
+               c->tlbsize = c->tlbsizevtlb;
+
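+               /* Program the FTLB page size to match PAGE_SIZE; if the
+                * FTLB rejects it, disable the FTLB via Config6 where
+                * possible and run with the VTLB only.
+                */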
+               newcf4 = (config4 & ~MIPS_CONF4_FTLBPAGESIZE) |
+                       ((((fls(PAGE_SIZE >> BASIC_PAGE_SHIFT)-1)/2)+1) <<
+                        MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+               write_c0_config4(newcf4);
+               back_to_back_c0_hazard();
+               config4 = read_c0_config4();
+               if (config4 != newcf4) {
+                       printk(KERN_ERR "PAGE_SIZE 0x%0lx is not supported by FTLB (config4=0x%0x)\n",
+                               PAGE_SIZE, config4);
+                       if (conf6available && (cpu_capability & MIPS_FTLB_CAPABLE)) {
+                               printk("Switching FTLB OFF\n");
+                               config6 = read_c0_config6();
+                               write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
+                       }
+                       printk("Total TLB (VTLB) in use: %d\n", c->tlbsizevtlb);
+               } else {
+                       c->tlbsizeftlbsets = 1 <<
+                               ((config4 & MIPS_CONF4_FTLBSETS) >>
+                                MIPS_CONF4_FTLBSETS_SHIFT);
+                       c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+                                             MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+                       c->tlbsize += (c->tlbsizeftlbways *
+                                      c->tlbsizeftlbsets);
+                       printk("V/FTLB found: VTLB=%d, FTLB sets=%d, ways=%d, total TLB=%d\n",
+                               c->tlbsizevtlb, c->tlbsizeftlbsets, c->tlbsizeftlbways, c->tlbsize);
+               }
+#else
                switch (config4 & MIPS_CONF4_MMUEXTDEF) {
                case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
                        c->tlbsize =
@@ -373,6 +414,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c, int pass,
                                c->tlbsizevtlb, c->tlbsizeftlbsets, c->tlbsizeftlbways, c->tlbsize);
                        break;
                }
+#endif
        }
 
        c->kscratch_mask = (config4 >> 16) & 0xff;
@@ -388,6 +430,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 
        if (config5 & MIPS_CONF5_EVA)
                c->options |= MIPS_CPU_EVA;
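+       /* Config5.MRP: MAAR registers are present */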
+       if (config5 & MIPS_CONF5_MRP)
+               c->options2 |= MIPS_CPU_MAAR;
 
        return config5 & MIPS_CONF_M;
 }
@@ -448,8 +492,17 @@ static void decode_configs(struct cpuinfo_mips *c)
 
        mips_probe_watch_registers(c);
 
-       if (cpu_has_mips_r2)
+       if (cpu_has_mips_r2 || cpu_has_mips_r6)
                c->core = read_c0_ebase() & 0x3ff;
+
+       if (cpu_has_rixi) {
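+               /* Try to enable PageGrain.IEC so RI/XI violations raise
+                * the dedicated TLBRI/TLBXI exceptions instead of TLBL. */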
+               write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+               back_to_back_c0_hazard();
+               if (read_c0_pagegrain() & PG_IEC) {
+                       c->options |= MIPS_CPU_RIXI_EXCEPT;
+                       pr_info("TLBRI/TLBXI exceptions are used\n");
+               }
+       }
 }
 
 #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
@@ -1205,7 +1258,7 @@ __cpuinit void cpu_probe(void)
                }
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2 || cpu_has_mips_r6) {
                c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
                /* R2 has Performance Counter Interrupt indicator */
                c->options |= MIPS_CPU_PCI;
index 8bf27492a2385d7262df84b7f5d36a43e1d35bfe..9d3c8b4c6b022764ab39b1e013648012b14e1720 100644 (file)
@@ -196,7 +196,7 @@ syscall_exit_work:
        jal     syscall_trace_leave
        b       resume_userspace
 
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT)
 
 /*
  * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -207,12 +207,17 @@ syscall_exit_work:
        .align  8
 #endif
 LEAF(mips_ihb)
-       .set    mips32r2
+#ifdef CONFIG_CPU_MIPSR6
+       .set    mips64r6
+#else
+       .set    mips32r2
+#endif
        jr.hb   ra
        nop
+       .set    mips0
 #ifdef CONFIG_EVA
        .align  8
 #endif
        END(mips_ihb)
 
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
index ece887634c66dfe8b6f1961cfcbbdcd50c2823cb..268ecf7ed0af58192242a193082654958f15d21c 100644 (file)
@@ -67,8 +67,8 @@ NESTED(except_vec3_generic, 0, sp)
  */
 NESTED(except_vec3_r4000, 0, sp)
        .set    push
-       .set    mips3
-       .set    noat
+       .set    mips3
+       .set    noat
        mfc0    k1, CP0_CAUSE
        li      k0, 31<<2
        andi    k1, k1, 0x7c
@@ -144,7 +144,7 @@ LEAF(__r4k_wait)
        /* end of rollback region (the region size must be power of two) */
 1:
        jr      ra
-       nop
+        nop
        .set    pop
        END(__r4k_wait)
 
@@ -190,9 +190,12 @@ NESTED(handle_int, PT_SIZE, sp)
 #else
        and     k0, ST0_IE
        bnez    k0, 1f
-
+#ifdef CONFIG_CPU_MIPSR6
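+       /* ERETNC (R6): as ERET but does not clear the LLbit */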
+       eretnc
+#else
        eret
 #endif
+#endif
 1:
        .set pop
 #endif
@@ -394,8 +397,13 @@ NESTED(nmi_handler, PT_SIZE, sp)
                and     k0, k0, k1
                mtc0    k0, CP0_STATUS
                ehb
+#ifdef CONFIG_CPU_MIPSR6
+       .set    mips64r6
+       eretnc
+#else
        .set    mips3
        eret
+#endif
        .set    pop
        END(nmi_handler)
 
@@ -583,14 +591,19 @@ isrdhwr:
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
+#ifdef CONFIG_CPU_MIPSR6
+       .set    mips64r6
+       eretnc
+#else
        .set    mips3
        eret
+#endif
        .set    mips0
 #endif
        .set    pop
        END(handle_ri_rdhwr)
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_CPU_MIPSR6)
 /* A temporary overflow handler used by check_daddi(). */
 
        __INIT
index 59d45f9826b0b46ced6f6a40a355d36e84cb29ba..d89af711009a4b990f43c22ee3180460213693ce 100644 (file)
@@ -75,6 +75,7 @@ EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
 EXPORT_SYMBOL(__strncpy_from_user_asm);
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_user);
@@ -82,6 +83,7 @@ EXPORT_SYMBOL(__csum_partial_copy_user);
 EXPORT_SYMBOL(__csum_partial_copy_fromuser);
 EXPORT_SYMBOL(__csum_partial_copy_touser);
 #endif
+#endif /* !CONFIG_CPU_MIPSR6 */
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
index ef6fc20cbb0784f3bd5dfb66550e131d91f2564d..be0c62e28aaa9350523ed32827b0d9bb8211f70f 100644 (file)
@@ -81,10 +81,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                        seq_printf(m, "%s", " mips32r1");
                if (cpu_has_mips32r2)
                        seq_printf(m, "%s", " mips32r2");
+               if (cpu_has_mips32r6)
+                       seq_printf(m, "%s", " mips32r6");
                if (cpu_has_mips64r1)
                        seq_printf(m, "%s", " mips64r1");
                if (cpu_has_mips64r2)
                        seq_printf(m, "%s", " mips64r2");
+               if (cpu_has_mips64r6)
+                       seq_printf(m, "%s", " mips64r6");
                seq_printf(m, "\n");
        }
 
index 44f15d4d7fbde5ccd76fc3302512fa9783b2338a..2847687abc3eaf3a3b150918fe15c87e94e83a6b 100644 (file)
@@ -279,10 +279,20 @@ static inline int is_jump_ins(union mips_instruction *ip)
                return 1;
        if (ip->j_format.opcode == jal_op)
                return 1;
+#ifdef CONFIG_CPU_MIPSR6
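+       /* R6: jic/jialc with rs == 0 are the register jumps */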
+       if (((ip->i_format.opcode == jump_op) ||   /* jic */
+            (ip->i_format.opcode == jump2_op)) && /* jialc */
+           (ip->i_format.rs == 0))
+               return 1;
+       if (ip->r_format.opcode != spec_op)
+               return 0;
+       return ((ip->r_format.func == jalr_op) && !ip->r_format.rt);
+#else
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
 #endif
+#endif
 }
 
 static inline int is_sp_move_ins(union mips_instruction *ip)
index 413f0d0d3efd0b2c7c37a60863a066655e62ca5c..88edb0b3798e1ad83953c7cb202a47f073199fee 100644 (file)
@@ -364,6 +364,7 @@ long arch_ptrace(struct task_struct *child, long request,
                        preempt_enable();
                        break;
                }
+#ifndef CONFIG_CPU_MIPSR6
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;
 
@@ -384,6 +385,7 @@ long arch_ptrace(struct task_struct *child, long request,
                        }
                        tmp = child->thread.dsp.dspcontrol;
                        break;
+#endif /* CONFIG_CPU_MIPSR6 */
                default:
                        tmp = 0;
                        ret = -EIO;
@@ -453,6 +455,7 @@ long arch_ptrace(struct task_struct *child, long request,
                case FPC_CSR:
                        child->thread.fpu.fcr31 = data;
                        break;
+#ifndef CONFIG_CPU_MIPSR6
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;
 
@@ -472,6 +475,7 @@ long arch_ptrace(struct task_struct *child, long request,
                        }
                        child->thread.dsp.dspcontrol = data;
                        break;
+#endif /* CONFIG_CPU_MIPSR6 */
                default:
                        /* The rest are not allowed. */
                        ret = -EIO;
index 9486055ba660319c415b7ec7affdd07b9eff887e..3f123c0c5aeac364b7d24cd65f6c07880967367b 100644 (file)
@@ -164,6 +164,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        preempt_enable();
                        break;
                }
+#ifndef CONFIG_CPU_MIPSR6
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;
 
@@ -184,6 +185,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        }
                        tmp = child->thread.dsp.dspcontrol;
                        break;
+#endif
                default:
                        tmp = 0;
                        ret = -EIO;
@@ -264,6 +266,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                case FPC_CSR:
                        child->thread.fpu.fcr31 = data;
                        break;
+#ifndef CONFIG_CPU_MIPSR6
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;
 
@@ -283,6 +286,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        }
                        child->thread.dsp.dspcontrol = data;
                        break;
+#endif
                default:
                        /* The rest are not allowed. */
                        ret = -EIO;
index be6d9815bcd6ffdaff6cfc0e2d50c853958aa3d1..2ab6800977aa1d2e50790ba332dea7d15db00927 100644 (file)
        .endm
 
        .set    noreorder
+#ifdef CONFIG_CPU_MIPSR6
+       .set    mips64r6
+#else
        .set    mips3
+#endif
 
 LEAF(_save_fp_context)
        cfc1    t1, fcr31
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR6)
        /* Store the 16 odd double precision registers */
        EX      sdc1 $f1, SC_FPREGS+8(a0)
        EX      sdc1 $f3, SC_FPREGS+24(a0)
@@ -54,7 +58,7 @@ LEAF(_save_fp_context)
        EX      sdc1 $f29, SC_FPREGS+232(a0)
        EX      sdc1 $f31, SC_FPREGS+248(a0)
 #else
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
        .set    push
        .set    mips64r2
        .set    noreorder
@@ -168,7 +172,7 @@ LEAF(_save_fp_context32)
  */
 LEAF(_restore_fp_context)
        EX      lw t0, SC_FPC_CSR(a0)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR6)
        EX      ldc1 $f1, SC_FPREGS+8(a0)
        EX      ldc1 $f3, SC_FPREGS+24(a0)
        EX      ldc1 $f5, SC_FPREGS+40(a0)
@@ -187,7 +191,7 @@ LEAF(_restore_fp_context)
        EX      ldc1 $f31, SC_FPREGS+248(a0)
 
 #else
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
        .set    push
        .set    mips64r2
        .set    noreorder
@@ -240,7 +244,11 @@ LEAF(_restore_fp_context)
 #ifdef CONFIG_MIPS32_COMPAT
 LEAF(_restore_fp_context32)
        .set    push
+#ifdef CONFIG_CPU_MIPSR6
+       .set    mips64r6
+#else
        .set    mips64r2
+#endif
        .set    noreorder
 
        /* Restore an o32 sigcontext.  */
index f4abdb653aa31095568144dff905ead9f2dd5d0f..ba5bb5d166810e8656779a50c6934d5e05d95cf1 100644 (file)
@@ -66,7 +66,7 @@
 
        /* Now copy FR from it */
 
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
 #ifdef CONFIG_MIPS_MT_SMTC
 
        li      t3, ST0_FR
        mtc0    t1, CP0_TCSTATUS
 #endif /* CONFIG_MIPS_MT_SMTC */
        move    v0, a0
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        jr.hb   ra
 #else
        _ehb
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_save_double a0 t0 t1                # clobbers t1
@@ -167,7 +167,7 @@ LEAF(_save_fp)
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_restore_double a0 t0 t1             # clobbers t1
@@ -215,7 +215,7 @@ LEAF(_init_fpu)
        sll     t0, t0, 31 - _ST0_FR
        bgez    t0, 1f                          # 16 / 32 register mode?
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        enable_fpu_hazard
        li      t2, FPU_CSR_NAN2008
        cfc1    t3, fcr31
@@ -244,7 +244,7 @@ LEAF(_init_fpu)
 #endif /* CONFIG_64BIT */
 
 #ifdef CONFIG_CPU_MIPS32
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
        sll     t0, t0, 31 - _ST0_FR
        bgez    t0, 2f                          # 16 / 32 register mode?
 
@@ -291,7 +291,7 @@ LEAF(_init_fpu)
        mtc1    t1, $f30
        mtc1    t1, $f31
 
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
        bgez    t0, 1f                          # 16 / 32 register mode?
 
        move    t1, t3                          # move SNaN, DP high word
@@ -331,9 +331,13 @@ LEAF(_init_fpu)
        mthc1   t1, $f31
        .set    pop
 1:
-#endif /* CONFIG_CPU_MIPS32_R2 */
-#else  /* CONFIG_CPU_MIPS32 */
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
+#else  /* !CONFIG_CPU_MIPS32 */
+#ifdef CONFIG_CPU_MIPS64_R6
+       .set    mips64r6
+#else
        .set    mips3
+#endif
        dmtc1   t1, $f0
        dmtc1   t1, $f2
        dmtc1   t1, $f4
index c538d6e01b7b744cb4af330a5de15b78963cfa7f..11c3f7890f6045bd879afae7c92bff2c30d69a2c 100644 (file)
@@ -167,6 +167,9 @@ static void __init print_memory_map(void)
                case BOOT_MEM_RESERVED:
                        printk(KERN_CONT "(reserved)\n");
                        break;
+               case BOOT_MEM_INUSE:
+                       printk(KERN_CONT "(in-use, reserved)\n");
+                       break;
                default:
                        printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
                        break;
@@ -292,6 +295,148 @@ static void __init bootmem_init(void)
 
 #else  /* !CONFIG_SGI_IP27 */
 
+static int maar_last = -2;
+static int nomaar_flag;
+static unsigned long maar_regs[MIPS_MAAR_MAX * 2];
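+/*
+ * Cached MAAR pair values: an even index holds a region upper bound and
+ * the following odd index its lower bound; the second half of the array
+ * holds the high words when 64-bit physical addresses are configured.
+ * Secondary cores replay these values in maar_setup().
+ */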
+
+static int __init early_parse_nomaar(char *p)
+{
+       nomaar_flag = 1;
+       return 0;
+}
+early_param("nomaar", early_parse_nomaar);
+
+static void __init maar_reset(void)
+{
+       int maar = 0;
+
+       do {
+               write_c0_maarindex(maar);
+               back_to_back_c0_hazard();
+               if (read_c0_maarindex() != maar)
+                       return;
+               write_c0_maar(0);
+               back_to_back_c0_hazard();
+       } while (++maar < MIPS_MAAR_MAX);
+}
+
+void __init maar_setup(void)
+{
+       int maar = 0;
+       phys_t low;
+       phys_t upper;
+
+       if (nomaar_flag || !cpu_has_maar)
+               return;
+
+       pr_info("MAAR setup:\n");
+       maar_reset();
+
+       for (maar = 0; maar < maar_last + 2; maar++) {
+               write_c0_maarindex(maar);
+               back_to_back_c0_hazard();
+               if (read_c0_maarindex() != maar) {
+                       pr_err("CPU has only %d MAARs, resetting...\n", maar);
+                       maar_reset();
+                       return;
+               }
+               write_c0_maar(maar_regs[maar]);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+               write_c0_hi_maar(maar_regs[maar + MIPS_MAAR_MAX]);
+#endif
+               back_to_back_c0_hazard();
+               if (maar & 1) {
+                       low = (((phys_t)maar_regs[maar]) << 4) & ~0xffff;
+                       upper = (((phys_t)maar_regs[maar - 1]) << 4) & ~0xffff;
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+                       low += (((phys_t)maar_regs[maar + MIPS_MAAR_MAX]) << 36) & ~MIPS_MAAR_HI_V;
+                       upper += (((phys_t)maar_regs[maar - 1 + MIPS_MAAR_MAX]) << 36) & ~MIPS_MAAR_HI_V;
+#endif
+                       upper = (upper & ~0xffff) + 0xffff;
+                       pr_info("  [%0#10lx-%0#10lx] %s\n", low, upper,
+                               (maar_regs[maar - 1] & MIPS_MAAR_S) ? "speculative" : "");
+               }
+       }
+}
+
+static void __init maar_update(phys_t begin, phys_t end, int speculative)
+{
+       phys_t start;
+
+       /* rounding, let's be conservative if speculative */
+       if (speculative) {
+               if (begin & 0xffff)
+                       start = (begin + 0x10000) & ~0xffff;
+               else
+                       start = begin;
+               end = (end - 0x10000) & ~0xffff;
+       } else {
+               start = begin & ~0xffff;
+               end = (end - 1) & ~0xffff;
+       }
+       if (speculative && (end == start))
+               return;
+
+       maar_regs[maar_last + 1] = ((start >> 4) | MIPS_MAAR_V | (speculative ? MIPS_MAAR_S : 0));
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+       maar_regs[maar_last + 1 + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (start >> 36);
+#endif
+       maar_regs[maar_last] = ((end >> 4) | MIPS_MAAR_V | (speculative ? MIPS_MAAR_S : 0));
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+       maar_regs[maar_last + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (end >> 36);
+#endif
+       return;
+}
+
+void __init add_maar_region(phys_t start, phys_t end, int speculative)
+{
+       phys_t upper;
+       unsigned sbit;
+       int i;
+
+       if (nomaar_flag || !cpu_has_maar)
+               return;
+
+       if (maar_last < 0) {
+               maar_last = 0;
+               maar_update(start, end, speculative);
+               return;
+       }
+
+       /* try merge with previous region */
+       upper = maar_regs[maar_last];
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+       upper |= (((phys_t)maar_regs[maar_last + MIPS_MAAR_MAX] << 32) & ~MIPS_MAAR_HI_V);
+#endif
+       sbit = (upper & MIPS_MAAR_S) ? MIPS_MAAR_S : 0;
+       speculative = speculative ? MIPS_MAAR_S : 0;
+       upper = ((upper << 4) + 0x10000) & ~0xffffUL;
+       if (((upper == (start & ~0xffffUL)) ||
+            (upper == ((start + 0xffffUL) & ~0xffffUL))) &&
+           (sbit == speculative)) {
+               if (speculative)
+                       end = (end - 0x10000) & ~0xffff;
+               else
+                       end = (end - 1) & ~0xffff;
+               maar_regs[maar_last] = (end >> 4) | MIPS_MAAR_V | sbit;
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+               maar_regs[maar_last + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (end >> 36);
+#endif
+               return;
+       }
+
+       maar_last += 2;
+       if (maar_last >= MIPS_MAAR_MAX) {
+               pr_err("Attempt to initialize more than %d MAARs\n", MIPS_MAAR_MAX);
+               for (i = 0; i < MIPS_MAAR_MAX; i++) {
+                       maar_regs[i] = 0;
+                       maar_regs[i + MIPS_MAAR_MAX] = 0;
+               }
+               return;
+       }
+       maar_update(start, end, speculative);
+}
+
 static void __init bootmem_init(void)
 {
        unsigned long reserved_end;
@@ -368,7 +513,6 @@ static void __init bootmem_init(void)
        bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
                                         min_low_pfn, max_low_pfn);
 
-
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;
 
@@ -391,9 +535,17 @@ static void __init bootmem_init(void)
                if (end <= start)
                        continue;
 #endif
+               if ((!nomaar_flag) && cpu_has_maar &&
+                   ((boot_mem_map.map[i].type == BOOT_MEM_RAM) ||
+                    (boot_mem_map.map[i].type == BOOT_MEM_ROM_DATA) ||
+                    (boot_mem_map.map[i].type == BOOT_MEM_INUSE) ||
+                    (boot_mem_map.map[i].type == BOOT_MEM_INIT_RAM)))
+                       add_maar_region(PFN_PHYS(start), PFN_PHYS(end), 1);
 
                memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
        }
+       if (cpu_has_maar && !nomaar_flag)
+               maar_setup();
 
        /*
         * Register fully available low RAM pages with the bootmem allocator.
@@ -698,6 +850,9 @@ static void __init resource_init(void)
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        break;
+               case BOOT_MEM_INUSE:
+                       res->name = "InUse memory";
+                       break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
index 498723fde67d2c6b85b9dc96c937e6f90dfbd15e..d94739dd66d20a7eb5c3abe9ee565d7aa8d574e1 100644 (file)
@@ -142,6 +142,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 #endif
        err |= __put_user(regs->hi, &sc->sc_mdhi);
        err |= __put_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
        if (cpu_has_dsp) {
                err |= __put_user(mfhi1(), &sc->sc_hi1);
                err |= __put_user(mflo1(), &sc->sc_lo1);
@@ -151,6 +152,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                err |= __put_user(mflo3(), &sc->sc_lo3);
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
        }
+#endif
 
        used_math = !!used_math();
        err |= __put_user(used_math, &sc->sc_used_math);
@@ -199,7 +201,9 @@ check_and_restore_fp_context(struct sigcontext __user *sc)
 int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
        unsigned int used_math;
+#ifndef CONFIG_CPU_MIPSR6
        unsigned long treg;
+#endif
        int err = 0;
        int i;
 
@@ -213,6 +217,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 #endif
        err |= __get_user(regs->hi, &sc->sc_mdhi);
        err |= __get_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
        if (cpu_has_dsp) {
                err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
                err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
@@ -222,6 +227,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
                err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
        }
+#endif
 
        for (i = 1; i < 32; i++)
                err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
index a1169200c8fdfae673be59a33334da197f0ea106..89dea57b95ca10a7f03310f53bd8a219ce2bfb42 100644 (file)
@@ -155,6 +155,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
 
        err |= __put_user(regs->hi, &sc->sc_mdhi);
        err |= __put_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
        if (cpu_has_dsp) {
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
                err |= __put_user(mfhi1(), &sc->sc_hi1);
@@ -164,6 +165,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
                err |= __put_user(mfhi3(), &sc->sc_hi3);
                err |= __put_user(mflo3(), &sc->sc_lo3);
        }
+#endif
 
        used_math = !!used_math();
        err |= __put_user(used_math, &sc->sc_used_math);
@@ -195,7 +197,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
 {
        u32 used_math;
        int err = 0;
+#ifndef CONFIG_CPU_MIPSR6
        s32 treg;
+#endif
        int i;
 
        /* Always make any pending restarted system calls return -EINTR */
@@ -204,6 +208,7 @@ static int restore_sigcontext32(struct pt_regs *regs,
        err |= __get_user(regs->cp0_epc, &sc->sc_pc);
        err |= __get_user(regs->hi, &sc->sc_mdhi);
        err |= __get_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
        if (cpu_has_dsp) {
                err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
                err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
@@ -213,6 +218,7 @@ static int restore_sigcontext32(struct pt_regs *regs,
                err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
                err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
        }
+#endif
 
        for (i = 1; i < 32; i++)
                err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
index ab7f0acdf3faf460637b26daa91188549a820027..f69840e9da945bd28e16221c6cc7697d00f84c87 100644 (file)
@@ -212,6 +212,10 @@ static void cmp_init_secondary(void)
        c->tc_id  = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
+       pr_info("BEVVA = %lx\n", read_c0_bevva());
+#endif
+
 #ifdef CONFIG_EVA
        if (gcmp_present)
                BEV_overlay_segment();
@@ -337,7 +341,8 @@ int __init gcmp_probe(unsigned long addr, unsigned long size)
        if (gcmp_present >= 0)
                return gcmp_present;
 
-       if (cpu_has_mips_r2 && (read_c0_config3() & MIPS_CONF3_CMGCR)) {
+       if ((cpu_has_mips_r2 || cpu_has_mips_r6) &&
+           (read_c0_config3() & MIPS_CONF3_CMGCR)) {
                /* try CMGCRBase */
                confaddr = read_c0_cmgcrbase() << 4;
                _gcmp_base = (unsigned long) ioremap_nocache(confaddr, size);
@@ -375,6 +380,16 @@ int __init gcmp_probe(unsigned long addr, unsigned long size)
                        cpu_data[0].options |= MIPS_CPU_CM2_L2SYNC;
                        printk("L2-only SYNC available\n");
                }
+               if (cpu_has_cm2) {
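+                       /* Enable the CM2 L2 data and code prefetchers */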
+                       unsigned int l2p;
+
+                       l2p = GCMPGCB(GCML2P);
+                       if (l2p & GCMP_GCB_GCML2P_NPFT) {
+                               GCMPGCB(GCML2P) = (l2p & ~GCMP_GCB_GCML2P_PAGE_MASK) |
+                                       PAGE_MASK | GCMP_GCB_GCML2P_PFTEN;
+                               GCMPGCB(GCML2PB) |= GCMP_GCB_GCML2PB_CODE_PFTEN;
+                       }
+               }
                return gcmp_present;
        }
 
index 54c2046e11740c8516e2bcaed763aaa4988de92e..6de21fd6d769aacaf8ac4842fd64e4b492777465 100644 (file)
@@ -66,6 +66,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
+extern void maar_setup(void);
+
 /* CPU siblings in MIPS:
  *
  *      SMVP kernel - VPEs on common core are siblings
@@ -115,6 +117,7 @@ asmlinkage __cpuinit void start_secondary(void)
 #endif /* CONFIG_MIPS_MT_SMTC */
        cpu_probe();
        cpu_report();
+       maar_setup();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
index b79d13f95bf01b5d666e18fa22caf0ff66e0188b..291509e3065eb4337ade4b7ab30f56e537d775f0 100644 (file)
@@ -135,8 +135,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__ (
-               "       .set    mips3                                   \n"
-               "       li      %[err], 0                               \n"
+#ifdef CONFIG_CPU_MIPSR6
+               "       .set    mips64r6                                \n"
+#else
+               "       .set    mips3                                   \n"
+#endif
+               "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
                "2:     sc      %[tmp], (%[addr])                       \n"
index 83a4882141a7c5f71373b843beb745add47c853a..1db5d43f13cc28d568978201380d7b8d1c99c06b 100644 (file)
@@ -1218,14 +1218,34 @@ asmlinkage void do_watch(struct pt_regs *regs)
        }
 }
 
+#ifdef CONFIG_CPU_MIPSR6
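+/* Machine check cause strings, indexed by PageGrain.MCCause */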
+static char *mcheck_code[32] = { "non-R6 multiple hit in TLB: Status.TS = 1",
+                         "multiple hit in TLB",
+                         "multiple hit in TLB, speculative access",
+                         "page size mismatch, unsupported FTLB page mask",
+                         "index doesn't match EntryHI.VPN2 position in FTLB",
+                         "HW PageTableWalker: Valid bits mismatch in PTE pair on directory level",
+                         "HW PageTableWalker: Dual page mode is not implemented"
+                       };
+#endif
+
 asmlinkage void do_mcheck(struct pt_regs *regs)
 {
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;
+#ifdef CONFIG_CPU_MIPSR6
+       int code = 0;
+#endif
 
        show_regs(regs);
 
+#ifdef CONFIG_CPU_MIPSR6
+       if (multi_match || (code = read_c0_pagegrain() & PG_MCCAUSE)) {
+               printk("PageGrain: %0x\n", read_c0_pagegrain());
+               printk("BadVAddr: %0*lx\n", field, read_c0_badvaddr());
+#else
        if (multi_match) {
+#endif
                printk("Index   : %0x\n", read_c0_index());
                printk("Pagemask: %0x\n", read_c0_pagemask());
                printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
@@ -1237,6 +1257,9 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 
        show_code((unsigned int __user *) regs->cp0_epc);
 
+#ifdef CONFIG_CPU_MIPSR6
+       panic("Caught Machine Check exception - %s", mcheck_code[code]);
+#else
        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
@@ -1244,6 +1267,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (multi_match) ? "" : "not ");
+#endif
 }
 
 asmlinkage void do_mt(struct pt_regs *regs)
@@ -1782,7 +1806,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);
 
-       if (cpu_has_mips_r2)
+       if (cpu_has_mips_r2 || cpu_has_mips_r6)
                hwrena |= 0x0000000f;
 
        if (!noulri && cpu_has_userlocal)
@@ -1821,7 +1845,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
         *  o read IntCtl.IPTI to determine the timer interrupt
         *  o read IntCtl.IPPCI to determine the performance counter interrupt
         */
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2 || cpu_has_mips_r6) {
                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -1904,6 +1928,8 @@ static int __init set_rdhwr_noopt(char *str)
 
 __setup("rdhwr_noopt", set_rdhwr_noopt);
 
+extern void tlb_do_page_fault_0(void);
+
 void __init trap_init(void)
 {
        extern char except_vec3_generic;
@@ -1925,11 +1951,11 @@ void __init trap_init(void)
        } else {
 #ifdef CONFIG_KVM_GUEST
 #define KVM_GUEST_KSEG0     0x40000000
-        ebase = KVM_GUEST_KSEG0;
+               ebase = KVM_GUEST_KSEG0;
 #else
-        ebase = CKSEG0;
+               ebase = CKSEG0;
 #endif
-               if (cpu_has_mips_r2)
+               if (cpu_has_mips_r2 || cpu_has_mips_r6)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
 
@@ -2031,6 +2057,12 @@ void __init trap_init(void)
                set_except_vector(15, handle_fpe);
 
        set_except_vector(16, handle_ftlb);
+
+       if (cpu_has_rixi && cpu_has_rixi_except) {
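+               /* ExcCodes 19 (TLBRI) and 20 (TLBXI) are raised when
+                * PageGrain.IEC is enabled; handle them as page faults. */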
+               set_except_vector(19, tlb_do_page_fault_0);
+               set_except_vector(20, tlb_do_page_fault_0);
+       }
+
        set_except_vector(22, handle_mdmx);
 
        if (cpu_has_mcheck)
index 3b946e28e84e62b4ba7a07a6c90527b1b4bf90ac..e2569b785be2a0ed6b20d7c85f85c2d27f7f78d8 100644 (file)
@@ -210,7 +210,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=r" (res)                        \
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+                       : "r" (value), "r" (addr), "i" (-EFAULT) \
+                       : "memory");
 
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
@@ -228,7 +229,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
 #endif
 
 #ifdef __LITTLE_ENDIAN
@@ -331,7 +333,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=r" (res)                        \
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+                       : "r" (value), "r" (addr), "i" (-EFAULT) \
+                       : "memory");
 
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
@@ -349,7 +352,491 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif
+
+#elif defined(CONFIG_CPU_MIPSR6)
+/* non-EVA R6 variant */
+
+#ifdef __BIG_ENDIAN
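+/*
+ * Big-endian: the value is assembled most-significant byte first,
+ * shifting left by 8 before each merge; $1 (AT) is the scratch
+ * register, hence the .set noat regions.
+ */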
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 0(%2)\n"               \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlb\t%0, 0(%2)\n"               \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:\tlbu\t$1, 2(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:\tlbu\t$1, 3(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tat\n"                      \
+                       "10:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"                \
+                       "j\t10b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"              \
+                       STR(PTR)"\t2b, 11b\n\t"              \
+                       STR(PTR)"\t3b, 11b\n\t"              \
+                       STR(PTR)"\t4b, 11b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 0(%2)\n"              \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 0(%2)\n"               \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:\tlbu\t$1, 2(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:\tlbu\t$1, 3(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tat\n"                      \
+                       "10:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"                \
+                       "j\t10b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"              \
+                       STR(PTR)"\t2b, 11b\n\t"              \
+                       STR(PTR)"\t3b, 11b\n\t"              \
+                       STR(PTR)"\t4b, 11b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlb\t%0,  0(%2)\n"               \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:\tlbu\t$1, 2(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:\tlbu\t$1, 3(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "5:\tlbu\t$1, 4(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "6:\tlbu\t$1, 5(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "7:\tlbu\t$1, 6(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "8:\tlbu\t$1, 7(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tat\n"                      \
+                       "10:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"                \
+                       "j\t10b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"              \
+                       STR(PTR)"\t2b, 11b\n\t"              \
+                       STR(PTR)"\t3b, 11b\n\t"              \
+                       STR(PTR)"\t4b, 11b\n\t"              \
+                       STR(PTR)"\t5b, 11b\n\t"              \
+                       STR(PTR)"\t6b, 11b\n\t"              \
+                       STR(PTR)"\t7b, 11b\n\t"              \
+                       STR(PTR)"\t8b, 11b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 1(%2)\n\t"             \
+                       "srl\t$1, %1, 0x8\n"                \
+                       "2:\tsb\t$1, 0(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT) \
+                       : "memory");
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 3(%2)\n\t"             \
+                       "srl\t$1, %1, 0x8\n"                \
+                       "2:\tsb\t$1, 2(%2)\n\t"             \
+                       "srl\t$1, $1, 0x8\n"                \
+                       "3:\tsb\t$1, 1(%2)\n\t"             \
+                       "srl\t$1, $1, 0x8\n"                \
+                       "4:\tsb\t$1, 0(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 7(%2)\n\t"             \
+                       "dsrl\t$1, %1, 0x8\n"               \
+                       "2:\tsb\t$1, 6(%2)\n\t"             \
+                       "dsrl\t$1, $1, 0x8\n"               \
+                       "3:\tsb\t$1, 5(%2)\n\t"             \
+                       "dsrl\t$1, $1, 0x8\n"               \
+                       "4:\tsb\t$1, 4(%2)\n\t"             \
+                       "dsrl\t$1, $1, 0x8\n"               \
+                       "5:\tsb\t$1, 3(%2)\n\t"             \
+                       "dsrl\t$1, $1, 0x8\n"               \
+                       "6:\tsb\t$1, 2(%2)\n\t"             \
+                       "dsrl\t$1, $1, 0x8\n"               \
+                       "7:\tsb\t$1, 1(%2)\n\t"             \
+                       "dsrl\t$1, $1, 0x8\n"               \
+                       "8:\tsb\t$1, 0(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 1(%2)\n"               \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlb\t%0,  3(%2)\n"               \
+                       "2:\tlbu\t$1, 2(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tat\n"                      \
+                       "10:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"                \
+                       "j\t10b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"              \
+                       STR(PTR)"\t2b, 11b\n\t"              \
+                       STR(PTR)"\t3b, 11b\n\t"              \
+                       STR(PTR)"\t4b, 11b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 1(%2)\n"              \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 3(%2)\n"               \
+                       "2:\tlbu\t$1, 2(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:\tlbu\t$1, 1(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:\tlbu\t$1, 0(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tat\n"                      \
+                       "10:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"                \
+                       "j\t10b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"              \
+                       STR(PTR)"\t2b, 11b\n\t"              \
+                       STR(PTR)"\t3b, 11b\n\t"              \
+                       STR(PTR)"\t4b, 11b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 7(%2)\n"               \
+                       "2:\tlbu\t$1, 6(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:\tlbu\t$1, 5(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:\tlbu\t$1, 4(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "5:\tlbu\t$1, 3(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "6:\tlbu\t$1, 2(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "7:\tlbu\t$1, 1(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "8:\tlbu\t$1, 0(%2)\n\t"            \
+                       "dsll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tat\n"                      \
+                       "10:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"                \
+                       "j\t10b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"              \
+                       STR(PTR)"\t2b, 11b\n\t"              \
+                       STR(PTR)"\t3b, 11b\n\t"              \
+                       STR(PTR)"\t4b, 11b\n\t"              \
+                       STR(PTR)"\t5b, 11b\n\t"              \
+                       STR(PTR)"\t6b, 11b\n\t"              \
+                       STR(PTR)"\t7b, 11b\n\t"              \
+                       STR(PTR)"\t8b, 11b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 0(%2)\n\t"             \
+                       "srl\t$1,%1, 0x8\n"                 \
+                       "2:\tsb\t$1, 1(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT) \
+                       : "memory");
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 0(%2)\n\t"             \
+                       "srl\t$1,%1, 0x8\n"                 \
+                       "2:\tsb\t$1, 1(%2)\n\t"             \
+                       "srl\t$1,$1, 0x8\n"                 \
+                       "3:\tsb\t$1, 2(%2)\n\t"             \
+                       "srl\t$1,$1, 0x8\n"                 \
+                       "4:\tsb\t$1, 3(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 0(%2)\n\t"             \
+                       "dsrl\t$1,%1, 0x8\n"                \
+                       "2:\tsb\t$1, 1(%2)\n\t"             \
+                       "dsrl\t$1,$1, 0x8\n"                \
+                       "3:\tsb\t$1, 2(%2)\n\t"             \
+                       "dsrl\t$1,$1, 0x8\n"                \
+                       "4:\tsb\t$1, 3(%2)\n\t"             \
+                       "dsrl\t$1,$1, 0x8\n"                \
+                       "5:\tsb\t$1, 4(%2)\n\t"             \
+                       "dsrl\t$1,$1, 0x8\n"                \
+                       "6:\tsb\t$1, 5(%2)\n\t"             \
+                       "dsrl\t$1,$1, 0x8\n"                \
+                       "7:\tsb\t$1, 6(%2)\n\t"             \
+                       "dsrl\t$1,$1, 0x8\n"                \
+                       "8:\tsb\t$1, 7(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
 #endif
 
 #else
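
MIPS R6 removes LWL/LWR/LDL/LDR (and the store counterparts), so the unaligned handler above has to assemble each value one byte at a time, with an exception-table fixup on every byte access. As a rough C model of what the big-endian LoadW sequence computes (an illustrative sketch, not code from the patch; the real macro also reports -EFAULT through 'res' via the .fixup path):

#include <stdint.h>

/* Sketch of the big-endian LoadW macro: four lbu loads merged with
 * shift-and-or, then sign-extended to register width like LW.
 */
static long loadw_be_sketch(const uint8_t *addr)
{
	uint32_t v = 0;
	int i;

	for (i = 0; i < 4; i++)
		v = (v << 8) | addr[i];	/* lbu $1, i(addr); dsll 8; or */
	return (int32_t)v;		/* LW sign-extends on 64-bit */
}
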
@@ -473,7 +960,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=r" (res)                        \
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+                       : "r" (value), "r" (addr), "i" (-EFAULT) \
+                       : "memory");
 
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
@@ -491,7 +979,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
 
 #define     StoreDW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -509,7 +998,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
 #endif
 
 #ifdef __LITTLE_ENDIAN
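
The hunks around here add a "memory" clobber to the store macros: without it, an R6-capable compiler is entitled to assume the asm leaves memory untouched and may keep the stored-to location cached in a register. A minimal sketch of the failure mode, using a hypothetical wrapper (not from the patch):

/* Hypothetical illustration: without the "memory" clobber, GCC may keep
 * *p in a register across the asm and return a stale value below.
 */
static int store_then_reload(int *p, int v)
{
	__asm__ __volatile__(
		"sw	%0, 0(%1)"	/* store happens behind the compiler's back */
		: /* no outputs */
		: "r" (v), "r" (p)
		: "memory");		/* forces *p to be reloaded */
	return *p;
}
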
@@ -630,7 +1120,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                        : "=r" (res)                        \
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+                       : "r" (value), "r" (addr), "i" (-EFAULT) \
+                       : "memory");
 
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
@@ -648,7 +1139,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
 
 #define     StoreDW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -666,7 +1158,8 @@ asmlinkage void do_cpu(struct pt_regs *regs);
                        STR(PTR)"\t2b, 4b\n\t"              \
                        ".previous"                         \
                : "=r" (res)                                \
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
 #endif
 
 #endif
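
All of these macros share the same fault-handling shape: every load/store gets an __ex_table entry pointing at a .fixup stub that writes -EFAULT into 'res' and jumps back past the sequence. Stripped down to a single byte access, the pattern looks like this sketch (LoadB_checked is an illustrative name; STR(PTR) is the kernel's pointer-sized directive used throughout the macros above):

#define LoadB_checked(addr, value, res)			\
	__asm__ __volatile__(				\
		"1:\tlbu\t%0, 0(%2)\n\t"		\
		"li\t%1, 0\n"				\
		"2:\n\t"				\
		".section\t.fixup,\"ax\"\n\t"		\
		"3:\tli\t%1, %3\n\t"			\
		"j\t2b\n\t"				\
		".previous\n\t"				\
		".section\t__ex_table,\"a\"\n\t"	\
		STR(PTR)"\t1b, 3b\n\t"			\
		".previous"				\
		: "=&r" (value), "=r" (res)		\
		: "r" (addr), "i" (-EFAULT));
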
@@ -1375,6 +1868,7 @@ fpu_continue:
                        break;
                return;
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * COP2 is available to implementor for application specific use.
         * It's up to applications to register a notifier chain and do
@@ -1395,6 +1889,7 @@ fpu_continue:
        case sdc2_op:
                cu2_notifier_call_chain(CU2_SDC2_OP, regs);
                break;
+#endif
 
        default:
                /*
index cd15f3005c27bdddb7f665b333a961ab3c7726ff..773002b1fb83f53d96f901d7799094216b6c530b 100644 (file)
@@ -16,6 +16,8 @@
  * end of memory on some systems.  It's also a seriously bad idea on non
  * dma-coherent systems.
  */
+#ifndef CONFIG_CPU_MIPSR6
+
 #if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_MIPS_MALTA)
 #undef CONFIG_CPU_HAS_PREFETCH
 #endif
@@ -1522,3 +1524,5 @@ EXC(    sbe     t0, NBYTES-2(dst), .LTs_exc)
        END(__csum_partial_copy_touser)
 
 #endif  /* CONFIG_EVA */
+
+#endif /* !CONFIG_CPU_MIPSR6 */
index 835996bc91f0e41eddd959f865a723ae4ba38660..20e7b1876dbd42e6ed0d5951211ca978f894bd8a 100644 (file)
@@ -27,6 +27,9 @@
 #ifdef CONFIG_MIPS_MALTA
 #undef CONFIG_CPU_HAS_PREFETCH
 #endif
+#ifdef CONFIG_CPU_MIPSR6
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
 
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #ifdef USE_DOUBLE
 
 #define LOAD   ld
+#ifndef CONFIG_CPU_MIPSR6
 #define LOADL  ldl
 #define LOADR  ldr
 #define STOREL sdl
 #define STORER sdr
+#endif
 #define STORE  sd
 #define ADD    daddu
 #define SUB    dsubu
 #else
 
 #define LOAD   lw
+#ifndef CONFIG_CPU_MIPSR6
 #define LOADL  lwl
 #define LOADR  lwr
 #define STOREL swl
 #define STORER swr
+#endif
 #define STORE  sw
 #define ADD    addu
 #define SUB    subu
@@ -231,9 +238,14 @@ __copy_user_common:
         and    t0, src, ADDRMASK
        PREF(   0, 2*32(src) )
        PREF(   1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
        bnez    t1, .Ldst_unaligned
         nop
        bnez    t0, .Lsrc_unaligned_dst_aligned
+#else
+       or      t0, t0, t1
+       bnez    t0, .Lcopy_unaligned_bytes
+#endif
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
@@ -314,6 +326,7 @@ EXC(        STORE   t0, 0(dst),             .Ls_exc_p1u)
        bne     rem, len, 1b
        .set    noreorder
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
@@ -414,6 +427,7 @@ EXC(        STORE   t0, 0(dst),             .Ls_exc_p1u)
        ADD     dst, dst, NBYTES
        bne     len, rem, 1b
        .set    noreorder
+#endif /* !CONFIG_CPU_MIPSR6 */
 
 .Lcopy_bytes_checklen:
        beqz    len, .Ldone
@@ -442,6 +456,23 @@ EXC(        sb     t0, NBYTES-2(dst), .Ls_exc_p1)
 .Ldone:
        jr      ra
         nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes:
+1:
+       COPY_BYTE(0)
+       COPY_BYTE(1)
+       COPY_BYTE(2)
+       COPY_BYTE(3)
+       COPY_BYTE(4)
+       COPY_BYTE(5)
+       COPY_BYTE(6)
+       COPY_BYTE(7)
+       ADD     src, src, 8
+       b       1b
+        ADD    dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
+
        END(memcpy)
 
 .Ll_exc_copy:
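
With LWL/LWR/SWL/SWR gone on R6, the misaligned memcpy path degenerates to the plain byte loop at .Lcopy_unaligned_bytes above. In C terms the loop amounts to the sketch below, assuming COPY_BYTE(n) copies one byte and leaves the loop when len reaches zero (its definition sits outside this hunk):

#include <stddef.h>

/* C model of .Lcopy_unaligned_bytes; the asm unrolls 8 bytes per
 * iteration, but the effect is a straight byte copy.
 */
static void copy_unaligned_sketch(unsigned char *dst,
				  const unsigned char *src, size_t len)
{
	while (len--)
		*dst++ = *src++;
}
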
index e781b008ff20098ebc0d0009f0d5591b92f64a93..274fb38e8ca2536115e5c2e1dcb6b3fa71f24b73 100644 (file)
@@ -133,6 +133,8 @@ FEXPORT(__bzero)
        .set            at
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
+
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
        EX(LONG_S_L, a1, (a0), .Lfirst_fixup)   /* make word/dword aligned */
@@ -140,9 +142,37 @@ FEXPORT(__bzero)
 #ifdef __MIPSEL__
        EX(LONG_S_R, a1, (a0), .Lfirst_fixup)   /* make word/dword aligned */
 #endif
-       PTR_SUBU        a0, t0                  /* long align ptr */
+       PTR_SUBU        a0, t0                  /* long align ptr */
        PTR_ADDU        a2, t0                  /* correct size */
 
+#else /* CONFIG_CPU_MIPSR6 */
+
+#define STORE_BYTE(N)                   \
+EX( sb, a1, N(a0), .Lbyte_fixup);      \
+    beqz        t0, 0f;                 \
+     PTR_ADDU   t0, 1;
+
+       PTR_ADDU        a2, t0                  /* correct size */
+
+       PTR_ADDU        t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX( sb, a1, 2(a0), .Lbyte_fixup)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX( sb, a1, 6(a0), .Lbyte_fixup)
+#endif
+0:
+       ori             a0, STORMASK
+       xori            a0, STORMASK
+       PTR_ADDIU       a0, STORSIZE
+
+#endif /* CONFIG_CPU_MIPSR6 */
+
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial    /* no block to fill */
@@ -181,7 +211,8 @@ FEXPORT(__bzero)
        andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
-        PTR_ADDU       a0, a2                  /* What's left */
+#ifndef CONFIG_CPU_MIPSR6
+        PTR_ADDU       a0, a2                  /* What's left */
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
        EX(LONG_S_R, a1, -1(a0), .Llast_fixup)
@@ -189,6 +220,22 @@ FEXPORT(__bzero)
 #ifdef __MIPSEL__
        EX(LONG_S_L, a1, -1(a0), .Llast_fixup)
 #endif
+#else /* CONFIG_CPU_MIPSR6 */
+        PTR_SUBU       t0, $0, a2
+       PTR_ADDIU       t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX( sb, a1, 2(a0), .Lbyte_fixup)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX( sb, a1, 6(a0), .Lbyte_fixup)
+#endif
+0:
+#endif /* CONFIG_CPU_MIPSR6 */
 1:     jr              ra
         move           a2, zero
 
@@ -229,6 +276,13 @@ FEXPORT(__bzero)
        jr              ra
         andi           v1, a2, STORMASK
 
+#ifdef CONFIG_CPU_MIPSR6
+.Lbyte_fixup:
+       PTR_SUBU        a2, $0, t0
+       jr              ra
+        PTR_ADDIU      a2, 1
+#endif /* CONFIG_CPU_MIPSR6 */
+
 #ifdef CONFIG_EVA
 /*  ++++++++  */
 /*  EVA stuff */
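
The R6 memset path replaces the single SWL/SDL head store with STORE_BYTE(), which stores bytes one at a time and bails out through the 0: label once t0 counts up to zero. A sketch of the head alignment it performs, assuming 'head' is the number of bytes needed to reach a STORSIZE boundary (illustrative only):

/* Fill the unaligned head byte-by-byte, then fall through to the
 * block-fill loop; each store in the asm carries a .Lbyte_fixup handler.
 */
static unsigned char *fill_head_sketch(unsigned char *p, unsigned char c,
				       unsigned int head)
{
	while (head--)
		*p++ = c;
	return p;		/* now aligned to STORSIZE */
}
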
index 6807f7172eaf46f2841301d64882484f11be861f..70ad0c3d55901a5b621bec024f56670a9e2eea66 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <linux/stringify.h>
 
-#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+#if (!defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)) || defined(CONFIG_MIPS_MT_SMTC)
 
 /*
  * For cli() we have to insert nops to make sure that the new value
@@ -47,7 +47,7 @@ notrace void arch_local_irq_disable(void)
        "       ori     $1, 0x400                                       \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        /* see irqflags.h for inline function */
 #else
        "       mfc0    $1,$12                                          \n"
@@ -83,7 +83,7 @@ notrace unsigned long arch_local_irq_save(void)
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
        "       andi    %[flags], %[flags], 0x400                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        /* see irqflags.h for inline function */
 #else
        "       mfc0    %[flags], $12                                   \n"
@@ -130,9 +130,9 @@ notrace void arch_local_irq_restore(unsigned long flags)
        "       xori    $1, 0x400                                       \n"
        "       or      %[flags], $1                                    \n"
        "       mtc0    %[flags], $2, 1                                 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#elif (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && defined(CONFIG_IRQ_CPU)
        /* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        /* see irqflags.h for inline function */
 #else
        "       mfc0    $1, $12                                         \n"
@@ -192,4 +192,4 @@ notrace void __arch_local_irq_restore(unsigned long flags)
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
 
-#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
+#endif /* (!defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)) || defined(CONFIG_MIPS_MT_SMTC) */
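
These conditionals simply route R6 to the same inline IRQ paths that R2 already uses. For reference, the R2/R6 inline disable in irqflags.h is built around the DI instruction plus an execution-hazard barrier; roughly like this sketch (approximate shape, not the exact header text):

static inline void irq_disable_sketch(void)
{
	__asm__ __volatile__(
		"	.set	push		\n"
		"	.set	noat		\n"
		"	di			\n"	/* atomically clear Status.IE */
		"	ehb			\n"	/* clear the execution hazard */
		"	.set	pop		\n"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}
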
index 969160f4bc755e7b5ff4613dda71387c720351e9..48b55c456bd9ab35fbbe5fda4535aee22a339950 100644 (file)
@@ -162,7 +162,7 @@ static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
                if ((insn.mm_i_format.rt == mm_bc1f_op) ||
                    (insn.mm_i_format.rt == mm_bc1t_op)) {
                        mips32_insn.fb_format.opcode = cop1_op;
-                       mips32_insn.fb_format.bc = bc_op;
+                       mips32_insn.fb_format.bc = rs_bc_op;
                        mips32_insn.fb_format.flag =
                                (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
                } else
@@ -676,12 +676,26 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
  * a single subroutine should be used across both
  * modules.
  */
+/*  Note on R6 compact branches:
+ *      Compact branches don't raise exceptions (besides BC1EQZ/BC1NEZ)
+ *      and don't execute the instruction in the forbidden slot when the
+ *      branch is taken. The return EPC for them can therefore be safely
+ *      set to EPC + 8, because the only way to get a BD precise exception
+ *      is an exception in the forbidden-slot instruction while the branch
+ *      is not taken.
+ *
+ *      Unconditional compact jumps/branches are handled too, for a full
+ *      picture (they never produce a BD precise exception).
+ */
 static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                         unsigned long *contpc)
 {
        union mips_instruction insn = (union mips_instruction)dec_insn.insn;
-       unsigned int fcr31;
        unsigned int bit = 0;
+#ifdef CONFIG_CPU_MIPSR6
+       int reg;
+#else
+       unsigned int fcr31;
+#endif
 
        switch (insn.i_format.opcode) {
        case spec_op:
@@ -691,7 +705,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                regs->cp0_epc + dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
+#ifndef CONFIG_CPU_MIPSR6
                case jr_op:
+#endif
                        *contpc = regs->regs[insn.r_format.rs];
                        return 1;
                        break;
@@ -699,14 +715,22 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                break;
        case bcond_op:
                switch (insn.i_format.rt) {
+#ifdef CONFIG_CPU_MIPSR6
+               case nal_op:    /* MIPSR6: nal == bltzal $0 */
+                       if (insn.i_format.rs)
+                               break;
+#else
                case bltzal_op:
                case bltzall_op:
+#endif
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
                case bltz_op:
+#ifndef CONFIG_CPU_MIPSR6
                case bltzl_op:
+#endif
                        if ((long)regs->regs[insn.i_format.rs] < 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -717,14 +741,22 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                        dec_insn.next_pc_inc;
                        return 1;
                        break;
+#ifdef CONFIG_CPU_MIPSR6
+               case bal_op:    /* MIPSR6: bal == bgezal $0 */
+                       if (insn.i_format.rs)
+                               break;
+#else
                case bgezal_op:
                case bgezall_op:
+#endif
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
                case bgez_op:
+#ifndef CONFIG_CPU_MIPSR6
                case bgezl_op:
+#endif
                        if ((long)regs->regs[insn.i_format.rs] >= 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -754,7 +786,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                return 1;
                break;
        case beq_op:
+#ifndef CONFIG_CPU_MIPSR6
        case beql_op:
+#endif
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -767,7 +801,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                return 1;
                break;
        case bne_op:
+#ifndef CONFIG_CPU_MIPSR6
        case bnel_op:
+#endif
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -780,7 +816,44 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                return 1;
                break;
        case blez_op:
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: blezalc, bgezalc, bgeuc
+                */
+               if (insn.i_format.rt) {
+                       if ((insn.i_format.rs == insn.i_format.rt) ||
+                           !insn.i_format.rs)   /* blezalc, bgezalc */
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               }
+
+               if ((long)regs->regs[insn.i_format.rs] <= 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+#endif
        case blezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: blezc, bgezc, bgec
+                */
+               *contpc = regs->cp0_epc +
+                       dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+               return 1;
+               break;
+#else
                if ((long)regs->regs[insn.i_format.rs] <= 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -791,8 +864,46 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                return 1;
                break;
+#endif
        case bgtz_op:
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: bltzalc, bgtzalc, bltuc
+                */
+               if (insn.i_format.rt) {
+                       if ((insn.i_format.rs == insn.i_format.rt) ||
+                           !insn.i_format.rs)   /* bltzalc, bgtzalc */
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               }
+
+               if ((long)regs->regs[insn.i_format.rs] > 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+#endif
        case bgtzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+               /*
+                *  Compact branches: bltc, bltzc, bgtzc
+                */
+               *contpc = regs->cp0_epc +
+                       dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+               return 1;
+               break;
+#else
                if ((long)regs->regs[insn.i_format.rs] > 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -803,11 +914,62 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                return 1;
                break;
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+       case cbcond0_op:
+               /*
+                *  Compact branches: bovc, beqc, beqzalc
+                */
+
+               /* fall through */
+       case cbcond1_op:
+               /*
+                *  Compact branches: bnvc, bnec, bnezalc
+                */
+               if (insn.i_format.rt && !insn.i_format.rs)  /* beqzalc/bnezalc */
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc;
+               *contpc = regs->cp0_epc +
+                       dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+               return 1;
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+       case cop1_op:
+               if ((insn.i_format.rs != bc1eqz_op) &&
+                   (insn.i_format.rs != bc1nez_op))
+                       break;
+
+               reg = insn.i_format.rt;
+               bit = 0;
+               switch (insn.i_format.rs) {
+               case bc1eqz_op:
+                       /* BC1EQZ: taken if bit 0 of FPR[rt] is zero */
+                       if (!(current->thread.fpu.fpr[reg] & 0x1))
+                               bit = 1;
+                       break;
+               case bc1nez_op:
+                       /* BC1NEZ: taken if bit 0 of FPR[rt] is set */
+                       if (current->thread.fpu.fpr[reg] & 0x1)
+                               bit = 1;
+                       break;
+               }
+               if (bit)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+#else
        case cop0_op:
        case cop1_op:
        case cop2_op:
        case cop1x_op:
-               if (insn.i_format.rs == bc_op) {
+               if (insn.i_format.rs == rs_bc_op) {
                        preempt_disable();
                        if (is_fpu_owner())
                                asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
@@ -846,6 +1008,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                        }
                }
                break;
+#endif
        }
        return 0;
 }
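
The recurring pattern in the R6 cases above is the continuation-PC computation: a taken conditional compact branch targets EPC + 4 + (simm << 2), while the not-taken path resumes at EPC + 8, past the never-executed forbidden slot (pc_inc and next_pc_inc are both 4 here). Reduced to a sketch (r6_cb_contpc is an illustrative name):

static unsigned long r6_cb_contpc(unsigned long epc, long simm, int taken)
{
	if (taken)
		return epc + 4 + (simm << 2);	/* branch target */
	return epc + 8;				/* skip the forbidden slot */
}
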
@@ -1054,7 +1217,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                        break;
 #endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
                case mfhc_op:
                        /* copregister rd -> gpr[rt] */
                        if (MIPSInst_RT(ir) != 0) {
@@ -1137,7 +1300,29 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                        break;
                }
 
-               case bc_op:{
+#ifdef CONFIG_CPU_MIPSR6
+               case bc1eqz_op:
+               case bc1nez_op:
+               {
+                       int reg;
+
+                       if (xcp->cp0_cause & CAUSEF_BD)
+                               return SIGILL;
+
+                       reg = MIPSInst_FT(ir);
+                       cond = 0;
+                       switch (MIPSInst_RS(ir)) {
+                       case bc1eqz_op:
+                               /* BC1EQZ: taken if bit 0 of FPR[ft] is zero */
+                               if (!(current->thread.fpu.fpr[reg] & 0x1))
+                                       cond = 1;
+                               break;
+                       case bc1nez_op:
+                               /* BC1NEZ: taken if bit 0 of FPR[ft] is set */
+                               if (current->thread.fpu.fpr[reg] & 0x1)
+                                       cond = 1;
+                               break;
+                       }
+#else /* !CONFIG_CPU_MIPSR6 */
+               case rs_bc_op:{
                        int likely = 0;
 
                        if (xcp->cp0_cause & CAUSEF_BD)
@@ -1162,6 +1347,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                /* thats an illegal instruction */
                                return SIGILL;
                        }
+#endif /* CONFIG_CPU_MIPSR6 */
 
                        xcp->cp0_cause |= CAUSEF_BD;
                        if (cond) {
@@ -1211,12 +1397,14 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 #endif
                                        /* its one of ours */
                                        goto emul;
+#ifndef CONFIG_CPU_MIPSR6
 #if __mips >= 4
                                case spec_op:
                                        if (MIPSInst_FUNC(ir) == movc_op)
                                                goto emul;
                                        break;
 #endif
+#endif /* CONFIG_CPU_MIPSR6 */
                                }
 
                                /*
@@ -1225,6 +1413,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                 */
                                return mips_dsemul(xcp, ir, contpc);
                        }
+#ifndef CONFIG_CPU_MIPSR6
                        else {
                                /* branch not taken */
                                if (likely) {
@@ -1240,6 +1429,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                         */
                                }
                        }
+#endif /* CONFIG_CPU_MIPSR6 */
                        break;
                }
 
@@ -1265,6 +1455,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
        }
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
 #if __mips >= 4
        case spec_op:
                if (MIPSInst_FUNC(ir) != movc_op)
@@ -1274,6 +1465,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                        xcp->regs[MIPSInst_RD(ir)] =
                                xcp->regs[MIPSInst_RS(ir)];
                break;
+#endif
 #endif
 
        default:
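
Both emulation sites above test the FPU condition for BC1EQZ/BC1NEZ. Per the R6 definition these instructions test bit 0 of FPR[ft], not the whole register; reduced to a sketch (bc1_cond_sketch is an illustrative name):

/* R6 BC1EQZ/BC1NEZ condition: only bit 0 of the FP register matters. */
static int bc1_cond_sketch(unsigned long long fpr, int is_nez)
{
	int bit0 = fpr & 0x1;

	return is_nez ? bit0 : !bit0;
}
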
index 8d48f9fb96ed87159f1cdfa311412b0aea339112..d490926bc8befbe61c83cf9760dfdd144ea0e5c4 100644 (file)
@@ -955,6 +955,17 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
  * very much about what happens in that case.  Usually a segmentation
  * fault will dump the process later on anyway ...
  */
+#ifdef CONFIG_CPU_MIPSR6
+static void local_r4k_flush_cache_sigtramp(void * arg)
+{
+       register unsigned long addr = (unsigned long) arg;
+
+       __asm__ __volatile__(
+               "synci  0(%0)       \n"
+               "sync               \n"
+               ::"r"(addr):"memory");
+               : : "r" (addr) : "memory");
+#else
 static void local_r4k_flush_cache_sigtramp(void * arg)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
@@ -990,6 +1001,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
        if (MIPS_CACHE_SYNC_WAR)
                __asm__ __volatile__ ("sync");
 }
+#endif
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
@@ -1571,7 +1583,8 @@ static void __cpuinit setup_scache(void)
 
        default:
                if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
+                                   MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
 #ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init ()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
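
The R6 sigtramp flush relies on SYNCI, which writes back the D-cache line and invalidates the matching I-cache line by address, with no index ops or cross-CPU IPI needed. The single-instruction version above assumes the trampoline fits in one cache line; a range variant would step by the I-cache line size, roughly like this sketch (illustrative only; assumes a power-of-two 'line'):

static void synci_range_sketch(unsigned long start, unsigned long end,
			       unsigned long line)
{
	unsigned long addr;

	for (addr = start & ~(line - 1); addr < end; addr += line)
		__asm__ __volatile__("synci 0(%0)" : : "r" (addr) : "memory");
	__asm__ __volatile__("sync" : : : "memory");	/* order the SYNCIs */
}
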
index 356e095aeac2563f6f53b85ee5e29761e2301526..9a6b45e2d4a3ba0596ecfb8f25d55d979f7f0fd2 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/tlbmisc.h>
 
 /* Atomicity and interruptability */
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -162,14 +163,16 @@ void *kmap_coherent(struct page *page, unsigned long addr)
        else
                tlb_write_indexed();
 #else
-       tlbidx = read_c0_wired();
+       tlbidx = read_c0_wired() & 0xffff;
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
+       wired_push(vaddr & (PAGE_MASK << 1), entrylo, entrylo, PM_DEFAULT_MASK);
        tlb_write_indexed();
 #endif
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
        EXIT_CRITICAL(flags);
 
        return (void*) vaddr;
@@ -187,16 +190,18 @@ void kunmap_coherent(void)
 
        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
-       wired = read_c0_wired() - 1;
+       wired = (read_c0_wired() & 0xffff) - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
+       wired_pop();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
        EXIT_CRITICAL(flags);
 #endif
        dec_preempt_count();
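
The '& 0xffff' masking added here (and throughout tlb-r4k.c below) reflects the R6 change to the CP0 Wired register, which carries a read-only Limit field in its upper half while only the low bits hold the wired-entry count. Assuming that layout, the count read reduces to this sketch:

/* Wired-entry count on R6: mask off the upper (Limit) half of CP0 Wired;
 * sketch under the assumption that the count lives in bits 15..0.
 */
static inline unsigned int wired_count_sketch(void)
{
	return read_c0_wired() & 0xffff;
}
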
index 4eb8dcfaf1ce1953760059faf151d54fe135d6a4..b0b048da699095341f32293f09b6a6e6e62ff6eb 100644 (file)
@@ -99,7 +99,11 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
                        uasm_i_addiu(buf, T9, ZERO, off);
                uasm_i_daddu(buf, reg1, reg2, T9);
        } else {
+#ifdef CONFIG_CPU_MIPSR6
+               if (off > 0xff) {
+#else
                if (off > 0x7fff) {
+#endif
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                        UASM_i_ADDU(buf, reg1, reg2, T9);
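
The lower 0xff threshold for R6 reflects the re-encoded PREF/CACHE instructions, whose offset field shrinks to 9 signed bits; any computed offset beyond that has to be materialized in a register first. A sketch of the range constraint being assumed here (illustrative name and helper):

/* R6 PREF/CACHE take a 9-bit signed offset (-256..255); anything outside
 * that range must go through a register.
 */
static int r6_pref_offset_fits(long off)
{
	return off >= -0x100 && off <= 0xff;
}
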
index a29a0d961eae4528250a44e8cf4de32d661d7507..6352d407c410745bc3e015a62d87990dcdd3a937 100644 (file)
@@ -106,7 +106,8 @@ static inline int __init mips_sc_probe(void)
 
        /* Ignore anything but MIPSxx processors */
        if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                             MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+                             MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
+                             MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
                return 0;
 
        /* Does this MIPS32/MIPS64 CPU have a config2 register? */
index 4ffebac04f359b58fed9173a06765b29e370d331..1df7eb0664f79d0ce3c9f30b5edbf9af0d15e1f0 100644 (file)
@@ -68,12 +68,24 @@ extern void build_tlb_refill_handler(void);
 
 #endif
 
+int lowest_wired;
+int current_wired;
+static struct WiredEntry {
+       unsigned long   EntryHi;
+       unsigned long   EntryLo0;
+       unsigned long   EntryLo1;
+       unsigned long   PageMask;
+} wired_entry_array[64];
+
+
 void local_flush_tlb_all(void)
 {
        unsigned long flags;
        unsigned long old_ctx;
+       unsigned long old_pagemask;
        int entry;
        int ftlbhighset;
+       int wired;
 
        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
@@ -81,23 +93,41 @@ void local_flush_tlb_all(void)
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
 
-       entry = read_c0_wired();
+       entry = read_c0_wired() & 0xffff;
 
        /* Blast 'em all away. */
        if (cpu_has_tlbinv) {
-               if (current_cpu_data.tlbsizevtlb) {
-                       write_c0_index(0);
-                       mtc0_tlbw_hazard();
-                       tlbinvf();  /* invalide VTLB */
+               old_pagemask = read_c0_pagemask();
+               if (cpu_has_tlbinv_full)
+                       tlbinvf();  /* invalidate whole V/FTLB, index isn't used */
+               else {
+                       if (current_cpu_data.tlbsizevtlb) {
+                               write_c0_index(0);
+                               mtc0_tlbw_hazard();
+                               tlbinvf();  /* invalidate VTLB */
+                       }
+                       ftlbhighset = current_cpu_data.tlbsizevtlb + current_cpu_data.tlbsizeftlbsets;
+                       for (entry=current_cpu_data.tlbsizevtlb;
+                            entry < ftlbhighset;
+                            entry++) {
+                               write_c0_index(entry);
+                               mtc0_tlbw_hazard();
+                               tlbinvf();  /* invalidate one FTLB set */
+                       }
                }
-               ftlbhighset = current_cpu_data.tlbsizevtlb + current_cpu_data.tlbsizeftlbsets;
-               for (entry=current_cpu_data.tlbsizevtlb;
-                    entry < ftlbhighset;
-                    entry++) {
-                       write_c0_index(entry);
+               /* restore wired entries */
+               for (wired = lowest_wired; wired < current_wired; wired++) {
+                       write_c0_index(wired);
+                       tlbw_use_hazard();      /* What is the hazard here? */
+                       write_c0_pagemask(wired_entry_array[wired].PageMask);
+                       write_c0_entryhi(wired_entry_array[wired].EntryHi);
+                       write_c0_entrylo0(wired_entry_array[wired].EntryLo0);
+                       write_c0_entrylo1(wired_entry_array[wired].EntryLo1);
                        mtc0_tlbw_hazard();
-                       tlbinvf();  /* invalide one FTLB set */
+                       tlb_write_indexed();
+                       tlbw_use_hazard();
                }
+               write_c0_pagemask(old_pagemask);
        } else
                while (entry < current_cpu_data.tlbsize) {
                        /* Make sure all entries differ. */
@@ -109,6 +139,7 @@ void local_flush_tlb_all(void)
                }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
 }
@@ -357,6 +388,32 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        EXIT_CRITICAL(flags);
 }
 
+int wired_push(unsigned long entryhi, unsigned long entrylo0,
+              unsigned long entrylo1, unsigned long pagemask)
+{
+       if (current_wired >= current_cpu_data.tlbsizevtlb) {
+               printk("Attempt to push a wired TLB entry beyond the VTLB size\n");
+               BUG();
+       }
+
+       wired_entry_array[current_wired].EntryHi = entryhi;
+       wired_entry_array[current_wired].EntryLo0 = entrylo0;
+       wired_entry_array[current_wired].EntryLo1 = entrylo1;
+       wired_entry_array[current_wired].PageMask = pagemask;
+
+       return current_wired++;
+}
+
+int wired_pop(void)
+{
+       if (current_wired <= lowest_wired) {
+               printk("Attempt to pop a nonexistent wired TLB entry\n");
+               BUG();
+       }
+
+       return --current_wired;
+}
+
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
 {
@@ -369,7 +426,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
-       wired = read_c0_wired();
+       wired = read_c0_wired() & 0xffff;
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
@@ -378,13 +435,42 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
+       wired_push(entryhi, entrylo0, entrylo1, PM_DEFAULT_MASK);
        tlb_write_indexed();
        tlbw_use_hazard();
 
        write_c0_entryhi(old_ctx);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       EXIT_CRITICAL(flags);
+}
+
+void remove_wired_entry(void)
+{
+       unsigned long flags;
+       unsigned long wired;
+       unsigned long old_pagemask;
+       unsigned long old_ctx;
+
+       ENTER_CRITICAL(flags);
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+       wired = read_c0_wired() & 0xffff;
+       write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
+       write_c0_pagemask(PM_DEFAULT_MASK);
+       write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+       wired_pop();
+       tlb_write_indexed();
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
-       local_flush_tlb_all();
+       mtc0_tlbw_hazard();
        EXIT_CRITICAL(flags);
 }
 
@@ -400,6 +486,7 @@ int __init has_transparent_hugepage(void)
        back_to_back_c0_hazard();
        mask = read_c0_pagemask();
        write_c0_pagemask(PM_DEFAULT_MASK);
+       mtc0_tlbw_hazard();
 
        EXIT_CRITICAL(flags);
 
@@ -444,6 +531,7 @@ void __cpuinit tlb_init(void)
 #endif
                write_c0_pagegrain(pg);
        }
+       mtc0_tlbw_hazard();
 
        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();
@@ -455,7 +543,10 @@ void __cpuinit tlb_init(void)
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired-1);
+                       mtc0_tlbw_hazard();
                        printk("Restricting TLB to %d entries\n", ntlb);
+                       current_wired = wired;
+                       lowest_wired = wired;
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }
index a26891cccbc67c27aa79272ffeabe9908f38cefa..4d8233da657aebfacfb73bd1ff62b4f4343e9398 100644 (file)
@@ -512,7 +512,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2 || cpu_has_mips_r6) {
                /*
                 * The architecture spec says an ehb is required here,
                 * but a number of cores do not have the hazard and
@@ -982,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
        uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
 
-       if (cpu_has_mips32r2) {
+       if (cpu_has_mips32r2 || cpu_has_mips32r6) {
                uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
                uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
                return;
@@ -1025,7 +1025,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_64BIT
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2 || cpu_has_mips_r6) {
-               /* For MIPS32R2, PTE ptr offset is obtained from BadVAddr */
+               /* For MIPS32 R2/R6, PTE ptr offset is obtained from BadVAddr */
                UASM_i_MFC0(p, tmp, C0_BADVADDR);
                UASM_i_LW(p, ptr, 0, ptr);
@@ -1948,7 +1948,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
 
-       if (cpu_has_rixi) {
+       if (cpu_has_rixi && !cpu_has_rixi_except) {
                /*
                 * If the page is not _PAGE_VALID, RI or XI could not
                 * have triggered it.  Skip the expensive test..
@@ -1963,6 +1963,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
                uasm_i_nop(&p);
 
                uasm_i_tlbr(&p);
+
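+               /*
+                * tlbr rewrites the EntryLo registers; R2/R6 cores need an
+                * ehb before reading them back.  The Octeon cases fall past
+                * the ehb since those cores have no such hazard.
+                */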
+               switch (current_cpu_type()) {
+               default:
+                       if (cpu_has_mips_r2 || cpu_has_mips_r6) {
+                               uasm_i_ehb(&p);
+
+               case CPU_CAVIUM_OCTEON:
+               case CPU_CAVIUM_OCTEON_PLUS:
+               case CPU_CAVIUM_OCTEON2:
+                               break;
+                       }
+               }
+
                /* Examine  entrylo 0 or 1 based on ptr. */
                if (use_bbit_insns()) {
                        uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
@@ -2002,7 +2015,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
        build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
        build_tlb_probe_entry(&p);
 
-       if (cpu_has_rixi) {
+       if (cpu_has_rixi && !cpu_has_rixi_except) {
                /*
                 * If the page is not _PAGE_VALID, RI or XI could not
                 * have triggered it.  Skip the expensive test..
@@ -2017,6 +2030,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
                uasm_i_nop(&p);
 
                uasm_i_tlbr(&p);
+
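+               /*
+                * tlbr rewrites the EntryLo registers; R2/R6 cores need an
+                * ehb before reading them back.  The Octeon cases fall past
+                * the ehb since those cores have no such hazard.
+                */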
+               switch (current_cpu_type()) {
+               default:
+                       if (cpu_has_mips_r2 || cpu_has_mips_r6) {
+                               uasm_i_ehb(&p);
+
+               case CPU_CAVIUM_OCTEON:
+               case CPU_CAVIUM_OCTEON_PLUS:
+               case CPU_CAVIUM_OCTEON2:
+                               break;
+                       }
+               }
+
                /* Examine  entrylo 0 or 1 based on ptr. */
                if (use_bbit_insns()) {
                        uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
index 5fcdd8fe3e83f683f5fb26ae73c378f6280e2b01..a65c6cc11b7ee09344eeba770314e3c8dea7e729 100644 (file)
         | (e) << RE_SH                                         \
         | (f) << FUNC_SH)
 
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, f)                                       \
+       ((a) << OP_SH                                           \
+        | (b) << RS_SH                                         \
+        | (c) << RT_SH                                         \
+        | (d) << SIMM9_SH                                      \
+        | (f) << FUNC_SH)
+
 /* Define these when we are not the ISA the kernel is being compiled with. */
 #ifdef CONFIG_CPU_MICROMIPS
 #define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
@@ -54,8 +62,10 @@ static struct insn insn_table[] __uasminitdata = {
        { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
        { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
        { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
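+       /*
+        * Octeon's bbit0/bbit1 use the LWC2/SWC2 opcodes, which R6
+        * reassigns (to BC/BALC), so they are unavailable on R6.
+        */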
+#ifndef CONFIG_CPU_MIPSR6
        { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
        { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#endif
        { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
        { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
        { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
@@ -63,7 +73,11 @@ static struct insn insn_table[] __uasminitdata = {
        { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
        { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
        { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
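+       /*
+        * R6 re-encodes CACHE (and, below, LL/SC, LLD/SCD and PREF) as
+        * spec3 functions with a 9-bit signed offset in place of the
+        * pre-R6 16-bit offset.
+        */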
+#ifdef CONFIG_CPU_MIPSR6
+       { insn_cache,  M6(spec3_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+#else
        { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#endif
        { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
        { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -84,11 +98,20 @@ static struct insn insn_table[] __uasminitdata = {
        { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
        { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
        { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
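+       /*
+        * R6 drops the dedicated JR encoding; JALR with rd = $zero
+        * provides the jump-register semantics instead.
+        */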
+#ifdef CONFIG_CPU_MIPSR6
+       { insn_jr,  M(spec_op, 0, 0, 0, 0, jalr_op),  RS },
+#else
        { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+#endif
        { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+#ifdef CONFIG_CPU_MIPSR6
+       { insn_lld,  M6(spec3_op, 0, 0, 0, lld6_op),  RS | RT | SIMM9 },
+       { insn_ll,  M6(spec3_op, 0, 0, 0, ll6_op),  RS | RT | SIMM9 },
+#else
        { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#endif
        { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
        { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -96,11 +119,20 @@ static struct insn insn_table[] __uasminitdata = {
        { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
        { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
        { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+#ifdef CONFIG_CPU_MIPSR6
+       { insn_pref,  M6(spec3_op, 0, 0, 0, pref6_op),  RS | RT | SIMM9 },
+#else
        { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#endif
        { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
        { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+#ifdef CONFIG_CPU_MIPSR6
+       { insn_scd,  M6(spec3_op, 0, 0, 0, scd6_op),  RS | RT | SIMM9 },
+       { insn_sc,  M6(spec3_op, 0, 0, 0, sc6_op),  RS | RT | SIMM9 },
+#else
        { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#endif
        { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
        { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
@@ -137,6 +169,14 @@ static inline __uasminit u32 build_jimm(u32 arg)
        return (arg >> 2) & JIMM_MASK;
 }
 
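+/*
+ * Pack a 9-bit signed immediate (-256..255) into bits 15..7, as used
+ * by the R6 spec3 encodings of CACHE, PREF, LL/SC and LLD/SCD.
+ */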
+static inline __uasminit u32 build_simm9(s32 arg)
+{
+       WARN(((arg > 0xff) || (arg < -0x100)),
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
 /*
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
@@ -181,6 +221,8 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
                op |= build_set(va_arg(ap, u32));
        if (ip->fields & SCIMM)
                op |= build_scimm(va_arg(ap, u32));
+       if (ip->fields & SIMM9)
+               op |= build_simm9(va_arg(ap, u32));
        va_end(ap);
 
        **buf = op;
index 7eb5e4355d25c467fd1241ffda14715dfc69c1c7..7125f64cc2cdda9e0b3309c16806d22978422149 100644 (file)
@@ -24,7 +24,8 @@ enum fields {
        JIMM = 0x080,
        FUNC = 0x100,
        SET = 0x200,
-       SCIMM = 0x400
+       SCIMM = 0x400,
+       SIMM9 = 0x800
 };
 
 #define OP_MASK                0x3f
@@ -41,6 +42,8 @@ enum fields {
 #define FUNC_SH                0
 #define SET_MASK       0x7
 #define SET_SH         0
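+/* R6 9-bit signed offset field, bits 15..7 */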
+#define SIMM9_SH        7
+#define SIMM9_MASK      0x1ff
 
 enum opcode {
        insn_invalid,
index 2ac4f5c7df78bbd875b881cdf917749b5c8fa352..e6ccc86b4905c2c91ff394e5063ab20d0a3fb882 100644 (file)
@@ -201,7 +201,11 @@ static inline int clz(unsigned long x)
 {
        __asm__(
        "       .set    push                                    \n"
+#ifdef CONFIG_CPU_MIPSR6
+       "       .set    mips64r6                                \n"
+#else
        "       .set    mips32                                  \n"
+#endif
        "       clz     %0, %1                                  \n"
        "       .set    pop                                     \n"
        : "=r" (x)
index a3503b654f94c46ab0a6cef386f7f7c07698974f..05ffdcf7aa03af6b5db0237889667d2c4c061995 100644 (file)
@@ -26,6 +26,7 @@ static char *mtypes[3] = {
        "Dont use memory",
        "YAMON PROM memory",
        "Free memmory",
+       "Memory in use",
 };
 #endif
 
@@ -133,7 +134,7 @@ static inline fw_memblock_t * __init prom_getmdesc(void)
        mdesc[2].base = mdesc[0].base + 0x000f0000UL;
        mdesc[2].size = 0x00010000;
 
-       mdesc[3].type = fw_dontuse;
+       mdesc[3].type = fw_inuse;
        mdesc[3].base = mdesc[0].base + 0x00100000UL;
        mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - 0x00100000UL;
 
@@ -184,6 +185,8 @@ static int __init fw_memtype_classify(unsigned int type)
                return BOOT_MEM_RAM;
        case fw_code:
                return BOOT_MEM_ROM_DATA;
+       case fw_inuse:
+               return BOOT_MEM_INUSE;
        default:
                return BOOT_MEM_RESERVED;
        }
index 08ae6b2773c3b104826d69e29d9d029a80866f56..fc34ef5d15fae620b96cda0c5a35a99343a51f4b 100644 (file)
@@ -370,6 +370,10 @@ void __init plat_mem_setup(void)
 {
        unsigned int i;
 
+#ifdef CONFIG_CPU_MIPSR6
+       pr_info("BEVVA = %lx\n", read_c0_bevva());
+#endif
+
 #ifdef CONFIG_EVA
 #ifdef CONFIG_MIPS_CMP
        if (gcmp_present)
index 811564aab262b6382402b1c2a89d5aa711bf33d9..aaf642ab7db1f1ced76c17b4bacb6d2e08e4ebee 100644 (file)
@@ -244,7 +244,7 @@ static int mipsxx_perfcount_handler(void)
        unsigned int counter;
        int handled = IRQ_NONE;
 
-       if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
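+       /* Cause.PCI (bit 26) flags a pending performance counter interrupt. */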
+       if ((cpu_has_mips_r2 || cpu_has_mips_r6) && !(read_c0_cause() & (1 << 26)))
                return handled;
 
        switch (counters) {