Merge branch 'devel' of git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa...
author Russell King <rmk+kernel@arm.linux.org.uk>
Wed, 22 Dec 2010 22:46:24 +0000 (22:46 +0000)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Wed, 22 Dec 2010 22:46:24 +0000 (22:46 +0000)
35 files changed:
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/include/asm/hw_breakpoint.h
arch/arm/include/asm/system.h
arch/arm/include/asm/traps.h
arch/arm/kernel/Makefile
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/ftrace.c
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/irq.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_event_v6.c [new file with mode: 0644]
arch/arm/kernel/perf_event_v7.c [new file with mode: 0644]
arch/arm/kernel/perf_event_xscale.c [new file with mode: 0644]
arch/arm/kernel/ptrace.c
arch/arm/kernel/smp.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-dove/Kconfig
arch/arm/mach-dove/Makefile
arch/arm/mach-dove/cm-a510.c [new file with mode: 0644]
arch/arm/mach-dove/include/mach/dove.h
arch/arm/mach-dove/include/mach/gpio.h
arch/arm/mach-dove/mpp.c [new file with mode: 0644]
arch/arm/mach-dove/mpp.h [new file with mode: 0644]
arch/arm/mach-kirkwood/Kconfig
arch/arm/mach-kirkwood/ts219-setup.c
arch/arm/mach-kirkwood/ts41x-setup.c
arch/arm/mach-msm/Kconfig
arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
arch/arm/mach-orion5x/Kconfig
arch/arm/mach-orion5x/Makefile
arch/arm/mach-orion5x/ls-chl-setup.c [new file with mode: 0644]
arch/arm/plat-versatile/sched-clock.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e2c79a2..d571cdb 100644
@@ -14,6 +14,7 @@ config ARM
        select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
        select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
        select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+       select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZO
@@ -1205,10 +1206,11 @@ config SMP
        depends on EXPERIMENTAL
        depends on GENERIC_CLOCKEVENTS
        depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
-                MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
-                ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
+                MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
+                ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
+                ARCH_MSM_SCORPIONMP
        select USE_GENERIC_SMP_HELPERS
-       select HAVE_ARM_SCU
+       select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
        help
          This enables support for systems with more than one CPU. If you have
          a system with only one CPU, like most personal computers, say N. If
@@ -1283,6 +1285,7 @@ config NR_CPUS
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
        depends on SMP && HOTPLUG && EXPERIMENTAL
+       depends on !ARCH_MSM
        help
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
@@ -1291,7 +1294,7 @@ config LOCAL_TIMERS
        bool "Use local timer interrupts"
        depends on SMP
        default y
-       select HAVE_ARM_TWD
+       select HAVE_ARM_TWD if !ARCH_MSM_SCORPIONMP
        help
          Enable support for local timers on SMP platforms, rather then the
          legacy IPI broadcast method.  Local timers allows the system
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 2fd0b99..eac6208 100644
@@ -23,7 +23,7 @@ config STRICT_DEVMEM
 config FRAME_POINTER
        bool
        depends on !THUMB2_KERNEL
-       default y if !ARM_UNWIND
+       default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
        help
          If you say N here, the resulting kernel will be slightly smaller and
          faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 4d8ae9d..f389b27 100644
@@ -20,8 +20,8 @@ struct arch_hw_breakpoint_ctrl {
 struct arch_hw_breakpoint {
        u32     address;
        u32     trigger;
-       struct perf_event *suspended_wp;
-       struct arch_hw_breakpoint_ctrl ctrl;
+       struct  arch_hw_breakpoint_ctrl step_ctrl;
+       struct  arch_hw_breakpoint_ctrl ctrl;
 };
 
 static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 1120f18..ec4327a 100644
 #include <asm/outercache.h>
 
 #define __exception    __attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry  __irq_entry
+#else
+#define __exception_irq_entry  __exception
+#endif
 
 struct thread_info;
 struct task_struct;
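
For reference, __exception and __exception_irq_entry above are section annotations: the marked functions end up in dedicated linker sections (.exception.text, or .irqentry.text when the graph tracer is enabled) so they can be recognised later. A minimal user-space sketch of that compiler idiom follows; the section name and function below are invented for illustration and are not part of this patch.

#include <stdio.h>

/* Place a function into a named text section, in the same way __exception
 * tags functions for .exception.text.  The section name here is made up. */
#define __demo_entry __attribute__((section(".demo.text")))

static void __demo_entry demo_handler(void)
{
        puts("running from .demo.text");
}

int main(void)
{
        demo_handler();
        return 0;
}
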
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 491960b..124475a 100644
@@ -15,13 +15,32 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+       extern char __irqentry_text_start[];
+       extern char __irqentry_text_end[];
+
+       return ptr >= (unsigned long)&__irqentry_text_start &&
+              ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+       return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
        extern char __exception_text_start[];
        extern char __exception_text_end[];
+       int in;
+
+       in = ptr >= (unsigned long)&__exception_text_start &&
+            ptr < (unsigned long)&__exception_text_end;
 
-       return ptr >= (unsigned long)&__exception_text_start &&
-              ptr < (unsigned long)&__exception_text_end;
+       return in ? : __in_irqentry_text(ptr);
 }
 
 extern void __init early_trap_init(void);
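
The in_exception_text()/__in_irqentry_text() helpers above simply test whether an address lies between a pair of linker-provided start/end symbols. A stand-alone sketch of that range check, with an ordinary array standing in for the __irqentry_text_start/__irqentry_text_end symbols (illustrative only, not kernel code):

#include <stdio.h>

/* Stand-ins for the start/end linker symbols that bound a text section. */
static char demo_section[64];
#define demo_section_start (&demo_section[0])
#define demo_section_end   (&demo_section[sizeof(demo_section)])

static int in_demo_section(unsigned long ptr)
{
        return ptr >= (unsigned long)demo_section_start &&
               ptr <  (unsigned long)demo_section_end;
}

int main(void)
{
        printf("inside:  %d\n", in_demo_section((unsigned long)demo_section_start + 8));
        printf("outside: %d\n", in_demo_section((unsigned long)demo_section_end + 8));
        return 0;
}
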
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index b0f11fa..c73abe4 100644
@@ -5,7 +5,7 @@
 CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o        := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
-ifdef CONFIG_DYNAMIC_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
@@ -33,6 +33,7 @@ obj-$(CONFIG_SMP)             += smp.o
 obj-$(CONFIG_HAVE_ARM_SCU)     += smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)     += smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)          += kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)       += atags.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb96a7d..36199ff 100644
@@ -198,6 +198,7 @@ __dabt_svc:
        @
        @ set desired IRQ state, then call main handler
        @
+       debug_entry r1
        msr     cpsr_c, r9
        mov     r2, sp
        bl      do_DataAbort
@@ -324,6 +325,7 @@ __pabt_svc:
 #else
        bl      CPU_PABORT_HANDLER
 #endif
+       debug_entry r1
        msr     cpsr_c, r9                      @ Maybe enable interrupts
        mov     r2, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler
@@ -439,6 +441,7 @@ __dabt_usr:
        @
        @ IRQs on, then call the main handler
        @
+       debug_entry r1
        enable_irq
        mov     r2, sp
        adr     lr, BSYM(ret_from_exception)
@@ -703,6 +706,7 @@ __pabt_usr:
 #else
        bl      CPU_PABORT_HANDLER
 #endif
+       debug_entry r1
        enable_irq                              @ Enable interrupts
        mov     r2, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8bfa987..aae802e 100644
@@ -141,98 +141,170 @@ ENDPROC(ret_from_fork)
 #endif
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(__gnu_mcount_nc)
-       mov     ip, lr
-       ldmia   sp!, {lr}
-       mov     pc, ip
-ENDPROC(__gnu_mcount_nc)
+.macro __mcount suffix
+       mcount_enter
+       ldr     r0, =ftrace_trace_function
+       ldr     r2, [r0]
+       adr     r0, .Lftrace_stub
+       cmp     r0, r2
+       bne     1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       ldr     r1, =ftrace_graph_return
+       ldr     r2, [r1]
+       cmp     r0, r2
+       bne     ftrace_graph_caller\suffix
+
+       ldr     r1, =ftrace_graph_entry
+       ldr     r2, [r1]
+       ldr     r0, =ftrace_graph_entry_stub
+       cmp     r0, r2
+       bne     ftrace_graph_caller\suffix
+#endif
 
-ENTRY(ftrace_caller)
-       stmdb   sp!, {r0-r3, lr}
-       mov     r0, lr
+       mcount_exit
+
+1:     mcount_get_lr   r1                      @ lr of instrumented func
+       mov     r0, lr                          @ instrumented function
+       sub     r0, r0, #MCOUNT_INSN_SIZE
+       adr     lr, BSYM(2f)
+       mov     pc, r2
+2:     mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+       mcount_enter
+
+       mcount_get_lr   r1                      @ lr of instrumented func
+       mov     r0, lr                          @ instrumented function
        sub     r0, r0, #MCOUNT_INSN_SIZE
-       ldr     r1, [sp, #20]
 
-       .global ftrace_call
-ftrace_call:
+       .globl ftrace_call\suffix
+ftrace_call\suffix:
        bl      ftrace_stub
-       ldmia   sp!, {r0-r3, ip, lr}
-       mov     pc, ip
-ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+       mov     r0, r0
+#endif
+
+       mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+       sub     r0, fp, #4              @ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+       @ called from __ftrace_caller, saved in mcount_enter
+       ldr     r1, [sp, #16]           @ instrumented routine (func)
+#else
+       @ called from __mcount, untouched in lr
+       mov     r1, lr                  @ instrumented routine (func)
+#endif
+       sub     r1, r1, #MCOUNT_INSN_SIZE
+       mov     r2, fp                  @ frame pointer
+       bl      prepare_ftrace_return
+       mcount_exit
+.endm
 
 #ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+       stmdb   sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+       ldr     \reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+       ldr     lr, [fp, #-4]
+       ldmia   sp!, {r0-r3, pc}
+.endm
+
 ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
        stmdb   sp!, {lr}
        ldr     lr, [fp, #-4]
        ldmia   sp!, {pc}
+#else
+       __mcount _old
+#endif
 ENDPROC(mcount)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller_old)
-       stmdb   sp!, {r0-r3, lr}
-       ldr     r1, [fp, #-4]
-       mov     r0, lr
-       sub     r0, r0, #MCOUNT_INSN_SIZE
-
-       .globl ftrace_call_old
-ftrace_call_old:
-       bl      ftrace_stub
-       ldr     lr, [fp, #-4]                   @ restore lr
-       ldmia   sp!, {r0-r3, pc}
+       __ftrace_caller _old
 ENDPROC(ftrace_caller_old)
 #endif
 
-#else
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+       __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
 
-ENTRY(__gnu_mcount_nc)
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
        stmdb   sp!, {r0-r3, lr}
-       ldr     r0, =ftrace_trace_function
-       ldr     r2, [r0]
-       adr     r0, .Lftrace_stub
-       cmp     r0, r2
-       bne     gnu_trace
+.endm
+
+.macro mcount_get_lr reg
+       ldr     \reg, [sp, #20]
+.endm
+
+.macro mcount_exit
        ldmia   sp!, {r0-r3, ip, lr}
        mov     pc, ip
+.endm
 
-gnu_trace:
-       ldr     r1, [sp, #20]                   @ lr of instrumented routine
-       mov     r0, lr
-       sub     r0, r0, #MCOUNT_INSN_SIZE
-       adr     lr, BSYM(1f)
-       mov     pc, r2
-1:
-       ldmia   sp!, {r0-r3, ip, lr}
+ENTRY(__gnu_mcount_nc)
+#ifdef CONFIG_DYNAMIC_FTRACE
+       mov     ip, lr
+       ldmia   sp!, {lr}
        mov     pc, ip
+#else
+       __mcount
+#endif
 ENDPROC(__gnu_mcount_nc)
 
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * This is under an ifdef in order to force link-time errors for people trying
- * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
- * mcount.
- */
-ENTRY(mcount)
-       stmdb   sp!, {r0-r3, lr}
-       ldr     r0, =ftrace_trace_function
-       ldr     r2, [r0]
-       adr     r0, ftrace_stub
-       cmp     r0, r2
-       bne     trace
-       ldr     lr, [fp, #-4]                   @ restore lr
-       ldmia   sp!, {r0-r3, pc}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+       __ftrace_caller
+ENDPROC(ftrace_caller)
+#endif
 
-trace:
-       ldr     r1, [fp, #-4]                   @ lr of instrumented routine
-       mov     r0, lr
-       sub     r0, r0, #MCOUNT_INSN_SIZE
-       mov     lr, pc
-       mov     pc, r2
-       ldr     lr, [fp, #-4]                   @ restore lr
-       ldmia   sp!, {r0-r3, pc}
-ENDPROC(mcount)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
 #endif
 
-#endif /* CONFIG_DYNAMIC_FTRACE */
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .globl return_to_handler
+return_to_handler:
+       stmdb   sp!, {r0-r3}
+       mov     r0, fp                  @ frame pointer
+       bl      ftrace_return_to_handler
+       mov     lr, r0                  @ r0 has real ret addr
+       ldmia   sp!, {r0-r3}
+       mov     pc, lr
+#endif
 
 ENTRY(ftrace_stub)
 .Lftrace_stub:
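
The __mcount macro above reduces to a simple dispatch: if ftrace_trace_function still points at ftrace_stub, fall straight through; otherwise call the tracer with the instrumented function's address and its caller's address. A plain-C sketch of that dispatch, using invented stand-ins rather than the real kernel symbols:

#include <stdio.h>

/* Illustrative stand-ins for ftrace_stub and ftrace_trace_function. */
static void demo_stub(unsigned long ip, unsigned long parent_ip) { }
static void (*demo_trace_function)(unsigned long, unsigned long) = demo_stub;

static void demo_tracer(unsigned long ip, unsigned long parent_ip)
{
        printf("traced call: %#lx called from %#lx\n", ip, parent_ip);
}

/* What __mcount does on entry to every instrumented function. */
static void demo_mcount(unsigned long ip, unsigned long parent_ip)
{
        if (demo_trace_function == demo_stub)
                return;
        demo_trace_function(ip, parent_ip);
}

int main(void)
{
        demo_mcount(0x1000, 0x2000);            /* no tracer: nothing printed */
        demo_trace_function = demo_tracer;      /* "register" a tracer        */
        demo_mcount(0x1000, 0x2000);            /* tracer runs                */
        return 0;
}
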
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index d93f976..ae94649 100644
        .endm
 #endif /* !CONFIG_THUMB2_KERNEL */
 
+       @
+       @ Debug exceptions are taken as prefetch or data aborts.
+       @ We must disable preemption during the handler so that
+       @ we can access the debug registers safely.
+       @
+       .macro  debug_entry, fsr
+#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
+       ldr     r4, =0x40f              @ mask out fsr.fs
+       and     r5, r4, \fsr
+       cmp     r5, #2                  @ debug exception
+       bne     1f
+       get_thread_info r10
+       ldr     r6, [r10, #TI_PREEMPT]  @ get preempt count
+       add     r11, r6, #1             @ increment it
+       str     r11, [r10, #TI_PREEMPT]
+1:
+#endif
+       .endm
+
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
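
The mask-and-compare in debug_entry above corresponds to the following check on the fault status register. This is a sketch assuming the ARM FSR layout in which the fault-status field occupies bit 10 and bits 3:0, with the value 2 denoting a debug event:

#include <stdio.h>

/* Sketch only: recognise a debug exception from an ARM fault status
 * register value, mirroring debug_entry's
 * "ldr r4, =0x40f; and r5, r4, \fsr; cmp r5, #2". */
static int fsr_fs_is_debug(unsigned int fsr)
{
        return (fsr & 0x40f) == 0x2;
}

int main(void)
{
        printf("%d %d\n", fsr_fs_is_debug(0x002), fsr_fs_is_debug(0x005));  /* 1 0 */
        return 0;
}
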
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 971ac8c..c0062ad 100644
@@ -24,6 +24,7 @@
 #define        NOP             0xe8bd4000      /* pop {lr} */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_OLD_MCOUNT
 #define OLD_MCOUNT_ADDR        ((unsigned long) mcount)
 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
@@ -59,9 +60,9 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 }
 #endif
 
-/* construct a branch (BL) instruction to addr */
 #ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+                                      bool link)
 {
        unsigned long s, j1, j2, i1, i2, imm10, imm11;
        unsigned long first, second;
@@ -83,15 +84,22 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
        j2 = (!i2) ^ s;
 
        first = 0xf000 | (s << 10) | imm10;
-       second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+       second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+       if (link)
+               second |= 1 << 14;
 
        return (second << 16) | first;
 }
 #else
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+                                      bool link)
 {
+       unsigned long opcode = 0xea000000;
        long offset;
 
+       if (link)
+               opcode |= 1 << 24;
+
        offset = (long)addr - (long)(pc + 8);
        if (unlikely(offset < -33554432 || offset > 33554428)) {
                /* Can't generate branches that far (from ARM ARM). Ftrace
@@ -103,10 +111,15 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 
        offset = (offset >> 2) & 0x00ffffff;
 
-       return 0xeb000000 | offset;
+       return opcode | offset;
 }
 #endif
 
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+       return ftrace_gen_branch(pc, addr, true);
+}
+
 static int ftrace_modify_code(unsigned long pc, unsigned long old,
                              unsigned long new)
 {
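
The ARM-mode ftrace_gen_branch() above builds a B/BL instruction by hand: 0xea000000 is the B opcode, bit 24 turns it into BL, and the 24-bit signed word offset is taken relative to pc + 8. A stand-alone worked example of that arithmetic with made-up addresses:

#include <stdio.h>

/* Same arithmetic as the ARM-mode ftrace_gen_branch() above. */
static unsigned long demo_gen_branch(unsigned long pc, unsigned long addr, int link)
{
        unsigned long opcode = 0xea000000;
        long offset;

        if (link)
                opcode |= 1 << 24;

        offset = (long)addr - (long)(pc + 8);
        offset = (offset >> 2) & 0x00ffffff;

        return opcode | offset;
}

int main(void)
{
        /* e.g. a BL from 0xc0008000 to 0xc0010000 encodes as 0xeb001ffe */
        printf("%#lx\n", demo_gen_branch(0xc0008000, 0xc0010000, 1));
        return 0;
}
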
@@ -193,3 +206,83 @@ int __init ftrace_dyn_arch_init(void *data)
 
        return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long) &return_to_handler;
+       struct ftrace_graph_ent trace;
+       unsigned long old;
+       int err;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+       *parent = return_hooker;
+
+       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                      frame_pointer);
+       if (err == -EBUSY) {
+               *parent = old;
+               return;
+       }
+
+       trace.func = self_addr;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace)) {
+               current->curr_ret_stack--;
+               *parent = old;
+       }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+extern unsigned long ftrace_graph_call_old;
+extern void ftrace_graph_caller_old(void);
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+                                 void (*func) (void), bool enable)
+{
+       unsigned long caller_fn = (unsigned long) func;
+       unsigned long pc = (unsigned long) callsite;
+       unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+       unsigned long nop = 0xe1a00000; /* mov r0, r0 */
+       unsigned long old = enable ? nop : branch;
+       unsigned long new = enable ? branch : nop;
+
+       return ftrace_modify_code(pc, old, new);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+       int ret;
+
+       ret = __ftrace_modify_caller(&ftrace_graph_call,
+                                    ftrace_graph_caller,
+                                    enable);
+
+#ifdef CONFIG_OLD_MCOUNT
+       if (!ret)
+               ret = __ftrace_modify_caller(&ftrace_graph_call_old,
+                                            ftrace_graph_caller_old,
+                                            enable);
+#endif
+
+       return ret;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
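
prepare_ftrace_return() above hooks in the function-graph tracer by saving the real return address and pointing the caller's saved lr at return_to_handler instead, so the tracer regains control when the instrumented function returns. A purely conceptual user-space sketch of that swap; the trampoline address, return stack and names below are invented:

#include <stdio.h>

#define DEMO_RETURN_HOOKER 0xffff0000UL   /* pretend address of the trampoline */

static unsigned long demo_return_stack[16];
static int demo_depth;

/* Conceptual model of prepare_ftrace_return(): remember the caller's real
 * return address, then make the saved lr point at the trampoline. */
static void demo_prepare_return(unsigned long *parent)
{
        demo_return_stack[demo_depth++] = *parent;
        *parent = DEMO_RETURN_HOOKER;
}

int main(void)
{
        unsigned long saved_lr = 0x8001c0deUL;  /* pretend saved return address */

        demo_prepare_return(&saved_lr);
        printf("saved lr now %#lx, original %#lx kept at depth %d\n",
               saved_lr, demo_return_stack[demo_depth - 1], demo_depth - 1);
        return 0;
}
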
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 21e3a4a..c9f3f04 100644
@@ -24,6 +24,7 @@
 #define pr_fmt(fmt) "hw-breakpoint: " fmt
 
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/smp.h>
@@ -44,6 +45,7 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
 
 /* Number of BRP/WRP registers on this CPU. */
 static int core_num_brps;
+static int core_num_reserved_brps;
 static int core_num_wrps;
 
 /* Debug architecture version. */
@@ -52,87 +54,6 @@ static u8 debug_arch;
 /* Maximum supported watchpoint length. */
 static u8 max_watchpoint_len;
 
-/* Determine number of BRP registers available. */
-static int get_num_brps(void)
-{
-       u32 didr;
-       ARM_DBG_READ(c0, 0, didr);
-       return ((didr >> 24) & 0xf) + 1;
-}
-
-/* Determine number of WRP registers available. */
-static int get_num_wrps(void)
-{
-       /*
-        * FIXME: When a watchpoint fires, the only way to work out which
-        * watchpoint it was is by disassembling the faulting instruction
-        * and working out the address of the memory access.
-        *
-        * Furthermore, we can only do this if the watchpoint was precise
-        * since imprecise watchpoints prevent us from calculating register
-        * based addresses.
-        *
-        * For the time being, we only report 1 watchpoint register so we
-        * always know which watchpoint fired. In the future we can either
-        * add a disassembler and address generation emulator, or we can
-        * insert a check to see if the DFAR is set on watchpoint exception
-        * entry [the ARM ARM states that the DFAR is UNKNOWN, but
-        * experience shows that it is set on some implementations].
-        */
-
-#if 0
-       u32 didr, wrps;
-       ARM_DBG_READ(c0, 0, didr);
-       return ((didr >> 28) & 0xf) + 1;
-#endif
-
-       return 1;
-}
-
-int hw_breakpoint_slots(int type)
-{
-       /*
-        * We can be called early, so don't rely on
-        * our static variables being initialised.
-        */
-       switch (type) {
-       case TYPE_INST:
-               return get_num_brps();
-       case TYPE_DATA:
-               return get_num_wrps();
-       default:
-               pr_warning("unknown slot type: %d\n", type);
-               return 0;
-       }
-}
-
-/* Determine debug architecture. */
-static u8 get_debug_arch(void)
-{
-       u32 didr;
-
-       /* Do we implement the extended CPUID interface? */
-       if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-               pr_warning("CPUID feature registers not supported. "
-                               "Assuming v6 debug is present.\n");
-               return ARM_DEBUG_ARCH_V6;
-       }
-
-       ARM_DBG_READ(c0, 0, didr);
-       return (didr >> 16) & 0xf;
-}
-
-/* Does this core support mismatch breakpoints? */
-static int core_has_mismatch_bps(void)
-{
-       return debug_arch >= ARM_DEBUG_ARCH_V7_ECP14 && core_num_brps > 1;
-}
-
-u8 arch_get_debug_arch(void)
-{
-       return debug_arch;
-}
-
 #define READ_WB_REG_CASE(OP2, M, VAL)          \
        case ((OP2 << 4) + M):                  \
                ARM_DBG_READ(c ## M, OP2, VAL); \
@@ -210,6 +131,94 @@ static void write_wb_reg(int n, u32 val)
        isb();
 }
 
+/* Determine debug architecture. */
+static u8 get_debug_arch(void)
+{
+       u32 didr;
+
+       /* Do we implement the extended CPUID interface? */
+       if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+               pr_warning("CPUID feature registers not supported. "
+                               "Assuming v6 debug is present.\n");
+               return ARM_DEBUG_ARCH_V6;
+       }
+
+       ARM_DBG_READ(c0, 0, didr);
+       return (didr >> 16) & 0xf;
+}
+
+u8 arch_get_debug_arch(void)
+{
+       return debug_arch;
+}
+
+/* Determine number of BRP register available. */
+static int get_num_brp_resources(void)
+{
+       u32 didr;
+       ARM_DBG_READ(c0, 0, didr);
+       return ((didr >> 24) & 0xf) + 1;
+}
+
+/* Does this core support mismatch breakpoints? */
+static int core_has_mismatch_brps(void)
+{
+       return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
+               get_num_brp_resources() > 1);
+}
+
+/* Determine number of usable WRPs available. */
+static int get_num_wrps(void)
+{
+       /*
+        * FIXME: When a watchpoint fires, the only way to work out which
+        * watchpoint it was is by disassembling the faulting instruction
+        * and working out the address of the memory access.
+        *
+        * Furthermore, we can only do this if the watchpoint was precise
+        * since imprecise watchpoints prevent us from calculating register
+        * based addresses.
+        *
+        * Providing we have more than 1 breakpoint register, we only report
+        * a single watchpoint register for the time being. This way, we always
+        * know which watchpoint fired. In the future we can either add a
+        * disassembler and address generation emulator, or we can insert a
+        * check to see if the DFAR is set on watchpoint exception entry
+        * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
+        * that it is set on some implementations].
+        */
+
+#if 0
+       int wrps;
+       u32 didr;
+       ARM_DBG_READ(c0, 0, didr);
+       wrps = ((didr >> 28) & 0xf) + 1;
+#endif
+       int wrps = 1;
+
+       if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
+               wrps = get_num_brp_resources() - 1;
+
+       return wrps;
+}
+
+/* We reserve one breakpoint for each watchpoint. */
+static int get_num_reserved_brps(void)
+{
+       if (core_has_mismatch_brps())
+               return get_num_wrps();
+       return 0;
+}
+
+/* Determine number of usable BRPs available. */
+static int get_num_brps(void)
+{
+       int brps = get_num_brp_resources();
+       if (core_has_mismatch_brps())
+               brps -= get_num_reserved_brps();
+       return brps;
+}
+
 /*
  * In order to access the breakpoint/watchpoint control registers,
  * we must be running in debug monitor mode. Unfortunately, we can
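
The helpers above split the breakpoint resources three ways: the watchpoints we report, the breakpoints reserved to single-step those watchpoints, and the breakpoints left for ordinary use. A stand-alone sketch of that accounting with an invented resource count (a v7 core reporting 6 BRP resources ends up with 1 WRP, 1 reserved BRP and 5 usable BRPs):

#include <stdio.h>

/* Mirror of the BRP/WRP accounting above, with the hardware probing
 * replaced by invented values. */
static int demo_brp_resources = 6;      /* ((DIDR >> 24) & 0xf) + 1, made up */
static int demo_has_mismatch = 1;       /* v7 debug with more than one BRP   */

static int demo_num_wrps(void)
{
        int wrps = 1;                   /* only one reported without a disassembler */

        if (demo_has_mismatch && wrps >= demo_brp_resources)
                wrps = demo_brp_resources - 1;
        return wrps;
}

static int demo_num_reserved_brps(void)
{
        return demo_has_mismatch ? demo_num_wrps() : 0;
}

static int demo_num_brps(void)
{
        return demo_brp_resources - demo_num_reserved_brps();
}

int main(void)
{
        printf("usable BRPs %d, reserved BRPs %d, WRPs %d\n",
               demo_num_brps(), demo_num_reserved_brps(), demo_num_wrps());
        /* -> usable BRPs 5, reserved BRPs 1, WRPs 1 */
        return 0;
}
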
@@ -230,8 +239,12 @@ static int enable_monitor_mode(void)
                goto out;
        }
 
+       /* If monitor mode is already enabled, just return. */
+       if (dscr & ARM_DSCR_MDBGEN)
+               goto out;
+
        /* Write to the corresponding DSCR. */
-       switch (debug_arch) {
+       switch (get_debug_arch()) {
        case ARM_DEBUG_ARCH_V6:
        case ARM_DEBUG_ARCH_V6_1:
                ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
@@ -246,15 +259,30 @@ static int enable_monitor_mode(void)
 
        /* Check that the write made it through. */
        ARM_DBG_READ(c1, 0, dscr);
-       if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
-                               "failed to enable monitor mode.")) {
+       if (!(dscr & ARM_DSCR_MDBGEN))
                ret = -EPERM;
-       }
 
 out:
        return ret;
 }
 
+int hw_breakpoint_slots(int type)
+{
+       /*
+        * We can be called early, so don't rely on
+        * our static variables being initialised.
+        */
+       switch (type) {
+       case TYPE_INST:
+               return get_num_brps();
+       case TYPE_DATA:
+               return get_num_wrps();
+       default:
+               pr_warning("unknown slot type: %d\n", type);
+               return 0;
+       }
+}
+
 /*
  * Check if 8-bit byte-address select is available.
  * This clobbers WRP 0.
@@ -268,9 +296,6 @@ static u8 get_max_wp_len(void)
        if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
                goto out;
 
-       if (enable_monitor_mode())
-               goto out;
-
        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.len = ARM_BREAKPOINT_LEN_8;
        ctrl_reg = encode_ctrl_reg(ctrl);
@@ -290,23 +315,6 @@ u8 arch_get_max_wp_len(void)
 }
 
 /*
- * Handler for reactivating a suspended watchpoint when the single
- * step `mismatch' breakpoint is triggered.
- */
-static void wp_single_step_handler(struct perf_event *bp, int unused,
-                                  struct perf_sample_data *data,
-                                  struct pt_regs *regs)
-{
-       perf_event_enable(counter_arch_bp(bp)->suspended_wp);
-       unregister_hw_breakpoint(bp);
-}
-
-static int bp_is_single_step(struct perf_event *bp)
-{
-       return bp->overflow_handler == wp_single_step_handler;
-}
-
-/*
  * Install a perf counter breakpoint.
  */
 int arch_install_hw_breakpoint(struct perf_event *bp)
@@ -314,30 +322,41 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot, **slots;
        int i, max_slots, ctrl_base, val_base, ret = 0;
+       u32 addr, ctrl;
 
        /* Ensure that we are in monitor mode and halting mode is disabled. */
        ret = enable_monitor_mode();
        if (ret)
                goto out;
 
+       addr = info->address;
+       ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
+
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_base = ARM_BASE_BCR;
                val_base = ARM_BASE_BVR;
-               slots = __get_cpu_var(bp_on_reg);
-               max_slots = core_num_brps - 1;
-
-               if (bp_is_single_step(bp)) {
-                       info->ctrl.mismatch = 1;
-                       i = max_slots;
-                       slots[i] = bp;
-                       goto setup;
+               slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+               max_slots = core_num_brps;
+               if (info->step_ctrl.enabled) {
+                       /* Override the breakpoint data with the step data. */
+                       addr = info->trigger & ~0x3;
+                       ctrl = encode_ctrl_reg(info->step_ctrl);
                }
        } else {
                /* Watchpoint */
-               ctrl_base = ARM_BASE_WCR;
-               val_base = ARM_BASE_WVR;
-               slots = __get_cpu_var(wp_on_reg);
+               if (info->step_ctrl.enabled) {
+                       /* Install into the reserved breakpoint region. */
+                       ctrl_base = ARM_BASE_BCR + core_num_brps;
+                       val_base = ARM_BASE_BVR + core_num_brps;
+                       /* Override the watchpoint data with the step data. */
+                       addr = info->trigger & ~0x3;
+                       ctrl = encode_ctrl_reg(info->step_ctrl);
+               } else {
+                       ctrl_base = ARM_BASE_WCR;
+                       val_base = ARM_BASE_WVR;
+               }
+               slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
                max_slots = core_num_wrps;
        }
 
@@ -355,12 +374,11 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
                goto out;
        }
 
-setup:
        /* Setup the address register. */
-       write_wb_reg(val_base + i, info->address);
+       write_wb_reg(val_base + i, addr);
 
        /* Setup the control register. */
-       write_wb_reg(ctrl_base + i, encode_ctrl_reg(info->ctrl) | 0x1);
+       write_wb_reg(ctrl_base + i, ctrl);
 
 out:
        return ret;
@@ -375,18 +393,15 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                base = ARM_BASE_BCR;
-               slots = __get_cpu_var(bp_on_reg);
-               max_slots = core_num_brps - 1;
-
-               if (bp_is_single_step(bp)) {
-                       i = max_slots;
-                       slots[i] = NULL;
-                       goto reset;
-               }
+               slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+               max_slots = core_num_brps;
        } else {
                /* Watchpoint */
-               base = ARM_BASE_WCR;
-               slots = __get_cpu_var(wp_on_reg);
+               if (info->step_ctrl.enabled)
+                       base = ARM_BASE_BCR + core_num_brps;
+               else
+                       base = ARM_BASE_WCR;
+               slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
                max_slots = core_num_wrps;
        }
 
@@ -403,7 +418,6 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
        if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
                return;
 
-reset:
        /* Reset the control register. */
        write_wb_reg(base + i, 0);
 }
@@ -537,12 +551,23 @@ static int arch_build_bp_info(struct perf_event *bp)
                return -EINVAL;
        }
 
+       /*
+        * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
+        * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
+        * by the hardware and must be aligned to the appropriate number of
+        * bytes.
+        */
+       if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+           info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+           info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+               return -EINVAL;
+
        /* Address */
        info->address = bp->attr.bp_addr;
 
        /* Privilege */
        info->ctrl.privilege = ARM_BREAKPOINT_USER;
-       if (arch_check_bp_in_kernelspace(bp) && !bp_is_single_step(bp))
+       if (arch_check_bp_in_kernelspace(bp))
                info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
 
        /* Enabled? */
@@ -561,7 +586,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 {
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret = 0;
-       u32 bytelen, max_len, offset, alignment_mask = 0x3;
+       u32 offset, alignment_mask = 0x3;
 
        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
@@ -571,84 +596,85 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        /* Check address alignment. */
        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                alignment_mask = 0x7;
-       if (info->address & alignment_mask) {
-               /*
-                * Try to fix the alignment. This may result in a length
-                * that is too large, so we must check for that.
-                */
-               bytelen = get_hbp_len(info->ctrl.len);
-               max_len = info->ctrl.type == ARM_BREAKPOINT_EXECUTE ? 4 :
-                               max_watchpoint_len;
-
-               if (max_len >= 8)
-                       offset = info->address & 0x7;
-               else
-                       offset = info->address & 0x3;
-
-               if (bytelen > (1 << ((max_len - (offset + 1)) >> 1))) {
-                       ret = -EFBIG;
-                       goto out;
-               }
-
-               info->ctrl.len <<= offset;
-               info->address &= ~offset;
-
-               pr_debug("breakpoint alignment fixup: length = 0x%x, "
-                       "address = 0x%x\n", info->ctrl.len, info->address);
+       offset = info->address & alignment_mask;
+       switch (offset) {
+       case 0:
+               /* Aligned */
+               break;
+       case 1:
+               /* Allow single byte watchpoint. */
+               if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                       break;
+       case 2:
+               /* Allow halfword watchpoints and breakpoints. */
+               if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+                       break;
+       default:
+               ret = -EINVAL;
+               goto out;
        }
 
+       info->address &= ~alignment_mask;
+       info->ctrl.len <<= offset;
+
        /*
         * Currently we rely on an overflow handler to take
         * care of single-stepping the breakpoint when it fires.
         * In the case of userspace breakpoints on a core with V7 debug,
-        * we can use the mismatch feature as a poor-man's hardware single-step.
+        * we can use the mismatch feature as a poor-man's hardware
+        * single-step, but this only works for per-task breakpoints.
         */
        if (WARN_ONCE(!bp->overflow_handler &&
-               (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_bps()),
+               (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
+                || !bp->hw.bp_target),
                        "overflow handler required but none found")) {
                ret = -EINVAL;
-               goto out;
        }
 out:
        return ret;
 }
 
-static void update_mismatch_flag(int idx, int flag)
+/*
+ * Enable/disable single-stepping over the breakpoint bp at address addr.
+ */
+static void enable_single_step(struct perf_event *bp, u32 addr)
 {
-       struct perf_event *bp = __get_cpu_var(bp_on_reg[idx]);
-       struct arch_hw_breakpoint *info;
-
-       if (bp == NULL)
-               return;
+       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       info = counter_arch_bp(bp);
+       arch_uninstall_hw_breakpoint(bp);
+       info->step_ctrl.mismatch  = 1;
+       info->step_ctrl.len       = ARM_BREAKPOINT_LEN_4;
+       info->step_ctrl.type      = ARM_BREAKPOINT_EXECUTE;
+       info->step_ctrl.privilege = info->ctrl.privilege;
+       info->step_ctrl.enabled   = 1;
+       info->trigger             = addr;
+       arch_install_hw_breakpoint(bp);
+}
 
-       /* Update the mismatch field to enter/exit `single-step' mode */
-       if (!bp->overflow_handler && info->ctrl.mismatch != flag) {
-               info->ctrl.mismatch = flag;
-               write_wb_reg(ARM_BASE_BCR + idx, encode_ctrl_reg(info->ctrl) | 0x1);
-       }
+static void disable_single_step(struct perf_event *bp)
+{
+       arch_uninstall_hw_breakpoint(bp);
+       counter_arch_bp(bp)->step_ctrl.enabled = 0;
+       arch_install_hw_breakpoint(bp);
 }
 
 static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 {
        int i;
-       struct perf_event *bp, **slots = __get_cpu_var(wp_on_reg);
+       struct perf_event *wp, **slots;
        struct arch_hw_breakpoint *info;
-       struct perf_event_attr attr;
+
+       slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
        /* Without a disassembler, we can only handle 1 watchpoint. */
        BUG_ON(core_num_wrps > 1);
 
-       hw_breakpoint_init(&attr);
-       attr.bp_addr    = regs->ARM_pc & ~0x3;
-       attr.bp_len     = HW_BREAKPOINT_LEN_4;
-       attr.bp_type    = HW_BREAKPOINT_X;
-
        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();
 
-               if (slots[i] == NULL) {
+               wp = slots[i];
+
+               if (wp == NULL) {
                        rcu_read_unlock();
                        continue;
                }
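
enable_single_step()/disable_single_step() above implement a poor man's hardware single-step: when a breakpoint or watchpoint without an overflow handler fires, the resource is re-programmed as a mismatch breakpoint on the triggering address, and the next debug exception taken at a different pc restores the original setup. A conceptual sketch of that sequence with invented addresses (no real hardware state is touched):

#include <stdio.h>

struct demo_bp {
        unsigned long trigger;  /* address we are stepping over */
        int stepping;           /* like step_ctrl.enabled       */
};

static void demo_debug_exception(struct demo_bp *bp, unsigned long pc)
{
        if (!bp->stepping) {
                /* First hit: report it, then arm a mismatch breakpoint
                 * on the triggering address. */
                printf("hit at %#lx, single-stepping\n", pc);
                bp->trigger = pc;
                bp->stepping = 1;
        } else if (pc != bp->trigger) {
                /* Execution moved past the trigger: restore the original
                 * breakpoint or watchpoint. */
                printf("stepped to %#lx, restoring\n", pc);
                bp->stepping = 0;
        }
}

int main(void)
{
        struct demo_bp bp = { 0, 0 };

        demo_debug_exception(&bp, 0x8000);      /* original resource fires  */
        demo_debug_exception(&bp, 0x8004);      /* mismatch fires, restore  */
        return 0;
}
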
@@ -658,24 +684,51 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
                 * single watchpoint, we can set the trigger to the lowest
                 * possible faulting address.
                 */
-               info = counter_arch_bp(slots[i]);
-               info->trigger = slots[i]->attr.bp_addr;
+               info = counter_arch_bp(wp);
+               info->trigger = wp->attr.bp_addr;
                pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
-               perf_bp_event(slots[i], regs);
+               perf_bp_event(wp, regs);
 
                /*
                 * If no overflow handler is present, insert a temporary
                 * mismatch breakpoint so we can single-step over the
                 * watchpoint trigger.
                 */
-               if (!slots[i]->overflow_handler) {
-                       bp = register_user_hw_breakpoint(&attr,
-                                                        wp_single_step_handler,
-                                                        current);
-                       counter_arch_bp(bp)->suspended_wp = slots[i];
-                       perf_event_disable(slots[i]);
-               }
+               if (!wp->overflow_handler)
+                       enable_single_step(wp, instruction_pointer(regs));
+
+               rcu_read_unlock();
+       }
+}
 
+static void watchpoint_single_step_handler(unsigned long pc)
+{
+       int i;
+       struct perf_event *wp, **slots;
+       struct arch_hw_breakpoint *info;
+
+       slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+
+       for (i = 0; i < core_num_reserved_brps; ++i) {
+               rcu_read_lock();
+
+               wp = slots[i];
+
+               if (wp == NULL)
+                       goto unlock;
+
+               info = counter_arch_bp(wp);
+               if (!info->step_ctrl.enabled)
+                       goto unlock;
+
+               /*
+                * Restore the original watchpoint if we've completed the
+                * single-step.
+                */
+               if (info->trigger != pc)
+                       disable_single_step(wp);
+
+unlock:
                rcu_read_unlock();
        }
 }
@@ -683,62 +736,69 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 {
        int i;
-       int mismatch;
        u32 ctrl_reg, val, addr;
-       struct perf_event *bp, **slots = __get_cpu_var(bp_on_reg);
+       struct perf_event *bp, **slots;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;
 
+       slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+
        /* The exception entry code places the amended lr in the PC. */
        addr = regs->ARM_pc;
 
+       /* Check the currently installed breakpoints first. */
        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();
 
                bp = slots[i];
 
-               if (bp == NULL) {
-                       rcu_read_unlock();
-                       continue;
-               }
+               if (bp == NULL)
+                       goto unlock;
 
-               mismatch = 0;
+               info = counter_arch_bp(bp);
 
                /* Check if the breakpoint value matches. */
                val = read_wb_reg(ARM_BASE_BVR + i);
                if (val != (addr & ~0x3))
-                       goto unlock;
+                       goto mismatch;
 
                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if ((1 << (addr & 0x3)) & ctrl.len) {
-                       mismatch = 1;
-                       info = counter_arch_bp(bp);
                        info->trigger = addr;
-               }
-
-unlock:
-               if ((mismatch && !info->ctrl.mismatch) || bp_is_single_step(bp)) {
                        pr_debug("breakpoint fired: address = 0x%x\n", addr);
                        perf_bp_event(bp, regs);
+                       if (!bp->overflow_handler)
+                               enable_single_step(bp, addr);
+                       goto unlock;
                }
 
-               update_mismatch_flag(i, mismatch);
+mismatch:
+               /* If we're stepping a breakpoint, it can now be restored. */
+               if (info->step_ctrl.enabled)
+                       disable_single_step(bp);
+unlock:
                rcu_read_unlock();
        }
+
+       /* Handle any pending watchpoint single-step breakpoints. */
+       watchpoint_single_step_handler(addr);
 }
 
 /*
  * Called from either the Data Abort Handler [watchpoint] or the
- * Prefetch Abort Handler [breakpoint].
+ * Prefetch Abort Handler [breakpoint] with preemption disabled.
  */
 static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                                 struct pt_regs *regs)
 {
-       int ret = 1; /* Unhandled fault. */
+       int ret = 0;
        u32 dscr;
 
+       /* We must be called with preemption disabled. */
+       WARN_ON(preemptible());
+
        /* We only handle watchpoints and hardware breakpoints. */
        ARM_DBG_READ(c1, 0, dscr);
 
@@ -753,25 +813,47 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                watchpoint_handler(addr, regs);
                break;
        default:
-               goto out;
+               ret = 1; /* Unhandled fault. */
        }
 
-       ret = 0;
-out:
+       /*
+        * Re-enable preemption after it was disabled in the
+        * low-level exception handling code.
+        */
+       preempt_enable();
+
        return ret;
 }
 
 /*
  * One-time initialisation.
  */
-static void __init reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *unused)
 {
        int i;
 
+       /*
+        * v7 debug contains save and restore registers so that debug state
+        * can be maintained across low-power modes without leaving
+        * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
+        * we can write to the debug registers out of reset, so we must
+        * unlock the OS Lock Access Register to avoid taking undefined
+        * instruction exceptions later on.
+        */
+       if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+               /*
+                * Unconditionally clear the lock by writing a value
+                * other than 0xC5ACCE55 to the access register.
+                */
+               asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+               isb();
+       }
+
        if (enable_monitor_mode())
                return;
 
-       for (i = 0; i < core_num_brps; ++i) {
+       /* We must also reset any reserved registers. */
+       for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
                write_wb_reg(ARM_BASE_BCR + i, 0UL);
                write_wb_reg(ARM_BASE_BVR + i, 0UL);
        }
@@ -782,45 +864,57 @@ static void __init reset_ctrl_regs(void *unused)
        }
 }
 
+static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+                                     unsigned long action, void *cpu)
+{
+       if (action == CPU_ONLINE)
+               smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_reset_nb = {
+       .notifier_call = dbg_reset_notify,
+};
+
 static int __init arch_hw_breakpoint_init(void)
 {
-       int ret = 0;
        u32 dscr;
 
        debug_arch = get_debug_arch();
 
        if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
                pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
-               ret = -ENODEV;
-               goto out;
+               return 0;
        }
 
        /* Determine how many BRPs/WRPs are available. */
        core_num_brps = get_num_brps();
+       core_num_reserved_brps = get_num_reserved_brps();
        core_num_wrps = get_num_wrps();
 
        pr_info("found %d breakpoint and %d watchpoint registers.\n",
-                       core_num_brps, core_num_wrps);
+               core_num_brps + core_num_reserved_brps, core_num_wrps);
 
-       if (core_has_mismatch_bps())
-               pr_info("1 breakpoint reserved for watchpoint single-step.\n");
+       if (core_num_reserved_brps)
+               pr_info("%d breakpoint(s) reserved for watchpoint "
+                               "single-step.\n", core_num_reserved_brps);
 
        ARM_DBG_READ(c1, 0, dscr);
        if (dscr & ARM_DSCR_HDBGEN) {
                pr_warning("halting debug mode enabled. Assuming maximum "
                                "watchpoint size of 4 bytes.");
        } else {
-               /* Work out the maximum supported watchpoint length. */
-               max_watchpoint_len = get_max_wp_len();
-               pr_info("maximum watchpoint size is %u bytes.\n",
-                               max_watchpoint_len);
-
                /*
                 * Reset the breakpoint resources. We assume that a halting
                 * debugger will leave the world in a nice state for us.
                 */
                smp_call_function(reset_ctrl_regs, NULL, 1);
                reset_ctrl_regs(NULL);
+
+               /* Work out the maximum supported watchpoint length. */
+               max_watchpoint_len = get_max_wp_len();
+               pr_info("maximum watchpoint size is %u bytes.\n",
+                               max_watchpoint_len);
        }
 
        /* Register debug fault handler. */
@@ -829,8 +923,9 @@ static int __init arch_hw_breakpoint_init(void)
        hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
                        "breakpoint debug exception");
 
-out:
-       return ret;
+       /* Register hotplug notifier. */
+       register_cpu_notifier(&dbg_reset_nb);
+       return 0;
 }
 arch_initcall(arch_hw_breakpoint_init);
 
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 36ad3be..6d61633 100644
@@ -35,6 +35,7 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
+#include <linux/ftrace.h>
 
 #include <asm/system.h>
 #include <asm/mach/irq.h>
@@ -105,7 +106,8 @@ unlock:
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 07a5035..624e2a5 100644
@@ -4,9 +4,7 @@
  * ARM performance counter support.
  *
  * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
- *
- * ARMv7 support: Jean Pihet <jpihet@mvista.com>
- * 2010 (c) MontaVista Software, LLC.
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
  *
  * This code is based on the sparc64 perf event code, which is in turn based
  * on the x86 code. Callchain code is based on the ARM OProfile backtrace
@@ -34,7 +32,7 @@ static struct platform_device *pmu_device;
  * Hardware lock to serialize accesses to PMU registers. Needed for the
  * read/modify/write sequences.
  */
-DEFINE_SPINLOCK(pmu_lock);
+static DEFINE_RAW_SPINLOCK(pmu_lock);
 
 /*
  * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
@@ -67,31 +65,25 @@ struct cpu_hw_events {
         */
        unsigned long           active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
 };
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-/* PMU names. */
-static const char *arm_pmu_names[] = {
-       [ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
-       [ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
-       [ARM_PERF_PMU_ID_V6]      = "v6",
-       [ARM_PERF_PMU_ID_V6MP]    = "v6mpcore",
-       [ARM_PERF_PMU_ID_CA8]     = "ARMv7 Cortex-A8",
-       [ARM_PERF_PMU_ID_CA9]     = "ARMv7 Cortex-A9",
-};
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct arm_pmu {
        enum arm_perf_pmu_ids id;
+       const char      *name;
        irqreturn_t     (*handle_irq)(int irq_num, void *dev);
        void            (*enable)(struct hw_perf_event *evt, int idx);
        void            (*disable)(struct hw_perf_event *evt, int idx);
-       int             (*event_map)(int evt);
-       u64             (*raw_event)(u64);
        int             (*get_event_idx)(struct cpu_hw_events *cpuc,
                                         struct hw_perf_event *hwc);
        u32             (*read_counter)(int idx);
        void            (*write_counter)(int idx, u32 val);
        void            (*start)(void);
        void            (*stop)(void);
+       const unsigned  (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+                                   [PERF_COUNT_HW_CACHE_OP_MAX]
+                                   [PERF_COUNT_HW_CACHE_RESULT_MAX];
+       const unsigned  (*event_map)[PERF_COUNT_HW_MAX];
+       u32             raw_event_mask;
        int             num_events;
        u64             max_period;
 };
@@ -136,10 +128,6 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 
 #define CACHE_OP_UNSUPPORTED           0xFFFF
 
-static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
-                                    [PERF_COUNT_HW_CACHE_OP_MAX]
-                                    [PERF_COUNT_HW_CACHE_RESULT_MAX];
-
 static int
 armpmu_map_cache_event(u64 config)
 {
@@ -157,7 +145,7 @@ armpmu_map_cache_event(u64 config)
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;
 
-       ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
+       ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
 
        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;
@@ -166,6 +154,19 @@ armpmu_map_cache_event(u64 config)
 }
 
 static int
+armpmu_map_event(u64 config)
+{
+       int mapping = (*armpmu->event_map)[config];
+       return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+}
+
+static int
+armpmu_map_raw_event(u64 config)
+{
+       return (int)(config & armpmu->raw_event_mask);
+}
+
+static int
 armpmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
@@ -458,11 +459,11 @@ __hw_perf_event_init(struct perf_event *event)
 
        /* Decode the generic type into an ARM event identifier. */
        if (PERF_TYPE_HARDWARE == event->attr.type) {
-               mapping = armpmu->event_map(event->attr.config);
+               mapping = armpmu_map_event(event->attr.config);
        } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
                mapping = armpmu_map_cache_event(event->attr.config);
        } else if (PERF_TYPE_RAW == event->attr.type) {
-               mapping = armpmu->raw_event(event->attr.config);
+               mapping = armpmu_map_raw_event(event->attr.config);
        } else {
                pr_debug("event type %x not supported\n", event->attr.type);
                return -EOPNOTSUPP;
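
With event_map now a per-PMU table, armpmu_map_event() used above is just an array lookup that rejects unsupported entries. A compact sketch of that lookup; the table contents below are invented and do not describe any real PMU:

#include <stdio.h>

#define DEMO_HW_MAX          4
#define HW_OP_UNSUPPORTED    0xffff

/* Invented per-PMU mapping: generic perf event index -> hardware event id. */
static const unsigned demo_event_map[DEMO_HW_MAX] = {
        [0] = 0xff,                 /* e.g. cycles       */
        [1] = 0x08,                 /* e.g. instructions */
        [2] = HW_OP_UNSUPPORTED,    /* not countable     */
        [3] = 0x10,
};

static int demo_map_event(unsigned config)
{
        int mapping = demo_event_map[config];
        return mapping == HW_OP_UNSUPPORTED ? -95 /* -EOPNOTSUPP */ : mapping;
}

int main(void)
{
        printf("%d %d\n", demo_map_event(1), demo_map_event(2));   /* 8 -95 */
        return 0;
}
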
@@ -603,2366 +604,10 @@ static struct pmu pmu = {
        .read           = armpmu_read,
 };
 
-/*
- * ARMv6 Performance counter handling code.
- *
- * ARMv6 has 2 configurable performance counters and a single cycle counter.
- * They all share a single reset bit but can be written to zero so we can use
- * that for a reset.
- *
- * The counters can't be individually enabled or disabled so when we remove
- * one event and replace it with another we could get spurious counts from the
- * wrong event. However, we can take advantage of the fact that the
- * performance counters can export events to the event bus, and the event bus
- * itself can be monitored. This requires that we *don't* export the events to
- * the event bus. The procedure for disabling a configurable counter is:
- *     - change the counter to count the ETMEXTOUT[0] signal (0x20). This
- *       effectively stops the counter from counting.
- *     - disable the counter's interrupt generation (each counter has it's
- *       own interrupt enable bit).
- * Once stopped, the counter value can be written as 0 to reset.
- *
- * To enable a counter:
- *     - enable the counter's interrupt generation.
- *     - set the new event type.
- *
- * Note: the dedicated cycle counter only counts cycles and can't be
- * enabled/disabled independently of the others. When we want to disable the
- * cycle counter, we have to just disable the interrupt reporting and start
- * ignoring that counter. When re-enabling, we have to reset the value and
- * enable the interrupt.
- */
-
-enum armv6_perf_types {
-       ARMV6_PERFCTR_ICACHE_MISS           = 0x0,
-       ARMV6_PERFCTR_IBUF_STALL            = 0x1,
-       ARMV6_PERFCTR_DDEP_STALL            = 0x2,
-       ARMV6_PERFCTR_ITLB_MISS             = 0x3,
-       ARMV6_PERFCTR_DTLB_MISS             = 0x4,
-       ARMV6_PERFCTR_BR_EXEC               = 0x5,
-       ARMV6_PERFCTR_BR_MISPREDICT         = 0x6,
-       ARMV6_PERFCTR_INSTR_EXEC            = 0x7,
-       ARMV6_PERFCTR_DCACHE_HIT            = 0x9,
-       ARMV6_PERFCTR_DCACHE_ACCESS         = 0xA,
-       ARMV6_PERFCTR_DCACHE_MISS           = 0xB,
-       ARMV6_PERFCTR_DCACHE_WBACK          = 0xC,
-       ARMV6_PERFCTR_SW_PC_CHANGE          = 0xD,
-       ARMV6_PERFCTR_MAIN_TLB_MISS         = 0xF,
-       ARMV6_PERFCTR_EXPL_D_ACCESS         = 0x10,
-       ARMV6_PERFCTR_LSU_FULL_STALL        = 0x11,
-       ARMV6_PERFCTR_WBUF_DRAINED          = 0x12,
-       ARMV6_PERFCTR_CPU_CYCLES            = 0xFF,
-       ARMV6_PERFCTR_NOP                   = 0x20,
-};
-
-enum armv6_counters {
-       ARMV6_CYCLE_COUNTER = 1,
-       ARMV6_COUNTER0,
-       ARMV6_COUNTER1,
-};
-
-/*
- * The hardware events that we support. We do support cache operations but
- * we have harvard caches and no way to combine instruction and data
- * accesses/misses in hardware.
- */
-static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6_PERFCTR_INSTR_EXEC,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6_PERFCTR_BR_MISPREDICT,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
-};
-
-static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
-                                         [PERF_COUNT_HW_CACHE_OP_MAX]
-                                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-       [C(L1D)] = {
-               /*
-                * The performance counters don't differentiate between read
-                * and write accesses/misses so this isn't strictly correct,
-                * but it's the best we can do. Writes and reads get
-                * combined.
-                */
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV6_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DCACHE_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV6_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DCACHE_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(L1I)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(LL)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(DTLB)] = {
-               /*
-                * The ARM performance counters can count micro DTLB misses,
-                * micro ITLB misses and main TLB misses. There isn't an event
-                * for TLB misses, so we use the micro misses here; users
-                * who want the main TLB misses can use a raw counter.
-                */
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DTLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DTLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(ITLB)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(BPU)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-};
-
-enum armv6mpcore_perf_types {
-       ARMV6MPCORE_PERFCTR_ICACHE_MISS     = 0x0,
-       ARMV6MPCORE_PERFCTR_IBUF_STALL      = 0x1,
-       ARMV6MPCORE_PERFCTR_DDEP_STALL      = 0x2,
-       ARMV6MPCORE_PERFCTR_ITLB_MISS       = 0x3,
-       ARMV6MPCORE_PERFCTR_DTLB_MISS       = 0x4,
-       ARMV6MPCORE_PERFCTR_BR_EXEC         = 0x5,
-       ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
-       ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
-       ARMV6MPCORE_PERFCTR_INSTR_EXEC      = 0x8,
-       ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
-       ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
-       ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
-       ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
-       ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
-       ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
-       ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
-       ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
-       ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
-       ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
-       ARMV6MPCORE_PERFCTR_CPU_CYCLES      = 0xFF,
-};
-
-/*
- * The hardware events that we support. We do support cache operations but
- * we have Harvard caches and no way to combine instruction and data
- * accesses/misses in hardware.
- */
-static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
-};
-
-static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
-                                       [PERF_COUNT_HW_CACHE_OP_MAX]
-                                       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-       [C(L1D)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]  =
-                               ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
-                       [C(RESULT_MISS)]    =
-                               ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]  =
-                               ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
-                       [C(RESULT_MISS)]    =
-                               ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(L1I)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(LL)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(DTLB)] = {
-               /*
-                * The ARM performance counters can count micro DTLB misses,
-                * micro ITLB misses and main TLB misses. There isn't an event
-                * for TLB misses, so we use the micro misses here; users
-                * who want the main TLB misses can use a raw counter.
-                */
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(ITLB)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(BPU)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
-               },
-       },
-};
-
-static inline unsigned long
-armv6_pmcr_read(void)
-{
-       u32 val;
-       asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
-       return val;
-}
-
-static inline void
-armv6_pmcr_write(unsigned long val)
-{
-       asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
-}
-
-#define ARMV6_PMCR_ENABLE              (1 << 0)
-#define ARMV6_PMCR_CTR01_RESET         (1 << 1)
-#define ARMV6_PMCR_CCOUNT_RESET                (1 << 2)
-#define ARMV6_PMCR_CCOUNT_DIV          (1 << 3)
-#define ARMV6_PMCR_COUNT0_IEN          (1 << 4)
-#define ARMV6_PMCR_COUNT1_IEN          (1 << 5)
-#define ARMV6_PMCR_CCOUNT_IEN          (1 << 6)
-#define ARMV6_PMCR_COUNT0_OVERFLOW     (1 << 8)
-#define ARMV6_PMCR_COUNT1_OVERFLOW     (1 << 9)
-#define ARMV6_PMCR_CCOUNT_OVERFLOW     (1 << 10)
-#define ARMV6_PMCR_EVT_COUNT0_SHIFT    20
-#define ARMV6_PMCR_EVT_COUNT0_MASK     (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
-#define ARMV6_PMCR_EVT_COUNT1_SHIFT    12
-#define ARMV6_PMCR_EVT_COUNT1_MASK     (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
-
-#define ARMV6_PMCR_OVERFLOWED_MASK \
-       (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
-        ARMV6_PMCR_CCOUNT_OVERFLOW)
-
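Putting these fields together (an informal example mirroring what armv6pmu_enable_event() does below with hwc->config_base): programming counter 0 to count executed instructions, with its overflow interrupt enabled, is a single read-modify-write of the PMCR:

        unsigned long val = armv6_pmcr_read();

        val &= ~ARMV6_PMCR_EVT_COUNT0_MASK;     /* clear the previous event selection */
        val |= ARMV6_PERFCTR_INSTR_EXEC << ARMV6_PMCR_EVT_COUNT0_SHIFT;
        val |= ARMV6_PMCR_COUNT0_IEN;           /* enable the overflow interrupt */
        armv6_pmcr_write(val);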
-static inline int
-armv6_pmcr_has_overflowed(unsigned long pmcr)
-{
-       return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
-}
-
-static inline int
-armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
-                                 enum armv6_counters counter)
-{
-       int ret = 0;
-
-       if (ARMV6_CYCLE_COUNTER == counter)
-               ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
-       else if (ARMV6_COUNTER0 == counter)
-               ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
-       else if (ARMV6_COUNTER1 == counter)
-               ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
-       else
-               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-
-       return ret;
-}
-
-static inline u32
-armv6pmu_read_counter(int counter)
-{
-       unsigned long value = 0;
-
-       if (ARMV6_CYCLE_COUNTER == counter)
-               asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
-       else if (ARMV6_COUNTER0 == counter)
-               asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
-       else if (ARMV6_COUNTER1 == counter)
-               asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
-       else
-               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-
-       return value;
-}
-
-static inline void
-armv6pmu_write_counter(int counter,
-                      u32 value)
-{
-       if (ARMV6_CYCLE_COUNTER == counter)
-               asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
-       else if (ARMV6_COUNTER0 == counter)
-               asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
-       else if (ARMV6_COUNTER1 == counter)
-               asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
-       else
-               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-}
-
-void
-armv6pmu_enable_event(struct hw_perf_event *hwc,
-                     int idx)
-{
-       unsigned long val, mask, evt, flags;
-
-       if (ARMV6_CYCLE_COUNTER == idx) {
-               mask    = 0;
-               evt     = ARMV6_PMCR_CCOUNT_IEN;
-       } else if (ARMV6_COUNTER0 == idx) {
-               mask    = ARMV6_PMCR_EVT_COUNT0_MASK;
-               evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
-                         ARMV6_PMCR_COUNT0_IEN;
-       } else if (ARMV6_COUNTER1 == idx) {
-               mask    = ARMV6_PMCR_EVT_COUNT1_MASK;
-               evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
-                         ARMV6_PMCR_COUNT1_IEN;
-       } else {
-               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-               return;
-       }
-
-       /*
-        * Mask out the current event and set the counter to count the event
-        * that we're interested in.
-        */
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = armv6_pmcr_read();
-       val &= ~mask;
-       val |= evt;
-       armv6_pmcr_write(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static irqreturn_t
-armv6pmu_handle_irq(int irq_num,
-                   void *dev)
-{
-       unsigned long pmcr = armv6_pmcr_read();
-       struct perf_sample_data data;
-       struct cpu_hw_events *cpuc;
-       struct pt_regs *regs;
-       int idx;
-
-       if (!armv6_pmcr_has_overflowed(pmcr))
-               return IRQ_NONE;
-
-       regs = get_irq_regs();
-
-       /*
-        * The interrupts are cleared by writing the overflow flags back to
-        * the control register. All of the other bits don't have any effect
-        * if they are rewritten, so write the whole value back.
-        */
-       armv6_pmcr_write(pmcr);
-
-       perf_sample_data_init(&data, 0);
-
-       cpuc = &__get_cpu_var(cpu_hw_events);
-       for (idx = 0; idx <= armpmu->num_events; ++idx) {
-               struct perf_event *event = cpuc->events[idx];
-               struct hw_perf_event *hwc;
-
-               if (!test_bit(idx, cpuc->active_mask))
-                       continue;
-
-               /*
-                * We have a single interrupt for all counters. Check that
-                * each counter has overflowed before we process it.
-                */
-               if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
-                       continue;
-
-               hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
-               if (!armpmu_event_set_period(event, hwc, idx))
-                       continue;
-
-               if (perf_event_overflow(event, 0, &data, regs))
-                       armpmu->disable(hwc, idx);
-       }
-
-       /*
-        * Handle the pending perf events.
-        *
-        * Note: this call *must* be run with interrupts disabled. For
-        * platforms that can have the PMU interrupts raised as an NMI, this
-        * will not work.
-        */
-       irq_work_run();
-
-       return IRQ_HANDLED;
-}
-
-static void
-armv6pmu_start(void)
-{
-       unsigned long flags, val;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = armv6_pmcr_read();
-       val |= ARMV6_PMCR_ENABLE;
-       armv6_pmcr_write(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-void
-armv6pmu_stop(void)
-{
-       unsigned long flags, val;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = armv6_pmcr_read();
-       val &= ~ARMV6_PMCR_ENABLE;
-       armv6_pmcr_write(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static inline int
-armv6pmu_event_map(int config)
-{
-       int mapping = armv6_perf_map[config];
-       if (HW_OP_UNSUPPORTED == mapping)
-               mapping = -EOPNOTSUPP;
-       return mapping;
-}
-
-static inline int
-armv6mpcore_pmu_event_map(int config)
-{
-       int mapping = armv6mpcore_perf_map[config];
-       if (HW_OP_UNSUPPORTED == mapping)
-               mapping = -EOPNOTSUPP;
-       return mapping;
-}
-
-static u64
-armv6pmu_raw_event(u64 config)
-{
-       return config & 0xff;
-}
-
-static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
-                      struct hw_perf_event *event)
-{
-       /* Always place a cycle counter into the cycle counter. */
-       if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
-               if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
-                       return -EAGAIN;
-
-               return ARMV6_CYCLE_COUNTER;
-       } else {
-               /*
-                * For anything other than a cycle counter, try and use
-                * counter0 and counter1.
-                */
-               if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
-                       return ARMV6_COUNTER1;
-               }
-
-               if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
-                       return ARMV6_COUNTER0;
-               }
-
-               /* The counters are all in use. */
-               return -EAGAIN;
-       }
-}
-
-static void
-armv6pmu_disable_event(struct hw_perf_event *hwc,
-                      int idx)
-{
-       unsigned long val, mask, evt, flags;
-
-       if (ARMV6_CYCLE_COUNTER == idx) {
-               mask    = ARMV6_PMCR_CCOUNT_IEN;
-               evt     = 0;
-       } else if (ARMV6_COUNTER0 == idx) {
-               mask    = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
-               evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
-       } else if (ARMV6_COUNTER1 == idx) {
-               mask    = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
-               evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
-       } else {
-               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-               return;
-       }
-
-       /*
-        * Mask out the current event and set the counter to count the number
-        * of ETM bus signal assertion cycles. The external reporting should
-        * be disabled and so this should never increment.
-        */
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = armv6_pmcr_read();
-       val &= ~mask;
-       val |= evt;
-       armv6_pmcr_write(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static void
-armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
-                             int idx)
-{
-       unsigned long val, mask, flags, evt = 0;
-
-       if (ARMV6_CYCLE_COUNTER == idx) {
-               mask    = ARMV6_PMCR_CCOUNT_IEN;
-       } else if (ARMV6_COUNTER0 == idx) {
-               mask    = ARMV6_PMCR_COUNT0_IEN;
-       } else if (ARMV6_COUNTER1 == idx) {
-               mask    = ARMV6_PMCR_COUNT1_IEN;
-       } else {
-               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-               return;
-       }
-
-       /*
-        * Unlike UP ARMv6, we don't have a way of stopping the counters. We
-        * simply disable the interrupt reporting.
-        */
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = armv6_pmcr_read();
-       val &= ~mask;
-       val |= evt;
-       armv6_pmcr_write(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static const struct arm_pmu armv6pmu = {
-       .id                     = ARM_PERF_PMU_ID_V6,
-       .handle_irq             = armv6pmu_handle_irq,
-       .enable                 = armv6pmu_enable_event,
-       .disable                = armv6pmu_disable_event,
-       .event_map              = armv6pmu_event_map,
-       .raw_event              = armv6pmu_raw_event,
-       .read_counter           = armv6pmu_read_counter,
-       .write_counter          = armv6pmu_write_counter,
-       .get_event_idx          = armv6pmu_get_event_idx,
-       .start                  = armv6pmu_start,
-       .stop                   = armv6pmu_stop,
-       .num_events             = 3,
-       .max_period             = (1LLU << 32) - 1,
-};
-
-/*
- * ARMv6mpcore is almost identical to single core ARMv6 with the exception
- * that some of the events have different enumerations and that there is no
- * *hack* to stop the programmable counters. To stop the counters we simply
- * disable the interrupt reporting and update the event. When unthrottling we
- * reset the period and enable the interrupt reporting.
- */
-static const struct arm_pmu armv6mpcore_pmu = {
-       .id                     = ARM_PERF_PMU_ID_V6MP,
-       .handle_irq             = armv6pmu_handle_irq,
-       .enable                 = armv6pmu_enable_event,
-       .disable                = armv6mpcore_pmu_disable_event,
-       .event_map              = armv6mpcore_pmu_event_map,
-       .raw_event              = armv6pmu_raw_event,
-       .read_counter           = armv6pmu_read_counter,
-       .write_counter          = armv6pmu_write_counter,
-       .get_event_idx          = armv6pmu_get_event_idx,
-       .start                  = armv6pmu_start,
-       .stop                   = armv6pmu_stop,
-       .num_events             = 3,
-       .max_period             = (1LLU << 32) - 1,
-};
-
-/*
- * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
- *
- * Copied from the ARMv6 code, with the low-level code inspired
- *  by the ARMv7 OProfile code.
- *
- * Cortex-A8 has up to 4 configurable performance counters and
- *  a single cycle counter.
- * Cortex-A9 has up to 31 configurable performance counters and
- *  a single cycle counter.
- *
- * All counters can be enabled/disabled and IRQ masked separately. The cycle
- *  counter and all 4 performance counters together can be reset separately.
- */
-
-/* Common ARMv7 event types */
-enum armv7_perf_types {
-       ARMV7_PERFCTR_PMNC_SW_INCR              = 0x00,
-       ARMV7_PERFCTR_IFETCH_MISS               = 0x01,
-       ARMV7_PERFCTR_ITLB_MISS                 = 0x02,
-       ARMV7_PERFCTR_DCACHE_REFILL             = 0x03,
-       ARMV7_PERFCTR_DCACHE_ACCESS             = 0x04,
-       ARMV7_PERFCTR_DTLB_REFILL               = 0x05,
-       ARMV7_PERFCTR_DREAD                     = 0x06,
-       ARMV7_PERFCTR_DWRITE                    = 0x07,
-
-       ARMV7_PERFCTR_EXC_TAKEN                 = 0x09,
-       ARMV7_PERFCTR_EXC_EXECUTED              = 0x0A,
-       ARMV7_PERFCTR_CID_WRITE                 = 0x0B,
-       /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
-        * It counts:
-        *  - all branch instructions,
-        *  - instructions that explicitly write the PC,
-        *  - exception generating instructions.
-        */
-       ARMV7_PERFCTR_PC_WRITE                  = 0x0C,
-       ARMV7_PERFCTR_PC_IMM_BRANCH             = 0x0D,
-       ARMV7_PERFCTR_UNALIGNED_ACCESS          = 0x0F,
-       ARMV7_PERFCTR_PC_BRANCH_MIS_PRED        = 0x10,
-       ARMV7_PERFCTR_CLOCK_CYCLES              = 0x11,
-
-       ARMV7_PERFCTR_PC_BRANCH_MIS_USED        = 0x12,
-
-       ARMV7_PERFCTR_CPU_CYCLES                = 0xFF
-};
-
-/* ARMv7 Cortex-A8 specific event types */
-enum armv7_a8_perf_types {
-       ARMV7_PERFCTR_INSTR_EXECUTED            = 0x08,
-
-       ARMV7_PERFCTR_PC_PROC_RETURN            = 0x0E,
-
-       ARMV7_PERFCTR_WRITE_BUFFER_FULL         = 0x40,
-       ARMV7_PERFCTR_L2_STORE_MERGED           = 0x41,
-       ARMV7_PERFCTR_L2_STORE_BUFF             = 0x42,
-       ARMV7_PERFCTR_L2_ACCESS                 = 0x43,
-       ARMV7_PERFCTR_L2_CACH_MISS              = 0x44,
-       ARMV7_PERFCTR_AXI_READ_CYCLES           = 0x45,
-       ARMV7_PERFCTR_AXI_WRITE_CYCLES          = 0x46,
-       ARMV7_PERFCTR_MEMORY_REPLAY             = 0x47,
-       ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY   = 0x48,
-       ARMV7_PERFCTR_L1_DATA_MISS              = 0x49,
-       ARMV7_PERFCTR_L1_INST_MISS              = 0x4A,
-       ARMV7_PERFCTR_L1_DATA_COLORING          = 0x4B,
-       ARMV7_PERFCTR_L1_NEON_DATA              = 0x4C,
-       ARMV7_PERFCTR_L1_NEON_CACH_DATA         = 0x4D,
-       ARMV7_PERFCTR_L2_NEON                   = 0x4E,
-       ARMV7_PERFCTR_L2_NEON_HIT               = 0x4F,
-       ARMV7_PERFCTR_L1_INST                   = 0x50,
-       ARMV7_PERFCTR_PC_RETURN_MIS_PRED        = 0x51,
-       ARMV7_PERFCTR_PC_BRANCH_FAILED          = 0x52,
-       ARMV7_PERFCTR_PC_BRANCH_TAKEN           = 0x53,
-       ARMV7_PERFCTR_PC_BRANCH_EXECUTED        = 0x54,
-       ARMV7_PERFCTR_OP_EXECUTED               = 0x55,
-       ARMV7_PERFCTR_CYCLES_INST_STALL         = 0x56,
-       ARMV7_PERFCTR_CYCLES_INST               = 0x57,
-       ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL    = 0x58,
-       ARMV7_PERFCTR_CYCLES_NEON_INST_STALL    = 0x59,
-       ARMV7_PERFCTR_NEON_CYCLES               = 0x5A,
-
-       ARMV7_PERFCTR_PMU0_EVENTS               = 0x70,
-       ARMV7_PERFCTR_PMU1_EVENTS               = 0x71,
-       ARMV7_PERFCTR_PMU_EVENTS                = 0x72,
-};
-
-/* ARMv7 Cortex-A9 specific event types */
-enum armv7_a9_perf_types {
-       ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC     = 0x40,
-       ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC     = 0x41,
-       ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC       = 0x42,
-
-       ARMV7_PERFCTR_COHERENT_LINE_MISS        = 0x50,
-       ARMV7_PERFCTR_COHERENT_LINE_HIT         = 0x51,
-
-       ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES   = 0x60,
-       ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES   = 0x61,
-       ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
-       ARMV7_PERFCTR_STREX_EXECUTED_PASSED     = 0x63,
-       ARMV7_PERFCTR_STREX_EXECUTED_FAILED     = 0x64,
-       ARMV7_PERFCTR_DATA_EVICTION             = 0x65,
-       ARMV7_PERFCTR_ISSUE_STAGE_NO_INST       = 0x66,
-       ARMV7_PERFCTR_ISSUE_STAGE_EMPTY         = 0x67,
-       ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE  = 0x68,
-
-       ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
-
-       ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST   = 0x70,
-       ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
-       ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST  = 0x72,
-       ARMV7_PERFCTR_FP_EXECUTED_INST          = 0x73,
-       ARMV7_PERFCTR_NEON_EXECUTED_INST        = 0x74,
-
-       ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
-       ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES  = 0x81,
-       ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES        = 0x82,
-       ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES        = 0x83,
-       ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES  = 0x84,
-       ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES  = 0x85,
-       ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES      = 0x86,
-
-       ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES  = 0x8A,
-       ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
-
-       ARMV7_PERFCTR_ISB_INST                  = 0x90,
-       ARMV7_PERFCTR_DSB_INST                  = 0x91,
-       ARMV7_PERFCTR_DMB_INST                  = 0x92,
-       ARMV7_PERFCTR_EXT_INTERRUPTS            = 0x93,
-
-       ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED     = 0xA0,
-       ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED       = 0xA1,
-       ARMV7_PERFCTR_PLE_FIFO_FLUSH            = 0xA2,
-       ARMV7_PERFCTR_PLE_RQST_COMPLETED        = 0xA3,
-       ARMV7_PERFCTR_PLE_FIFO_OVERFLOW         = 0xA4,
-       ARMV7_PERFCTR_PLE_RQST_PROG             = 0xA5
-};
-
-/*
- * Cortex-A8 HW events mapping
- *
- * The hardware events that we support. We do support cache operations but
- * we have Harvard caches and no way to combine instruction and data
- * accesses/misses in hardware.
- */
-static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
-};
-
-static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
-                                         [PERF_COUNT_HW_CACHE_OP_MAX]
-                                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-       [C(L1D)] = {
-               /*
-                * The performance counters don't differentiate between read
-                * and write accesses/misses so this isn't strictly correct,
-                * but it's the best we can do. Writes and reads get
-                * combined.
-                */
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(L1I)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(LL)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(DTLB)] = {
-               /*
-                * Only ITLB misses and DTLB refills are supported.
-                * If users want anything other than the DTLB refill
-                * misses, a raw counter must be used.
-                */
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(ITLB)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(BPU)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-};
-
-/*
- * Cortex-A9 HW events mapping
- */
-static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        =
-                                       ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
-       [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_COHERENT_LINE_MISS,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
-};
-
-static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
-                                         [PERF_COUNT_HW_CACHE_OP_MAX]
-                                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-       [C(L1D)] = {
-               /*
-                * The performance counters don't differentiate between read
-                * and write accesses/misses so this isn't strictly correct,
-                * but it's the best we can do. Writes and reads get
-                * combined.
-                */
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(L1I)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(LL)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(DTLB)] = {
-               /*
-                * Only ITLB misses and DTLB refills are supported.
-                * If users want anything other than the DTLB refill
-                * misses, a raw counter must be used.
-                */
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(ITLB)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(BPU)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-};
-
-/*
- * Perf Events counters
- */
-enum armv7_counters {
-       ARMV7_CYCLE_COUNTER             = 1,    /* Cycle counter */
-       ARMV7_COUNTER0                  = 2,    /* First event counter */
-};
-
-/*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
- */
-#define        ARMV7_COUNTER_LAST      (ARMV7_COUNTER0 + armpmu->num_events - 1)
-
-/*
- * ARMv7 low level PMNC access
- */
-
-/*
- * Per-CPU PMNC: config reg
- */
-#define ARMV7_PMNC_E           (1 << 0) /* Enable all counters */
-#define ARMV7_PMNC_P           (1 << 1) /* Reset all counters */
-#define ARMV7_PMNC_C           (1 << 2) /* Cycle counter reset */
-#define ARMV7_PMNC_D           (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV7_PMNC_X           (1 << 4) /* Export to ETM */
-#define ARMV7_PMNC_DP          (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define        ARMV7_PMNC_N_SHIFT      11       /* Number of counters supported */
-#define        ARMV7_PMNC_N_MASK       0x1f
-#define        ARMV7_PMNC_MASK         0x3f     /* Mask for writable bits */
-
-/*
- * Available counters
- */
-#define ARMV7_CNT0             0       /* First event counter */
-#define ARMV7_CCNT             31      /* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx        (ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C         (1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C         (1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C         (1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C         (1 << ARMV7_CCNT)
-
-/*
- * EVTSEL: Event selection reg
- */
-#define        ARMV7_EVTSEL_MASK       0xff            /* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define        ARMV7_SELECT_MASK       0x1f            /* Mask for writable bits */
-
-/*
- * FLAG: counters overflow flag status reg
- */
-#define ARMV7_FLAG_P(idx)      (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C           (1 << ARMV7_CCNT)
-#define        ARMV7_FLAG_MASK         0xffffffff      /* Mask for writable bits */
-#define        ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK
-
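To make the index translation concrete, a worked example derived purely from the macros above (no new behaviour): perf's event counters start at ARMV7_COUNTER0 == 2 while the hardware numbers them from ARMV7_CNT0 == 0, so:

        /*
         * ARMV7_EVENT_CNT_TO_CNTx        == 2 - 0        == 2
         * ARMV7_CNTENS_P(ARMV7_COUNTER0) == 1 << (2 - 2) == bit 0
         * ARMV7_CNTENS_C                 == 1 << 31      (the cycle counter)
         *
         * i.e. perf index 2 drives hardware event counter 0, and the CCNT
         * always sits at bit 31 of CNTENS/CNTENC/INTENS/INTENC/FLAG.
         */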
-static inline unsigned long armv7_pmnc_read(void)
-{
-       u32 val;
-       asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
-       return val;
-}
-
-static inline void armv7_pmnc_write(unsigned long val)
-{
-       val &= ARMV7_PMNC_MASK;
-       asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
-}
-
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
-{
-       return pmnc & ARMV7_OVERFLOWED_MASK;
-}
-
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
-                                       enum armv7_counters counter)
-{
-       int ret = 0;
-
-       if (counter == ARMV7_CYCLE_COUNTER)
-               ret = pmnc & ARMV7_FLAG_C;
-       else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-               ret = pmnc & ARMV7_FLAG_P(counter);
-       else
-               pr_err("CPU%u checking wrong counter %d overflow status\n",
-                       smp_processor_id(), counter);
-
-       return ret;
-}
-
-static inline int armv7_pmnc_select_counter(unsigned int idx)
-{
-       u32 val;
-
-       if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-               pr_err("CPU%u selecting wrong PMNC counter"
-                       " %d\n", smp_processor_id(), idx);
-               return -1;
-       }
-
-       val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-       asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
-
-       return idx;
-}
-
-static inline u32 armv7pmu_read_counter(int idx)
-{
-       unsigned long value = 0;
-
-       if (idx == ARMV7_CYCLE_COUNTER)
-               asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-       else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-               if (armv7_pmnc_select_counter(idx) == idx)
-                       asm volatile("mrc p15, 0, %0, c9, c13, 2"
-                                    : "=r" (value));
-       } else
-               pr_err("CPU%u reading wrong counter %d\n",
-                       smp_processor_id(), idx);
-
-       return value;
-}
-
-static inline void armv7pmu_write_counter(int idx, u32 value)
-{
-       if (idx == ARMV7_CYCLE_COUNTER)
-               asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-       else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-               if (armv7_pmnc_select_counter(idx) == idx)
-                       asm volatile("mcr p15, 0, %0, c9, c13, 2"
-                                    : : "r" (value));
-       } else
-               pr_err("CPU%u writing wrong counter %d\n",
-                       smp_processor_id(), idx);
-}
-
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
-{
-       if (armv7_pmnc_select_counter(idx) == idx) {
-               val &= ARMV7_EVTSEL_MASK;
-               asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
-       }
-}
-
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
-{
-       u32 val;
-
-       if ((idx != ARMV7_CYCLE_COUNTER) &&
-           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-               pr_err("CPU%u enabling wrong PMNC counter"
-                       " %d\n", smp_processor_id(), idx);
-               return -1;
-       }
-
-       if (idx == ARMV7_CYCLE_COUNTER)
-               val = ARMV7_CNTENS_C;
-       else
-               val = ARMV7_CNTENS_P(idx);
-
-       asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
-       return idx;
-}
-
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
-{
-       u32 val;
-
-       if ((idx != ARMV7_CYCLE_COUNTER) &&
-           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-               pr_err("CPU%u disabling wrong PMNC counter"
-                       " %d\n", smp_processor_id(), idx);
-               return -1;
-       }
-
-       if (idx == ARMV7_CYCLE_COUNTER)
-               val = ARMV7_CNTENC_C;
-       else
-               val = ARMV7_CNTENC_P(idx);
-
-       asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
-       return idx;
-}
-
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
-{
-       u32 val;
-
-       if ((idx != ARMV7_CYCLE_COUNTER) &&
-           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-               pr_err("CPU%u enabling wrong PMNC counter"
-                       " interrupt enable %d\n", smp_processor_id(), idx);
-               return -1;
-       }
-
-       if (idx == ARMV7_CYCLE_COUNTER)
-               val = ARMV7_INTENS_C;
-       else
-               val = ARMV7_INTENS_P(idx);
-
-       asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
-       return idx;
-}
-
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
-{
-       u32 val;
-
-       if ((idx != ARMV7_CYCLE_COUNTER) &&
-           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-               pr_err("CPU%u disabling wrong PMNC counter"
-                       " interrupt enable %d\n", smp_processor_id(), idx);
-               return -1;
-       }
-
-       if (idx == ARMV7_CYCLE_COUNTER)
-               val = ARMV7_INTENC_C;
-       else
-               val = ARMV7_INTENC_P(idx);
-
-       asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
-       return idx;
-}
-
-static inline u32 armv7_pmnc_getreset_flags(void)
-{
-       u32 val;
-
-       /* Read */
-       asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-
-       /* Write to clear flags */
-       val &= ARMV7_FLAG_MASK;
-       asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
-
-       return val;
-}
-
-#ifdef DEBUG
-static void armv7_pmnc_dump_regs(void)
-{
-       u32 val;
-       unsigned int cnt;
-
-       printk(KERN_INFO "PMNC registers dump:\n");
-
-       asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
-       printk(KERN_INFO "PMNC  =0x%08x\n", val);
-
-       asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
-       printk(KERN_INFO "CNTENS=0x%08x\n", val);
-
-       asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
-       printk(KERN_INFO "INTENS=0x%08x\n", val);
-
-       asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-       printk(KERN_INFO "FLAGS =0x%08x\n", val);
-
-       asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
-       printk(KERN_INFO "SELECT=0x%08x\n", val);
-
-       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
-       printk(KERN_INFO "CCNT  =0x%08x\n", val);
-
-       for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
-               armv7_pmnc_select_counter(cnt);
-               asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
-               printk(KERN_INFO "CNT[%d] count =0x%08x\n",
-                       cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
-               asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
-               printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
-                       cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
-       }
-}
-#endif
-
-void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
-{
-       unsigned long flags;
-
-       /*
-        * Enable counter and interrupt, and set the counter to count
-        * the event that we're interested in.
-        */
-       spin_lock_irqsave(&pmu_lock, flags);
-
-       /*
-        * Disable counter
-        */
-       armv7_pmnc_disable_counter(idx);
-
-       /*
-        * Set event (if destined for PMNx counters)
-        * We don't need to set the event if it's a cycle count
-        */
-       if (idx != ARMV7_CYCLE_COUNTER)
-               armv7_pmnc_write_evtsel(idx, hwc->config_base);
-
-       /*
-        * Enable interrupt for this counter
-        */
-       armv7_pmnc_enable_intens(idx);
-
-       /*
-        * Enable counter
-        */
-       armv7_pmnc_enable_counter(idx);
-
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
-       unsigned long flags;
-
-       /*
-        * Disable counter and interrupt
-        */
-       spin_lock_irqsave(&pmu_lock, flags);
-
-       /*
-        * Disable counter
-        */
-       armv7_pmnc_disable_counter(idx);
-
-       /*
-        * Disable interrupt for this counter
-        */
-       armv7_pmnc_disable_intens(idx);
-
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
-{
-       unsigned long pmnc;
-       struct perf_sample_data data;
-       struct cpu_hw_events *cpuc;
-       struct pt_regs *regs;
-       int idx;
-
-       /*
-        * Get and reset the IRQ flags
-        */
-       pmnc = armv7_pmnc_getreset_flags();
-
-       /*
-        * Did an overflow occur?
-        */
-       if (!armv7_pmnc_has_overflowed(pmnc))
-               return IRQ_NONE;
-
-       /*
-        * Handle the counter(s) overflow(s)
-        */
-       regs = get_irq_regs();
-
-       perf_sample_data_init(&data, 0);
-
-       cpuc = &__get_cpu_var(cpu_hw_events);
-       for (idx = 0; idx <= armpmu->num_events; ++idx) {
-               struct perf_event *event = cpuc->events[idx];
-               struct hw_perf_event *hwc;
-
-               if (!test_bit(idx, cpuc->active_mask))
-                       continue;
-
-               /*
-                * We have a single interrupt for all counters. Check that
-                * each counter has overflowed before we process it.
-                */
-               if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
-                       continue;
-
-               hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
-               if (!armpmu_event_set_period(event, hwc, idx))
-                       continue;
-
-               if (perf_event_overflow(event, 0, &data, regs))
-                       armpmu->disable(hwc, idx);
-       }
-
-       /*
-        * Handle the pending perf events.
-        *
-        * Note: this call *must* be run with interrupts disabled. For
-        * platforms that can have the PMU interrupts raised as an NMI, this
-        * will not work.
-        */
-       irq_work_run();
-
-       return IRQ_HANDLED;
-}
-
-static void armv7pmu_start(void)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       /* Enable all counters */
-       armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static void armv7pmu_stop(void)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       /* Disable all counters */
-       armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static inline int armv7_a8_pmu_event_map(int config)
-{
-       int mapping = armv7_a8_perf_map[config];
-       if (HW_OP_UNSUPPORTED == mapping)
-               mapping = -EOPNOTSUPP;
-       return mapping;
-}
-
-static inline int armv7_a9_pmu_event_map(int config)
-{
-       int mapping = armv7_a9_perf_map[config];
-       if (HW_OP_UNSUPPORTED == mapping)
-               mapping = -EOPNOTSUPP;
-       return mapping;
-}
-
-static u64 armv7pmu_raw_event(u64 config)
-{
-       return config & 0xff;
-}
-
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
-                                 struct hw_perf_event *event)
-{
-       int idx;
-
-       /* Always place a cycle counter into the cycle counter. */
-       if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
-               if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
-                       return -EAGAIN;
-
-               return ARMV7_CYCLE_COUNTER;
-       } else {
-               /*
-                * For anything other than a cycle counter, try and use
-                * the events counters
-                */
-               for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
-                       if (!test_and_set_bit(idx, cpuc->used_mask))
-                               return idx;
-               }
-
-               /* The counters are all in use. */
-               return -EAGAIN;
-       }
-}
-
-static struct arm_pmu armv7pmu = {
-       .handle_irq             = armv7pmu_handle_irq,
-       .enable                 = armv7pmu_enable_event,
-       .disable                = armv7pmu_disable_event,
-       .raw_event              = armv7pmu_raw_event,
-       .read_counter           = armv7pmu_read_counter,
-       .write_counter          = armv7pmu_write_counter,
-       .get_event_idx          = armv7pmu_get_event_idx,
-       .start                  = armv7pmu_start,
-       .stop                   = armv7pmu_stop,
-       .max_period             = (1LLU << 32) - 1,
-};
-
-static u32 __init armv7_reset_read_pmnc(void)
-{
-       u32 nb_cnt;
-
-       /* Initialize & Reset PMNC: C and P bits */
-       armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-
-       /* Read the nb of CNTx counters supported from PMNC */
-       nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
-
-       /* Add the CPU cycles counter and return */
-       return nb_cnt + 1;
-}
-
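As a quick worked example of the arithmetic above (using nothing beyond what the function reads; the variable name is purely illustrative): a core whose PMNC N field reports 4 event counters yields nb_cnt == 4, so the call returns 5 in total, the four programmable counters plus the dedicated cycle counter:

        u32 total_counters = armv7_reset_read_pmnc();   /* e.g. N == 4  ->  4 + 1 == 5 */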
-/*
- * ARMv5 [xscale] Performance counter handling code.
- *
- * Based on xscale OProfile code.
- *
- * There are two variants of the xscale PMU that we support:
- *     - xscale1pmu: 2 event counters and a cycle counter
- *     - xscale2pmu: 4 event counters and a cycle counter
- * The two variants share event definitions, but have different
- * PMU structures.
- */
-
-enum xscale_perf_types {
-       XSCALE_PERFCTR_ICACHE_MISS              = 0x00,
-       XSCALE_PERFCTR_ICACHE_NO_DELIVER        = 0x01,
-       XSCALE_PERFCTR_DATA_STALL               = 0x02,
-       XSCALE_PERFCTR_ITLB_MISS                = 0x03,
-       XSCALE_PERFCTR_DTLB_MISS                = 0x04,
-       XSCALE_PERFCTR_BRANCH                   = 0x05,
-       XSCALE_PERFCTR_BRANCH_MISS              = 0x06,
-       XSCALE_PERFCTR_INSTRUCTION              = 0x07,
-       XSCALE_PERFCTR_DCACHE_FULL_STALL        = 0x08,
-       XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
-       XSCALE_PERFCTR_DCACHE_ACCESS            = 0x0A,
-       XSCALE_PERFCTR_DCACHE_MISS              = 0x0B,
-       XSCALE_PERFCTR_DCACHE_WRITE_BACK        = 0x0C,
-       XSCALE_PERFCTR_PC_CHANGED               = 0x0D,
-       XSCALE_PERFCTR_BCU_REQUEST              = 0x10,
-       XSCALE_PERFCTR_BCU_FULL                 = 0x11,
-       XSCALE_PERFCTR_BCU_DRAIN                = 0x12,
-       XSCALE_PERFCTR_BCU_ECC_NO_ELOG          = 0x14,
-       XSCALE_PERFCTR_BCU_1_BIT_ERR            = 0x15,
-       XSCALE_PERFCTR_RMW                      = 0x16,
-       /* XSCALE_PERFCTR_CCNT is not hardware defined */
-       XSCALE_PERFCTR_CCNT                     = 0xFE,
-       XSCALE_PERFCTR_UNUSED                   = 0xFF,
-};
-
-enum xscale_counters {
-       XSCALE_CYCLE_COUNTER    = 1,
-       XSCALE_COUNTER0,
-       XSCALE_COUNTER1,
-       XSCALE_COUNTER2,
-       XSCALE_COUNTER3,
-};
-
-static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = XSCALE_PERFCTR_CCNT,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = XSCALE_PERFCTR_INSTRUCTION,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = XSCALE_PERFCTR_BRANCH_MISS,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
-};
-
-static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
-                                          [PERF_COUNT_HW_CACHE_OP_MAX]
-                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-       [C(L1D)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = XSCALE_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DCACHE_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = XSCALE_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DCACHE_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(L1I)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ICACHE_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ICACHE_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(LL)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(DTLB)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DTLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DTLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(ITLB)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ITLB_MISS,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-       [C(BPU)] = {
-               [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-               [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
-               },
-       },
-};
-
-#define        XSCALE_PMU_ENABLE       0x001
-#define XSCALE_PMN_RESET       0x002
-#define        XSCALE_CCNT_RESET       0x004
-#define        XSCALE_PMU_RESET        (CCNT_RESET | PMN_RESET)
-#define XSCALE_PMU_CNT64       0x008
-
-static inline int
-xscalepmu_event_map(int config)
-{
-       int mapping = xscale_perf_map[config];
-       if (HW_OP_UNSUPPORTED == mapping)
-               mapping = -EOPNOTSUPP;
-       return mapping;
-}
-
-static u64
-xscalepmu_raw_event(u64 config)
-{
-       return config & 0xff;
-}
-
-#define XSCALE1_OVERFLOWED_MASK        0x700
-#define XSCALE1_CCOUNT_OVERFLOW        0x400
-#define XSCALE1_COUNT0_OVERFLOW        0x100
-#define XSCALE1_COUNT1_OVERFLOW        0x200
-#define XSCALE1_CCOUNT_INT_EN  0x040
-#define XSCALE1_COUNT0_INT_EN  0x010
-#define XSCALE1_COUNT1_INT_EN  0x020
-#define XSCALE1_COUNT0_EVT_SHFT        12
-#define XSCALE1_COUNT0_EVT_MASK        (0xff << XSCALE1_COUNT0_EVT_SHFT)
-#define XSCALE1_COUNT1_EVT_SHFT        20
-#define XSCALE1_COUNT1_EVT_MASK        (0xff << XSCALE1_COUNT1_EVT_SHFT)
-
-static inline u32
-xscale1pmu_read_pmnc(void)
-{
-       u32 val;
-       asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
-       return val;
-}
-
-static inline void
-xscale1pmu_write_pmnc(u32 val)
-{
-       /* upper 4bits and 7, 11 are write-as-0 */
-       val &= 0xffff77f;
-       asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
-}
-
-static inline int
-xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
-                                       enum xscale_counters counter)
-{
-       int ret = 0;
-
-       switch (counter) {
-       case XSCALE_CYCLE_COUNTER:
-               ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
-               break;
-       case XSCALE_COUNTER0:
-               ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
-               break;
-       case XSCALE_COUNTER1:
-               ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
-               break;
-       default:
-               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-       }
-
-       return ret;
-}
-
-static irqreturn_t
-xscale1pmu_handle_irq(int irq_num, void *dev)
-{
-       unsigned long pmnc;
-       struct perf_sample_data data;
-       struct cpu_hw_events *cpuc;
-       struct pt_regs *regs;
-       int idx;
-
-       /*
-        * NOTE: there's an A stepping erratum that states if an overflow
-        *       bit already exists and another occurs, the previous
-        *       Overflow bit gets cleared. There's no workaround.
-        *       Fixed in B stepping or later.
-        */
-       pmnc = xscale1pmu_read_pmnc();
-
-       /*
-        * Write the value back to clear the overflow flags. Overflow
-        * flags remain in pmnc for use below. We also disable the PMU
-        * while we process the interrupt.
-        */
-       xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
-
-       if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
-               return IRQ_NONE;
-
-       regs = get_irq_regs();
-
-       perf_sample_data_init(&data, 0);
-
-       cpuc = &__get_cpu_var(cpu_hw_events);
-       for (idx = 0; idx <= armpmu->num_events; ++idx) {
-               struct perf_event *event = cpuc->events[idx];
-               struct hw_perf_event *hwc;
-
-               if (!test_bit(idx, cpuc->active_mask))
-                       continue;
-
-               if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
-                       continue;
-
-               hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
-               if (!armpmu_event_set_period(event, hwc, idx))
-                       continue;
-
-               if (perf_event_overflow(event, 0, &data, regs))
-                       armpmu->disable(hwc, idx);
-       }
-
-       irq_work_run();
-
-       /*
-        * Re-enable the PMU.
-        */
-       pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
-       xscale1pmu_write_pmnc(pmnc);
-
-       return IRQ_HANDLED;
-}
-
-static void
-xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
-{
-       unsigned long val, mask, evt, flags;
-
-       switch (idx) {
-       case XSCALE_CYCLE_COUNTER:
-               mask = 0;
-               evt = XSCALE1_CCOUNT_INT_EN;
-               break;
-       case XSCALE_COUNTER0:
-               mask = XSCALE1_COUNT0_EVT_MASK;
-               evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
-                       XSCALE1_COUNT0_INT_EN;
-               break;
-       case XSCALE_COUNTER1:
-               mask = XSCALE1_COUNT1_EVT_MASK;
-               evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
-                       XSCALE1_COUNT1_INT_EN;
-               break;
-       default:
-               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-               return;
-       }
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = xscale1pmu_read_pmnc();
-       val &= ~mask;
-       val |= evt;
-       xscale1pmu_write_pmnc(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static void
-xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
-       unsigned long val, mask, evt, flags;
-
-       switch (idx) {
-       case XSCALE_CYCLE_COUNTER:
-               mask = XSCALE1_CCOUNT_INT_EN;
-               evt = 0;
-               break;
-       case XSCALE_COUNTER0:
-               mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
-               evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
-               break;
-       case XSCALE_COUNTER1:
-               mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
-               evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
-               break;
-       default:
-               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-               return;
-       }
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = xscale1pmu_read_pmnc();
-       val &= ~mask;
-       val |= evt;
-       xscale1pmu_write_pmnc(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
-                       struct hw_perf_event *event)
-{
-       if (XSCALE_PERFCTR_CCNT == event->config_base) {
-               if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
-                       return -EAGAIN;
-
-               return XSCALE_CYCLE_COUNTER;
-       } else {
-               if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
-                       return XSCALE_COUNTER1;
-               }
-
-               if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
-                       return XSCALE_COUNTER0;
-               }
-
-               return -EAGAIN;
-       }
-}
-
-static void
-xscale1pmu_start(void)
-{
-       unsigned long flags, val;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = xscale1pmu_read_pmnc();
-       val |= XSCALE_PMU_ENABLE;
-       xscale1pmu_write_pmnc(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static void
-xscale1pmu_stop(void)
-{
-       unsigned long flags, val;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = xscale1pmu_read_pmnc();
-       val &= ~XSCALE_PMU_ENABLE;
-       xscale1pmu_write_pmnc(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static inline u32
-xscale1pmu_read_counter(int counter)
-{
-       u32 val = 0;
-
-       switch (counter) {
-       case XSCALE_CYCLE_COUNTER:
-               asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
-               break;
-       case XSCALE_COUNTER0:
-               asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
-               break;
-       case XSCALE_COUNTER1:
-               asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
-               break;
-       }
-
-       return val;
-}
-
-static inline void
-xscale1pmu_write_counter(int counter, u32 val)
-{
-       switch (counter) {
-       case XSCALE_CYCLE_COUNTER:
-               asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
-               break;
-       case XSCALE_COUNTER0:
-               asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
-               break;
-       case XSCALE_COUNTER1:
-               asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
-               break;
-       }
-}
-
-static const struct arm_pmu xscale1pmu = {
-       .id             = ARM_PERF_PMU_ID_XSCALE1,
-       .handle_irq     = xscale1pmu_handle_irq,
-       .enable         = xscale1pmu_enable_event,
-       .disable        = xscale1pmu_disable_event,
-       .event_map      = xscalepmu_event_map,
-       .raw_event      = xscalepmu_raw_event,
-       .read_counter   = xscale1pmu_read_counter,
-       .write_counter  = xscale1pmu_write_counter,
-       .get_event_idx  = xscale1pmu_get_event_idx,
-       .start          = xscale1pmu_start,
-       .stop           = xscale1pmu_stop,
-       .num_events     = 3,
-       .max_period     = (1LLU << 32) - 1,
-};
-
-#define XSCALE2_OVERFLOWED_MASK        0x01f
-#define XSCALE2_CCOUNT_OVERFLOW        0x001
-#define XSCALE2_COUNT0_OVERFLOW        0x002
-#define XSCALE2_COUNT1_OVERFLOW        0x004
-#define XSCALE2_COUNT2_OVERFLOW        0x008
-#define XSCALE2_COUNT3_OVERFLOW        0x010
-#define XSCALE2_CCOUNT_INT_EN  0x001
-#define XSCALE2_COUNT0_INT_EN  0x002
-#define XSCALE2_COUNT1_INT_EN  0x004
-#define XSCALE2_COUNT2_INT_EN  0x008
-#define XSCALE2_COUNT3_INT_EN  0x010
-#define XSCALE2_COUNT0_EVT_SHFT        0
-#define XSCALE2_COUNT0_EVT_MASK        (0xff << XSCALE2_COUNT0_EVT_SHFT)
-#define XSCALE2_COUNT1_EVT_SHFT        8
-#define XSCALE2_COUNT1_EVT_MASK        (0xff << XSCALE2_COUNT1_EVT_SHFT)
-#define XSCALE2_COUNT2_EVT_SHFT        16
-#define XSCALE2_COUNT2_EVT_MASK        (0xff << XSCALE2_COUNT2_EVT_SHFT)
-#define XSCALE2_COUNT3_EVT_SHFT        24
-#define XSCALE2_COUNT3_EVT_MASK        (0xff << XSCALE2_COUNT3_EVT_SHFT)
-
-static inline u32
-xscale2pmu_read_pmnc(void)
-{
-       u32 val;
-       asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
-       /* bits 1-2 and 4-23 are read-unpredictable */
-       return val & 0xff000009;
-}
-
-static inline void
-xscale2pmu_write_pmnc(u32 val)
-{
-       /* bits 4-23 are write-as-0, 24-31 are write ignored */
-       val &= 0xf;
-       asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
-}
-
-static inline u32
-xscale2pmu_read_overflow_flags(void)
-{
-       u32 val;
-       asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
-       return val;
-}
-
-static inline void
-xscale2pmu_write_overflow_flags(u32 val)
-{
-       asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
-}
-
-static inline u32
-xscale2pmu_read_event_select(void)
-{
-       u32 val;
-       asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
-       return val;
-}
-
-static inline void
-xscale2pmu_write_event_select(u32 val)
-{
-       asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
-}
-
-static inline u32
-xscale2pmu_read_int_enable(void)
-{
-       u32 val;
-       asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
-       return val;
-}
-
-static void
-xscale2pmu_write_int_enable(u32 val)
-{
-       asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
-}
-
-static inline int
-xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
-                                       enum xscale_counters counter)
-{
-       int ret = 0;
-
-       switch (counter) {
-       case XSCALE_CYCLE_COUNTER:
-               ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
-               break;
-       case XSCALE_COUNTER0:
-               ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
-               break;
-       case XSCALE_COUNTER1:
-               ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
-               break;
-       case XSCALE_COUNTER2:
-               ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
-               break;
-       case XSCALE_COUNTER3:
-               ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
-               break;
-       default:
-               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-       }
-
-       return ret;
-}
-
-static irqreturn_t
-xscale2pmu_handle_irq(int irq_num, void *dev)
-{
-       unsigned long pmnc, of_flags;
-       struct perf_sample_data data;
-       struct cpu_hw_events *cpuc;
-       struct pt_regs *regs;
-       int idx;
-
-       /* Disable the PMU. */
-       pmnc = xscale2pmu_read_pmnc();
-       xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
-
-       /* Check the overflow flag register. */
-       of_flags = xscale2pmu_read_overflow_flags();
-       if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
-               return IRQ_NONE;
-
-       /* Clear the overflow bits. */
-       xscale2pmu_write_overflow_flags(of_flags);
-
-       regs = get_irq_regs();
-
-       perf_sample_data_init(&data, 0);
-
-       cpuc = &__get_cpu_var(cpu_hw_events);
-       for (idx = 0; idx <= armpmu->num_events; ++idx) {
-               struct perf_event *event = cpuc->events[idx];
-               struct hw_perf_event *hwc;
-
-               if (!test_bit(idx, cpuc->active_mask))
-                       continue;
-
-               if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
-                       continue;
-
-               hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx);
-               data.period = event->hw.last_period;
-               if (!armpmu_event_set_period(event, hwc, idx))
-                       continue;
-
-               if (perf_event_overflow(event, 0, &data, regs))
-                       armpmu->disable(hwc, idx);
-       }
-
-       irq_work_run();
-
-       /*
-        * Re-enable the PMU.
-        */
-       pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
-       xscale2pmu_write_pmnc(pmnc);
-
-       return IRQ_HANDLED;
-}
-
-static void
-xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
-{
-       unsigned long flags, ien, evtsel;
-
-       ien = xscale2pmu_read_int_enable();
-       evtsel = xscale2pmu_read_event_select();
-
-       switch (idx) {
-       case XSCALE_CYCLE_COUNTER:
-               ien |= XSCALE2_CCOUNT_INT_EN;
-               break;
-       case XSCALE_COUNTER0:
-               ien |= XSCALE2_COUNT0_INT_EN;
-               evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
-               evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
-               break;
-       case XSCALE_COUNTER1:
-               ien |= XSCALE2_COUNT1_INT_EN;
-               evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
-               evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
-               break;
-       case XSCALE_COUNTER2:
-               ien |= XSCALE2_COUNT2_INT_EN;
-               evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
-               evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
-               break;
-       case XSCALE_COUNTER3:
-               ien |= XSCALE2_COUNT3_INT_EN;
-               evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
-               evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
-               break;
-       default:
-               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-               return;
-       }
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       xscale2pmu_write_event_select(evtsel);
-       xscale2pmu_write_int_enable(ien);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static void
-xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
-       unsigned long flags, ien, evtsel;
-
-       ien = xscale2pmu_read_int_enable();
-       evtsel = xscale2pmu_read_event_select();
-
-       switch (idx) {
-       case XSCALE_CYCLE_COUNTER:
-               ien &= ~XSCALE2_CCOUNT_INT_EN;
-               break;
-       case XSCALE_COUNTER0:
-               ien &= ~XSCALE2_COUNT0_INT_EN;
-               evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
-               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
-               break;
-       case XSCALE_COUNTER1:
-               ien &= ~XSCALE2_COUNT1_INT_EN;
-               evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
-               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
-               break;
-       case XSCALE_COUNTER2:
-               ien &= ~XSCALE2_COUNT2_INT_EN;
-               evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
-               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
-               break;
-       case XSCALE_COUNTER3:
-               ien &= ~XSCALE2_COUNT3_INT_EN;
-               evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
-               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
-               break;
-       default:
-               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-               return;
-       }
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       xscale2pmu_write_event_select(evtsel);
-       xscale2pmu_write_int_enable(ien);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
-                       struct hw_perf_event *event)
-{
-       int idx = xscale1pmu_get_event_idx(cpuc, event);
-       if (idx >= 0)
-               goto out;
-
-       if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
-               idx = XSCALE_COUNTER3;
-       else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
-               idx = XSCALE_COUNTER2;
-out:
-       return idx;
-}
-
-static void
-xscale2pmu_start(void)
-{
-       unsigned long flags, val;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
-       val |= XSCALE_PMU_ENABLE;
-       xscale2pmu_write_pmnc(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static void
-xscale2pmu_stop(void)
-{
-       unsigned long flags, val;
-
-       spin_lock_irqsave(&pmu_lock, flags);
-       val = xscale2pmu_read_pmnc();
-       val &= ~XSCALE_PMU_ENABLE;
-       xscale2pmu_write_pmnc(val);
-       spin_unlock_irqrestore(&pmu_lock, flags);
-}
-
-static inline u32
-xscale2pmu_read_counter(int counter)
-{
-       u32 val = 0;
-
-       switch (counter) {
-       case XSCALE_CYCLE_COUNTER:
-               asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
-               break;
-       case XSCALE_COUNTER0:
-               asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
-               break;
-       case XSCALE_COUNTER1:
-               asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
-               break;
-       case XSCALE_COUNTER2:
-               asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
-               break;
-       case XSCALE_COUNTER3:
-               asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
-               break;
-       }
-
-       return val;
-}
-
-static inline void
-xscale2pmu_write_counter(int counter, u32 val)
-{
-       switch (counter) {
-       case XSCALE_CYCLE_COUNTER:
-               asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
-               break;
-       case XSCALE_COUNTER0:
-               asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
-               break;
-       case XSCALE_COUNTER1:
-               asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
-               break;
-       case XSCALE_COUNTER2:
-               asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
-               break;
-       case XSCALE_COUNTER3:
-               asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
-               break;
-       }
-}
-
-static const struct arm_pmu xscale2pmu = {
-       .id             = ARM_PERF_PMU_ID_XSCALE2,
-       .handle_irq     = xscale2pmu_handle_irq,
-       .enable         = xscale2pmu_enable_event,
-       .disable        = xscale2pmu_disable_event,
-       .event_map      = xscalepmu_event_map,
-       .raw_event      = xscalepmu_raw_event,
-       .read_counter   = xscale2pmu_read_counter,
-       .write_counter  = xscale2pmu_write_counter,
-       .get_event_idx  = xscale2pmu_get_event_idx,
-       .start          = xscale2pmu_start,
-       .stop           = xscale2pmu_stop,
-       .num_events     = 5,
-       .max_period     = (1LLU << 32) - 1,
-};
+/* Include the PMU-specific implementations. */
+#include "perf_event_xscale.c"
+#include "perf_event_v6.c"
+#include "perf_event_v7.c"
 
 static int __init
 init_hw_perf_events(void)
@@ -2977,37 +622,16 @@ init_hw_perf_events(void)
                case 0xB360:    /* ARM1136 */
                case 0xB560:    /* ARM1156 */
                case 0xB760:    /* ARM1176 */
-                       armpmu = &armv6pmu;
-                       memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
-                                       sizeof(armv6_perf_cache_map));
+                       armpmu = armv6pmu_init();
                        break;
                case 0xB020:    /* ARM11mpcore */
-                       armpmu = &armv6mpcore_pmu;
-                       memcpy(armpmu_perf_cache_map,
-                              armv6mpcore_perf_cache_map,
-                              sizeof(armv6mpcore_perf_cache_map));
+                       armpmu = armv6mpcore_pmu_init();
                        break;
                case 0xC080:    /* Cortex-A8 */
-                       armv7pmu.id = ARM_PERF_PMU_ID_CA8;
-                       memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
-                               sizeof(armv7_a8_perf_cache_map));
-                       armv7pmu.event_map = armv7_a8_pmu_event_map;
-                       armpmu = &armv7pmu;
-
-                       /* Reset PMNC and read the nb of CNTx counters
-                           supported */
-                       armv7pmu.num_events = armv7_reset_read_pmnc();
+                       armpmu = armv7_a8_pmu_init();
                        break;
                case 0xC090:    /* Cortex-A9 */
-                       armv7pmu.id = ARM_PERF_PMU_ID_CA9;
-                       memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
-                               sizeof(armv7_a9_perf_cache_map));
-                       armv7pmu.event_map = armv7_a9_pmu_event_map;
-                       armpmu = &armv7pmu;
-
-                       /* Reset PMNC and read the nb of CNTx counters
-                           supported */
-                       armv7pmu.num_events = armv7_reset_read_pmnc();
+                       armpmu = armv7_a9_pmu_init();
                        break;
                }
        /* Intel CPUs [xscale]. */
@@ -3015,21 +639,17 @@ init_hw_perf_events(void)
                part_number = (cpuid >> 13) & 0x7;
                switch (part_number) {
                case 1:
-                       armpmu = &xscale1pmu;
-                       memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
-                                       sizeof(xscale_perf_cache_map));
+                       armpmu = xscale1pmu_init();
                        break;
                case 2:
-                       armpmu = &xscale2pmu;
-                       memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
-                                       sizeof(xscale_perf_cache_map));
+                       armpmu = xscale2pmu_init();
                        break;
                }
        }
 
        if (armpmu) {
                pr_info("enabled with %s PMU driver, %d counters available\n",
-                               arm_pmu_names[armpmu->id], armpmu->num_events);
+                       armpmu->name, armpmu->num_events);
        } else {
                pr_info("no hardware support available\n");
        }
@@ -3053,17 +673,17 @@ arch_initcall(init_hw_perf_events);
  * This code has been adapted from the ARM OProfile support.
  */
 struct frame_tail {
-       struct frame_tail   *fp;
-       unsigned long       sp;
-       unsigned long       lr;
+       struct frame_tail __user *fp;
+       unsigned long sp;
+       unsigned long lr;
 } __attribute__((packed));
 
 /*
  * Get the return address for a single stackframe and return a pointer to the
  * next frame tail.
  */
-static struct frame_tail *
-user_backtrace(struct frame_tail *tail,
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
 {
        struct frame_tail buftail;
@@ -3089,10 +709,10 @@ user_backtrace(struct frame_tail *tail,
 void
 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct frame_tail *tail;
+       struct frame_tail __user *tail;
 
 
-       tail = (struct frame_tail *)regs->ARM_fp - 1;
+       tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
        while (tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
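The hunk above also shows how the user-space callchain is produced: regs->ARM_fp is rewound by one record to reach a packed frame_tail, and the chain is followed while the pointer stays non-NULL and word-aligned, each record contributing its saved lr as one callchain entry. The body of user_backtrace() is elided by the hunk, so the stand-alone sketch below only illustrates that style of frame-pointer walk; the copy helper, the depth limit and the monotonicity check are assumptions for the example, not text taken from the patch.

/* Hedged sketch of a frame-pointer (frame_tail) walk; helper choices are
 * assumptions, not the patch's code. */
struct sketch_frame_tail {
	struct sketch_frame_tail __user *fp;	/* previous frame record */
	unsigned long sp;
	unsigned long lr;			/* return address to report */
} __attribute__((packed));

static void sketch_perf_callchain_user(struct perf_callchain_entry *entry,
				       struct pt_regs *regs)
{
	struct sketch_frame_tail __user *tail;
	struct sketch_frame_tail buftail;
	int depth = 0;

	tail = (struct sketch_frame_tail __user *)regs->ARM_fp - 1;

	/* Same termination test as the loop above: NULL or unaligned fp. */
	while (tail && !((unsigned long)tail & 0x3) && depth++ < 128) {
		/* Assumed helper: a faulting user read must be tolerated. */
		if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
			break;

		perf_callchain_store(entry, buftail.lr);

		/* Frame pointers should move strictly up the stack. */
		if (buftail.fp <= tail)
			break;
		tail = buftail.fp - 1;
	}
}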
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
new file mode 100644 (file)
index 0000000..c058bfc
--- /dev/null
@@ -0,0 +1,672 @@
+/*
+ * ARMv6 Performance counter handling code.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ *
+ * ARMv6 has 2 configurable performance counters and a single cycle counter.
+ * They all share a single reset bit but can be written to zero so we can use
+ * that for a reset.
+ *
+ * The counters can't be individually enabled or disabled so when we remove
+ * one event and replace it with another we could get spurious counts from the
+ * wrong event. However, we can take advantage of the fact that the
+ * performance counters can export events to the event bus, and the event bus
+ * itself can be monitored. This requires that we *don't* export the events to
+ * the event bus. The procedure for disabling a configurable counter is:
+ *     - change the counter to count the ETMEXTOUT[0] signal (0x20). This
+ *       effectively stops the counter from counting.
+ *     - disable the counter's interrupt generation (each counter has its
+ *       own interrupt enable bit).
+ * Once stopped, the counter value can be written as 0 to reset.
+ *
+ * To enable a counter:
+ *     - enable the counter's interrupt generation.
+ *     - set the new event type.
+ *
+ * Note: the dedicated cycle counter only counts cycles and can't be
+ * enabled/disabled independently of the others. When we want to disable the
+ * cycle counter, we have to just disable the interrupt reporting and start
+ * ignoring that counter. When re-enabling, we have to reset the value and
+ * enable the interrupt.
+ */
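To make the disable procedure above concrete, the condensed sketch below applies it to counter 0. It mirrors armv6pmu_disable_event() further down in this file and only uses the ARMV6_PMCR_* constants, armv6_pmcr_read()/armv6_pmcr_write() and armv6pmu_write_counter() defined below; the wrapper function name itself is invented for illustration.

/* Illustrative condensation of the procedure above, for counter 0 only. */
static void sketch_armv6_disable_counter0(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	/* Stop interrupt generation and mask out the current event... */
	val &= ~(ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK);
	/* ...then retarget the counter at ETMEXTOUT[0] so it stops counting. */
	val |= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);

	/* Once stopped, the counter can be reset by writing zero. */
	armv6pmu_write_counter(ARMV6_COUNTER0, 0);
}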
+
+#ifdef CONFIG_CPU_V6
+enum armv6_perf_types {
+       ARMV6_PERFCTR_ICACHE_MISS           = 0x0,
+       ARMV6_PERFCTR_IBUF_STALL            = 0x1,
+       ARMV6_PERFCTR_DDEP_STALL            = 0x2,
+       ARMV6_PERFCTR_ITLB_MISS             = 0x3,
+       ARMV6_PERFCTR_DTLB_MISS             = 0x4,
+       ARMV6_PERFCTR_BR_EXEC               = 0x5,
+       ARMV6_PERFCTR_BR_MISPREDICT         = 0x6,
+       ARMV6_PERFCTR_INSTR_EXEC            = 0x7,
+       ARMV6_PERFCTR_DCACHE_HIT            = 0x9,
+       ARMV6_PERFCTR_DCACHE_ACCESS         = 0xA,
+       ARMV6_PERFCTR_DCACHE_MISS           = 0xB,
+       ARMV6_PERFCTR_DCACHE_WBACK          = 0xC,
+       ARMV6_PERFCTR_SW_PC_CHANGE          = 0xD,
+       ARMV6_PERFCTR_MAIN_TLB_MISS         = 0xF,
+       ARMV6_PERFCTR_EXPL_D_ACCESS         = 0x10,
+       ARMV6_PERFCTR_LSU_FULL_STALL        = 0x11,
+       ARMV6_PERFCTR_WBUF_DRAINED          = 0x12,
+       ARMV6_PERFCTR_CPU_CYCLES            = 0xFF,
+       ARMV6_PERFCTR_NOP                   = 0x20,
+};
+
+enum armv6_counters {
+       ARMV6_CYCLE_COUNTER = 1,
+       ARMV6_COUNTER0,
+       ARMV6_COUNTER1,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6_PERFCTR_INSTR_EXEC,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6_PERFCTR_BR_MISPREDICT,
+       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                         [PERF_COUNT_HW_CACHE_OP_MAX]
+                                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               /*
+                * The performance counters don't differentiate between read
+                * and write accesses/misses so this isn't strictly correct,
+                * but it's the best we can do. Writes and reads get
+                * combined.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV6_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DCACHE_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV6_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DCACHE_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               /*
+                * The ARM performance counters can count micro DTLB misses,
+                * micro ITLB misses and main TLB misses. There isn't an event
+                * for TLB misses, so use the micro misses here and if users
+                * want the main TLB misses they can use a raw counter.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DTLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_DTLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+enum armv6mpcore_perf_types {
+       ARMV6MPCORE_PERFCTR_ICACHE_MISS     = 0x0,
+       ARMV6MPCORE_PERFCTR_IBUF_STALL      = 0x1,
+       ARMV6MPCORE_PERFCTR_DDEP_STALL      = 0x2,
+       ARMV6MPCORE_PERFCTR_ITLB_MISS       = 0x3,
+       ARMV6MPCORE_PERFCTR_DTLB_MISS       = 0x4,
+       ARMV6MPCORE_PERFCTR_BR_EXEC         = 0x5,
+       ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
+       ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
+       ARMV6MPCORE_PERFCTR_INSTR_EXEC      = 0x8,
+       ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
+       ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
+       ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
+       ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
+       ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
+       ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
+       ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
+       ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
+       ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
+       ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
+       ARMV6MPCORE_PERFCTR_CPU_CYCLES      = 0xFF,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                       [PERF_COUNT_HW_CACHE_OP_MAX]
+                                       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]  =
+                               ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
+                       [C(RESULT_MISS)]    =
+                               ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]  =
+                               ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
+                       [C(RESULT_MISS)]    =
+                               ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               /*
+                * The ARM performance counters can count micro DTLB misses,
+                * micro ITLB misses and main TLB misses. There isn't an event
+                * for TLB misses, so use the micro misses here and if users
+                * want the main TLB misses they can use a raw counter.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+static inline unsigned long
+armv6_pmcr_read(void)
+{
+       u32 val;
+       asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
+       return val;
+}
+
+static inline void
+armv6_pmcr_write(unsigned long val)
+{
+       asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
+}
+
+#define ARMV6_PMCR_ENABLE              (1 << 0)
+#define ARMV6_PMCR_CTR01_RESET         (1 << 1)
+#define ARMV6_PMCR_CCOUNT_RESET                (1 << 2)
+#define ARMV6_PMCR_CCOUNT_DIV          (1 << 3)
+#define ARMV6_PMCR_COUNT0_IEN          (1 << 4)
+#define ARMV6_PMCR_COUNT1_IEN          (1 << 5)
+#define ARMV6_PMCR_CCOUNT_IEN          (1 << 6)
+#define ARMV6_PMCR_COUNT0_OVERFLOW     (1 << 8)
+#define ARMV6_PMCR_COUNT1_OVERFLOW     (1 << 9)
+#define ARMV6_PMCR_CCOUNT_OVERFLOW     (1 << 10)
+#define ARMV6_PMCR_EVT_COUNT0_SHIFT    20
+#define ARMV6_PMCR_EVT_COUNT0_MASK     (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
+#define ARMV6_PMCR_EVT_COUNT1_SHIFT    12
+#define ARMV6_PMCR_EVT_COUNT1_MASK     (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
+
+#define ARMV6_PMCR_OVERFLOWED_MASK \
+       (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
+        ARMV6_PMCR_CCOUNT_OVERFLOW)
+
+static inline int
+armv6_pmcr_has_overflowed(unsigned long pmcr)
+{
+       return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
+}
+
+static inline int
+armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
+                                 enum armv6_counters counter)
+{
+       int ret = 0;
+
+       if (ARMV6_CYCLE_COUNTER == counter)
+               ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
+       else if (ARMV6_COUNTER0 == counter)
+               ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
+       else if (ARMV6_COUNTER1 == counter)
+               ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
+       else
+               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+       return ret;
+}
+
+static inline u32
+armv6pmu_read_counter(int counter)
+{
+       unsigned long value = 0;
+
+       if (ARMV6_CYCLE_COUNTER == counter)
+               asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
+       else if (ARMV6_COUNTER0 == counter)
+               asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
+       else if (ARMV6_COUNTER1 == counter)
+               asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
+       else
+               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+       return value;
+}
+
+static inline void
+armv6pmu_write_counter(int counter,
+                      u32 value)
+{
+       if (ARMV6_CYCLE_COUNTER == counter)
+               asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
+       else if (ARMV6_COUNTER0 == counter)
+               asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
+       else if (ARMV6_COUNTER1 == counter)
+               asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
+       else
+               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+}
+
+static void
+armv6pmu_enable_event(struct hw_perf_event *hwc,
+                     int idx)
+{
+       unsigned long val, mask, evt, flags;
+
+       if (ARMV6_CYCLE_COUNTER == idx) {
+               mask    = 0;
+               evt     = ARMV6_PMCR_CCOUNT_IEN;
+       } else if (ARMV6_COUNTER0 == idx) {
+               mask    = ARMV6_PMCR_EVT_COUNT0_MASK;
+               evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
+                         ARMV6_PMCR_COUNT0_IEN;
+       } else if (ARMV6_COUNTER1 == idx) {
+               mask    = ARMV6_PMCR_EVT_COUNT1_MASK;
+               evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
+                         ARMV6_PMCR_COUNT1_IEN;
+       } else {
+               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+               return;
+       }
+
+       /*
+        * Mask out the current event and set the counter to count the event
+        * that we're interested in.
+        */
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = armv6_pmcr_read();
+       val &= ~mask;
+       val |= evt;
+       armv6_pmcr_write(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static irqreturn_t
+armv6pmu_handle_irq(int irq_num,
+                   void *dev)
+{
+       unsigned long pmcr = armv6_pmcr_read();
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc;
+       struct pt_regs *regs;
+       int idx;
+
+       if (!armv6_pmcr_has_overflowed(pmcr))
+               return IRQ_NONE;
+
+       regs = get_irq_regs();
+
+       /*
+        * The interrupts are cleared by writing the overflow flags back to
+        * the control register. All of the other bits don't have any effect
+        * if they are rewritten, so write the whole value back.
+        */
+       armv6_pmcr_write(pmcr);
+
+       perf_sample_data_init(&data, 0);
+
+       cpuc = &__get_cpu_var(cpu_hw_events);
+       for (idx = 0; idx <= armpmu->num_events; ++idx) {
+               struct perf_event *event = cpuc->events[idx];
+               struct hw_perf_event *hwc;
+
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               /*
+                * We have a single interrupt for all counters. Check that
+                * each counter has overflowed before we process it.
+                */
+               if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
+                       continue;
+
+               hwc = &event->hw;
+               armpmu_event_update(event, hwc, idx);
+               data.period = event->hw.last_period;
+               if (!armpmu_event_set_period(event, hwc, idx))
+                       continue;
+
+               if (perf_event_overflow(event, 0, &data, regs))
+                       armpmu->disable(hwc, idx);
+       }
+
+       /*
+        * Handle the pending perf events.
+        *
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
+        * will not work.
+        */
+       irq_work_run();
+
+       return IRQ_HANDLED;
+}
+
+static void
+armv6pmu_start(void)
+{
+       unsigned long flags, val;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = armv6_pmcr_read();
+       val |= ARMV6_PMCR_ENABLE;
+       armv6_pmcr_write(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+armv6pmu_stop(void)
+{
+       unsigned long flags, val;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = armv6_pmcr_read();
+       val &= ~ARMV6_PMCR_ENABLE;
+       armv6_pmcr_write(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+                      struct hw_perf_event *event)
+{
+       /* Always place a cycle counter into the cycle counter. */
+       if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+               if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
+                       return -EAGAIN;
+
+               return ARMV6_CYCLE_COUNTER;
+       } else {
+               /*
+                * For anything other than a cycle counter, try to use
+                * counter0 and counter1.
+                */
+               if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
+                       return ARMV6_COUNTER1;
+
+               if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
+                       return ARMV6_COUNTER0;
+
+               /* The counters are all in use. */
+               return -EAGAIN;
+       }
+}
+
+static void
+armv6pmu_disable_event(struct hw_perf_event *hwc,
+                      int idx)
+{
+       unsigned long val, mask, evt, flags;
+
+       if (ARMV6_CYCLE_COUNTER == idx) {
+               mask    = ARMV6_PMCR_CCOUNT_IEN;
+               evt     = 0;
+       } else if (ARMV6_COUNTER0 == idx) {
+               mask    = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
+               evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
+       } else if (ARMV6_COUNTER1 == idx) {
+               mask    = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
+               evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
+       } else {
+               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+               return;
+       }
+
+       /*
+        * Mask out the current event and set the counter to count the number
+        * of ETM bus signal assertion cycles. The external reporting should
+        * be disabled and so this should never increment.
+        */
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = armv6_pmcr_read();
+       val &= ~mask;
+       val |= evt;
+       armv6_pmcr_write(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
+                             int idx)
+{
+       unsigned long val, mask, flags, evt = 0;
+
+       if (ARMV6_CYCLE_COUNTER == idx) {
+               mask    = ARMV6_PMCR_CCOUNT_IEN;
+       } else if (ARMV6_COUNTER0 == idx) {
+               mask    = ARMV6_PMCR_COUNT0_IEN;
+       } else if (ARMV6_COUNTER1 == idx) {
+               mask    = ARMV6_PMCR_COUNT1_IEN;
+       } else {
+               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+               return;
+       }
+
+       /*
+        * Unlike UP ARMv6, we don't have a way of stopping the counters. We
+        * simply disable the interrupt reporting.
+        */
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = armv6_pmcr_read();
+       val &= ~mask;
+       val |= evt;
+       armv6_pmcr_write(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static const struct arm_pmu armv6pmu = {
+       .id                     = ARM_PERF_PMU_ID_V6,
+       .name                   = "v6",
+       .handle_irq             = armv6pmu_handle_irq,
+       .enable                 = armv6pmu_enable_event,
+       .disable                = armv6pmu_disable_event,
+       .read_counter           = armv6pmu_read_counter,
+       .write_counter          = armv6pmu_write_counter,
+       .get_event_idx          = armv6pmu_get_event_idx,
+       .start                  = armv6pmu_start,
+       .stop                   = armv6pmu_stop,
+       .cache_map              = &armv6_perf_cache_map,
+       .event_map              = &armv6_perf_map,
+       .raw_event_mask         = 0xFF,
+       .num_events             = 3,
+       .max_period             = (1LLU << 32) - 1,
+};
+
+static const struct arm_pmu *__init armv6pmu_init(void)
+{
+       return &armv6pmu;
+}
+
+/*
+ * ARMv6mpcore is almost identical to single core ARMv6 with the exception
+ * that some of the events have different enumerations and that there is no
+ * *hack* to stop the programmable counters. To stop the counters we simply
+ * disable the interrupt reporting and update the event. When unthrottling we
+ * reset the period and enable the interrupt reporting.
+ */
+static const struct arm_pmu armv6mpcore_pmu = {
+       .id                     = ARM_PERF_PMU_ID_V6MP,
+       .name                   = "v6mpcore",
+       .handle_irq             = armv6pmu_handle_irq,
+       .enable                 = armv6pmu_enable_event,
+       .disable                = armv6mpcore_pmu_disable_event,
+       .read_counter           = armv6pmu_read_counter,
+       .write_counter          = armv6pmu_write_counter,
+       .get_event_idx          = armv6pmu_get_event_idx,
+       .start                  = armv6pmu_start,
+       .stop                   = armv6pmu_stop,
+       .cache_map              = &armv6mpcore_perf_cache_map,
+       .event_map              = &armv6mpcore_perf_map,
+       .raw_event_mask         = 0xFF,
+       .num_events             = 3,
+       .max_period             = (1LLU << 32) - 1,
+};
+
+static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+{
+       return &armv6mpcore_pmu;
+}
+#else
+static const struct arm_pmu *__init armv6pmu_init(void)
+{
+       return NULL;
+}
+
+static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+{
+       return NULL;
+}
+#endif /* CONFIG_CPU_V6 */
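The #else stubs above spell out the contract each split-out file follows: the event tables and callbacks are compiled only under the matching CONFIG_CPU_* option, while a small *_init() function is always available and returns either the static struct arm_pmu descriptor or NULL. The CPUID switch in init_hw_perf_events(), shown earlier in this diff, can therefore call the init functions unconditionally and just test the result. A condensed sketch of the pattern, with a made-up "foo" PMU used purely for illustration:

/* Hypothetical "foo" PMU, showing only the init contract. */
#ifdef CONFIG_CPU_FOO
static const struct arm_pmu foopmu = {
	.name		= "foo",
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
	/* .handle_irq, .enable, .disable, .event_map, ... as in armv6pmu */
};

static const struct arm_pmu *__init foopmu_init(void)
{
	return &foopmu;
}
#else
static const struct arm_pmu *__init foopmu_init(void)
{
	return NULL;	/* probe code then reports "no hardware support" */
}
#endif /* CONFIG_CPU_FOO */

On the caller side each case in the CPUID switch then reduces to a single assignment such as armpmu = foopmu_init();, as the perf_event.c hunk earlier shows for the real PMUs.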
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
new file mode 100644 (file)
index 0000000..2e14025
--- /dev/null
@@ -0,0 +1,906 @@
+/*
+ * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
+ *
+ * ARMv7 support: Jean Pihet <jpihet@mvista.com>
+ * 2010 (c) MontaVista Software, LLC.
+ *
+ * Copied from ARMv6 code, with the low-level code inspired
+ *  by the ARMv7 OProfile code.
+ *
+ * Cortex-A8 has up to 4 configurable performance counters and
+ *  a single cycle counter.
+ * Cortex-A9 has up to 31 configurable performance counters and
+ *  a single cycle counter.
+ *
+ * All counters can be enabled/disabled and IRQ masked separately. The cycle
+ *  counter and all 4 performance counters together can be reset separately.
+ */
+
+#ifdef CONFIG_CPU_V7
+/* Common ARMv7 event types */
+enum armv7_perf_types {
+       ARMV7_PERFCTR_PMNC_SW_INCR              = 0x00,
+       ARMV7_PERFCTR_IFETCH_MISS               = 0x01,
+       ARMV7_PERFCTR_ITLB_MISS                 = 0x02,
+       ARMV7_PERFCTR_DCACHE_REFILL             = 0x03,
+       ARMV7_PERFCTR_DCACHE_ACCESS             = 0x04,
+       ARMV7_PERFCTR_DTLB_REFILL               = 0x05,
+       ARMV7_PERFCTR_DREAD                     = 0x06,
+       ARMV7_PERFCTR_DWRITE                    = 0x07,
+
+       ARMV7_PERFCTR_EXC_TAKEN                 = 0x09,
+       ARMV7_PERFCTR_EXC_EXECUTED              = 0x0A,
+       ARMV7_PERFCTR_CID_WRITE                 = 0x0B,
+       /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+        * It counts:
+        *  - all branch instructions,
+        *  - instructions that explicitly write the PC,
+        *  - exception generating instructions.
+        */
+       ARMV7_PERFCTR_PC_WRITE                  = 0x0C,
+       ARMV7_PERFCTR_PC_IMM_BRANCH             = 0x0D,
+       ARMV7_PERFCTR_UNALIGNED_ACCESS          = 0x0F,
+       ARMV7_PERFCTR_PC_BRANCH_MIS_PRED        = 0x10,
+       ARMV7_PERFCTR_CLOCK_CYCLES              = 0x11,
+
+       ARMV7_PERFCTR_PC_BRANCH_MIS_USED        = 0x12,
+
+       ARMV7_PERFCTR_CPU_CYCLES                = 0xFF
+};
+
+/* ARMv7 Cortex-A8 specific event types */
+enum armv7_a8_perf_types {
+       ARMV7_PERFCTR_INSTR_EXECUTED            = 0x08,
+
+       ARMV7_PERFCTR_PC_PROC_RETURN            = 0x0E,
+
+       ARMV7_PERFCTR_WRITE_BUFFER_FULL         = 0x40,
+       ARMV7_PERFCTR_L2_STORE_MERGED           = 0x41,
+       ARMV7_PERFCTR_L2_STORE_BUFF             = 0x42,
+       ARMV7_PERFCTR_L2_ACCESS                 = 0x43,
+       ARMV7_PERFCTR_L2_CACH_MISS              = 0x44,
+       ARMV7_PERFCTR_AXI_READ_CYCLES           = 0x45,
+       ARMV7_PERFCTR_AXI_WRITE_CYCLES          = 0x46,
+       ARMV7_PERFCTR_MEMORY_REPLAY             = 0x47,
+       ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY   = 0x48,
+       ARMV7_PERFCTR_L1_DATA_MISS              = 0x49,
+       ARMV7_PERFCTR_L1_INST_MISS              = 0x4A,
+       ARMV7_PERFCTR_L1_DATA_COLORING          = 0x4B,
+       ARMV7_PERFCTR_L1_NEON_DATA              = 0x4C,
+       ARMV7_PERFCTR_L1_NEON_CACH_DATA         = 0x4D,
+       ARMV7_PERFCTR_L2_NEON                   = 0x4E,
+       ARMV7_PERFCTR_L2_NEON_HIT               = 0x4F,
+       ARMV7_PERFCTR_L1_INST                   = 0x50,
+       ARMV7_PERFCTR_PC_RETURN_MIS_PRED        = 0x51,
+       ARMV7_PERFCTR_PC_BRANCH_FAILED          = 0x52,
+       ARMV7_PERFCTR_PC_BRANCH_TAKEN           = 0x53,
+       ARMV7_PERFCTR_PC_BRANCH_EXECUTED        = 0x54,
+       ARMV7_PERFCTR_OP_EXECUTED               = 0x55,
+       ARMV7_PERFCTR_CYCLES_INST_STALL         = 0x56,
+       ARMV7_PERFCTR_CYCLES_INST               = 0x57,
+       ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL    = 0x58,
+       ARMV7_PERFCTR_CYCLES_NEON_INST_STALL    = 0x59,
+       ARMV7_PERFCTR_NEON_CYCLES               = 0x5A,
+
+       ARMV7_PERFCTR_PMU0_EVENTS               = 0x70,
+       ARMV7_PERFCTR_PMU1_EVENTS               = 0x71,
+       ARMV7_PERFCTR_PMU_EVENTS                = 0x72,
+};
+
+/* ARMv7 Cortex-A9 specific event types */
+enum armv7_a9_perf_types {
+       ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC     = 0x40,
+       ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC     = 0x41,
+       ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC       = 0x42,
+
+       ARMV7_PERFCTR_COHERENT_LINE_MISS        = 0x50,
+       ARMV7_PERFCTR_COHERENT_LINE_HIT         = 0x51,
+
+       ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES   = 0x60,
+       ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES   = 0x61,
+       ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
+       ARMV7_PERFCTR_STREX_EXECUTED_PASSED     = 0x63,
+       ARMV7_PERFCTR_STREX_EXECUTED_FAILED     = 0x64,
+       ARMV7_PERFCTR_DATA_EVICTION             = 0x65,
+       ARMV7_PERFCTR_ISSUE_STAGE_NO_INST       = 0x66,
+       ARMV7_PERFCTR_ISSUE_STAGE_EMPTY         = 0x67,
+       ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE  = 0x68,
+
+       ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
+
+       ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST   = 0x70,
+       ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
+       ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST  = 0x72,
+       ARMV7_PERFCTR_FP_EXECUTED_INST          = 0x73,
+       ARMV7_PERFCTR_NEON_EXECUTED_INST        = 0x74,
+
+       ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
+       ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES  = 0x81,
+       ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES        = 0x82,
+       ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES        = 0x83,
+       ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES  = 0x84,
+       ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES  = 0x85,
+       ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES      = 0x86,
+
+       ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES  = 0x8A,
+       ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
+
+       ARMV7_PERFCTR_ISB_INST                  = 0x90,
+       ARMV7_PERFCTR_DSB_INST                  = 0x91,
+       ARMV7_PERFCTR_DMB_INST                  = 0x92,
+       ARMV7_PERFCTR_EXT_INTERRUPTS            = 0x93,
+
+       ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED     = 0xA0,
+       ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED       = 0xA1,
+       ARMV7_PERFCTR_PLE_FIFO_FLUSH            = 0xA2,
+       ARMV7_PERFCTR_PLE_RQST_COMPLETED        = 0xA3,
+       ARMV7_PERFCTR_PLE_FIFO_OVERFLOW         = 0xA4,
+       ARMV7_PERFCTR_PLE_RQST_PROG             = 0xA5
+};
+
+/*
+ * Cortex-A8 HW events mapping
+ *
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                         [PERF_COUNT_HW_CACHE_OP_MAX]
+                                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               /*
+                * The performance counters don't differentiate between read
+                * and write accesses/misses so this isn't strictly correct,
+                * but it's the best we can do. Writes and reads get
+                * combined.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               /*
+                * Only ITLB misses and DTLB refills are supported.
+                * If users want the DTLB refill misses, a raw counter
+                * must be used.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+/*
+ * Cortex-A9 HW events mapping
+ */
+static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        =
+                                       ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
+       [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                         [PERF_COUNT_HW_CACHE_OP_MAX]
+                                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               /*
+                * The performance counters don't differentiate between read
+                * and write accesses/misses so this isn't strictly correct,
+                * but it's the best we can do. Writes and reads get
+                * combined.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               /*
+                * Only ITLB misses and DTLB refills are supported.
+                * If users want the DTLB refill misses, a raw counter
+                * must be used.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+/*
+ * Perf Events counters
+ */
+enum armv7_counters {
+       ARMV7_CYCLE_COUNTER             = 1,    /* Cycle counter */
+       ARMV7_COUNTER0                  = 2,    /* First event counter */
+};
+
+/*
+ * The cycle counter is ARMV7_CYCLE_COUNTER.
+ * The first event counter is ARMV7_COUNTER0.
+ * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ */
+#define        ARMV7_COUNTER_LAST      (ARMV7_COUNTER0 + armpmu->num_events - 1)
+
+/*
+ * ARMv7 low level PMNC access
+ */
+
+/*
+ * Per-CPU PMNC: config reg
+ */
+#define ARMV7_PMNC_E           (1 << 0) /* Enable all counters */
+#define ARMV7_PMNC_P           (1 << 1) /* Reset all counters */
+#define ARMV7_PMNC_C           (1 << 2) /* Cycle counter reset */
+#define ARMV7_PMNC_D           (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV7_PMNC_X           (1 << 4) /* Export to ETM */
+#define ARMV7_PMNC_DP          (1 << 5) /* Disable CCNT if non-invasive debug */
+#define        ARMV7_PMNC_N_SHIFT      11       /* Number of counters supported */
+#define        ARMV7_PMNC_N_MASK       0x1f
+#define        ARMV7_PMNC_MASK         0x3f     /* Mask for writable bits */
+
+/*
+ * Available counters
+ */
+#define ARMV7_CNT0             0       /* First event counter */
+#define ARMV7_CCNT             31      /* Cycle counter */
+
+/* Perf Event to low level counters mapping */
+#define ARMV7_EVENT_CNT_TO_CNTx        (ARMV7_COUNTER0 - ARMV7_CNT0)
+
+/*
+ * CNTENS: counters enable reg
+ */
+#define ARMV7_CNTENS_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_CNTENS_C         (1 << ARMV7_CCNT)
+
+/*
+ * CNTENC: counters disable reg
+ */
+#define ARMV7_CNTENC_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_CNTENC_C         (1 << ARMV7_CCNT)
+
+/*
+ * INTENS: counters overflow interrupt enable reg
+ */
+#define ARMV7_INTENS_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_INTENS_C         (1 << ARMV7_CCNT)
+
+/*
+ * INTENC: counters overflow interrupt disable reg
+ */
+#define ARMV7_INTENC_P(idx)    (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_INTENC_C         (1 << ARMV7_CCNT)
+
+/*
+ * EVTSEL: Event selection reg
+ */
+#define        ARMV7_EVTSEL_MASK       0xff            /* Mask for writable bits */
+
+/*
+ * SELECT: Counter selection reg
+ */
+#define        ARMV7_SELECT_MASK       0x1f            /* Mask for writable bits */
+
+/*
+ * FLAG: counters overflow flag status reg
+ */
+#define ARMV7_FLAG_P(idx)      (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_FLAG_C           (1 << ARMV7_CCNT)
+#define        ARMV7_FLAG_MASK         0xffffffff      /* Mask for writable bits */
+#define        ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK
+
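For reference, the index-to-bit arithmetic encoded by the *_P() helpers above is easy to check standalone. The program below mirrors those macros (using unsigned literals so the demo stays well defined); it is an editorial sketch, not part of the patch:

#include <stdio.h>

#define ARMV7_CYCLE_COUNTER	1
#define ARMV7_COUNTER0		2
#define ARMV7_CNT0		0	/* first hardware event counter */
#define ARMV7_CCNT		31	/* cycle counter bit */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
#define ARMV7_CNTENS_P(idx)	(1U << ((idx) - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1U << ARMV7_CCNT)

int main(void)
{
	/* perf index 2 (the first event counter) drives bit 0 of CNTENS,
	 * perf index 3 drives bit 1, and so on; the cycle counter is bit 31. */
	printf("CNTENS_P(2) = 0x%08x\n", ARMV7_CNTENS_P(2));	/* 0x00000001 */
	printf("CNTENS_P(3) = 0x%08x\n", ARMV7_CNTENS_P(3));	/* 0x00000002 */
	printf("CNTENS_C    = 0x%08x\n", ARMV7_CNTENS_C);	/* 0x80000000 */
	return 0;
}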
+static inline unsigned long armv7_pmnc_read(void)
+{
+       u32 val;
+       asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
+       return val;
+}
+
+static inline void armv7_pmnc_write(unsigned long val)
+{
+       val &= ARMV7_PMNC_MASK;
+       asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
+}
+
+static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+{
+       return pmnc & ARMV7_OVERFLOWED_MASK;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
+                                       enum armv7_counters counter)
+{
+       int ret = 0;
+
+       if (counter == ARMV7_CYCLE_COUNTER)
+               ret = pmnc & ARMV7_FLAG_C;
+       else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
+               ret = pmnc & ARMV7_FLAG_P(counter);
+       else
+               pr_err("CPU%u checking wrong counter %d overflow status\n",
+                       smp_processor_id(), counter);
+
+       return ret;
+}
+
+static inline int armv7_pmnc_select_counter(unsigned int idx)
+{
+       u32 val;
+
+       if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
+               pr_err("CPU%u selecting wrong PMNC counter"
+                       " %d\n", smp_processor_id(), idx);
+               return -1;
+       }
+
+       val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
+       asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+
+       return idx;
+}
+
+static inline u32 armv7pmu_read_counter(int idx)
+{
+       unsigned long value = 0;
+
+       if (idx == ARMV7_CYCLE_COUNTER)
+               asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+       else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
+               if (armv7_pmnc_select_counter(idx) == idx)
+                       asm volatile("mrc p15, 0, %0, c9, c13, 2"
+                                    : "=r" (value));
+       } else
+               pr_err("CPU%u reading wrong counter %d\n",
+                       smp_processor_id(), idx);
+
+       return value;
+}
+
+static inline void armv7pmu_write_counter(int idx, u32 value)
+{
+       if (idx == ARMV7_CYCLE_COUNTER)
+               asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+       else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
+               if (armv7_pmnc_select_counter(idx) == idx)
+                       asm volatile("mcr p15, 0, %0, c9, c13, 2"
+                                    : : "r" (value));
+       } else
+               pr_err("CPU%u writing wrong counter %d\n",
+                       smp_processor_id(), idx);
+}
+
+static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+{
+       if (armv7_pmnc_select_counter(idx) == idx) {
+               val &= ARMV7_EVTSEL_MASK;
+               asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+       }
+}
+
+static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+{
+       u32 val;
+
+       if ((idx != ARMV7_CYCLE_COUNTER) &&
+           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+               pr_err("CPU%u enabling wrong PMNC counter"
+                       " %d\n", smp_processor_id(), idx);
+               return -1;
+       }
+
+       if (idx == ARMV7_CYCLE_COUNTER)
+               val = ARMV7_CNTENS_C;
+       else
+               val = ARMV7_CNTENS_P(idx);
+
+       asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+
+       return idx;
+}
+
+static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+{
+       u32 val;
+
+       if ((idx != ARMV7_CYCLE_COUNTER) &&
+           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+               pr_err("CPU%u disabling wrong PMNC counter"
+                       " %d\n", smp_processor_id(), idx);
+               return -1;
+       }
+
+       if (idx == ARMV7_CYCLE_COUNTER)
+               val = ARMV7_CNTENC_C;
+       else
+               val = ARMV7_CNTENC_P(idx);
+
+       asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+
+       return idx;
+}
+
+static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+{
+       u32 val;
+
+       if ((idx != ARMV7_CYCLE_COUNTER) &&
+           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+               pr_err("CPU%u enabling wrong PMNC counter"
+                       " interrupt enable %d\n", smp_processor_id(), idx);
+               return -1;
+       }
+
+       if (idx == ARMV7_CYCLE_COUNTER)
+               val = ARMV7_INTENS_C;
+       else
+               val = ARMV7_INTENS_P(idx);
+
+       asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
+
+       return idx;
+}
+
+static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+{
+       u32 val;
+
+       if ((idx != ARMV7_CYCLE_COUNTER) &&
+           ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+               pr_err("CPU%u disabling wrong PMNC counter"
+                       " interrupt enable %d\n", smp_processor_id(), idx);
+               return -1;
+       }
+
+       if (idx == ARMV7_CYCLE_COUNTER)
+               val = ARMV7_INTENC_C;
+       else
+               val = ARMV7_INTENC_P(idx);
+
+       asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+
+       return idx;
+}
+
+static inline u32 armv7_pmnc_getreset_flags(void)
+{
+       u32 val;
+
+       /* Read */
+       asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+
+       /* Write to clear flags */
+       val &= ARMV7_FLAG_MASK;
+       asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+
+       return val;
+}
+
+#ifdef DEBUG
+static void armv7_pmnc_dump_regs(void)
+{
+       u32 val;
+       unsigned int cnt;
+
+       printk(KERN_INFO "PMNC registers dump:\n");
+
+       asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+       printk(KERN_INFO "PMNC  =0x%08x\n", val);
+
+       asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
+       printk(KERN_INFO "CNTENS=0x%08x\n", val);
+
+       asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
+       printk(KERN_INFO "INTENS=0x%08x\n", val);
+
+       asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+       printk(KERN_INFO "FLAGS =0x%08x\n", val);
+
+       asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
+       printk(KERN_INFO "SELECT=0x%08x\n", val);
+
+       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+       printk(KERN_INFO "CCNT  =0x%08x\n", val);
+
+       for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+               armv7_pmnc_select_counter(cnt);
+               asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+               printk(KERN_INFO "CNT[%d] count =0x%08x\n",
+                       cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+               asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
+               printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
+                       cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+       }
+}
+#endif
+
+static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+       unsigned long flags;
+
+       /*
+        * Enable counter and interrupt, and set the counter to count
+        * the event that we're interested in.
+        */
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+
+       /*
+        * Disable counter
+        */
+       armv7_pmnc_disable_counter(idx);
+
+       /*
+        * Set event (if destined for PMNx counters)
+        * We don't need to set the event if it's a cycle count
+        */
+       if (idx != ARMV7_CYCLE_COUNTER)
+               armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+       /*
+        * Enable interrupt for this counter
+        */
+       armv7_pmnc_enable_intens(idx);
+
+       /*
+        * Enable counter
+        */
+       armv7_pmnc_enable_counter(idx);
+
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+       unsigned long flags;
+
+       /*
+        * Disable counter and interrupt
+        */
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+
+       /*
+        * Disable counter
+        */
+       armv7_pmnc_disable_counter(idx);
+
+       /*
+        * Disable interrupt for this counter
+        */
+       armv7_pmnc_disable_intens(idx);
+
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+{
+       unsigned long pmnc;
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc;
+       struct pt_regs *regs;
+       int idx;
+
+       /*
+        * Get and reset the IRQ flags
+        */
+       pmnc = armv7_pmnc_getreset_flags();
+
+       /*
+        * Did an overflow occur?
+        */
+       if (!armv7_pmnc_has_overflowed(pmnc))
+               return IRQ_NONE;
+
+       /*
+        * Handle the counter(s) overflow(s)
+        */
+       regs = get_irq_regs();
+
+       perf_sample_data_init(&data, 0);
+
+       cpuc = &__get_cpu_var(cpu_hw_events);
+       for (idx = 0; idx <= armpmu->num_events; ++idx) {
+               struct perf_event *event = cpuc->events[idx];
+               struct hw_perf_event *hwc;
+
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               /*
+                * We have a single interrupt for all counters. Check that
+                * each counter has overflowed before we process it.
+                */
+               if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
+                       continue;
+
+               hwc = &event->hw;
+               armpmu_event_update(event, hwc, idx);
+               data.period = event->hw.last_period;
+               if (!armpmu_event_set_period(event, hwc, idx))
+                       continue;
+
+               if (perf_event_overflow(event, 0, &data, regs))
+                       armpmu->disable(hwc, idx);
+       }
+
+       /*
+        * Handle the pending perf events.
+        *
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
+        * will not work.
+        */
+       irq_work_run();
+
+       return IRQ_HANDLED;
+}
+
+static void armv7pmu_start(void)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       /* Enable all counters */
+       armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void armv7pmu_stop(void)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       /* Disable all counters */
+       armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+                                 struct hw_perf_event *event)
+{
+       int idx;
+
+       /* Always place a cycle counter into the cycle counter. */
+       if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
+               if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+                       return -EAGAIN;
+
+               return ARMV7_CYCLE_COUNTER;
+       } else {
+               /*
+                * For anything other than a cycle counter, try to use
+                * the event counters.
+                */
+               for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
+                       if (!test_and_set_bit(idx, cpuc->used_mask))
+                               return idx;
+               }
+
+               /* The counters are all in use. */
+               return -EAGAIN;
+       }
+}
+
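armv7pmu_get_event_idx() above always pins PERF_COUNT_HW_CPU_CYCLES to the dedicated cycle counter (index 1) and lets every other event scan the event counters from index 2 upwards. A minimal standalone model of that allocation, using a plain bitmap in place of cpuc->used_mask (editorial sketch, not part of the patch):

#include <stdio.h>

#define CYCLE_COUNTER	1
#define COUNTER0	2

static int get_event_idx(unsigned long *used, int num_events, int is_cycle_event)
{
	int idx;

	if (is_cycle_event) {
		if (*used & (1UL << CYCLE_COUNTER))
			return -1;		/* stands in for -EAGAIN */
		*used |= 1UL << CYCLE_COUNTER;
		return CYCLE_COUNTER;
	}

	for (idx = COUNTER0; idx <= num_events; ++idx) {
		if (!(*used & (1UL << idx))) {
			*used |= 1UL << idx;
			return idx;
		}
	}
	return -1;				/* all event counters in use */
}

int main(void)
{
	unsigned long used = 0;

	printf("%d %d %d\n",
	       get_event_idx(&used, 5, 1),	/* cycle event -> 1 */
	       get_event_idx(&used, 5, 0),	/* first event  -> 2 */
	       get_event_idx(&used, 5, 0));	/* second event -> 3 */
	return 0;
}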
+static struct arm_pmu armv7pmu = {
+       .handle_irq             = armv7pmu_handle_irq,
+       .enable                 = armv7pmu_enable_event,
+       .disable                = armv7pmu_disable_event,
+       .read_counter           = armv7pmu_read_counter,
+       .write_counter          = armv7pmu_write_counter,
+       .get_event_idx          = armv7pmu_get_event_idx,
+       .start                  = armv7pmu_start,
+       .stop                   = armv7pmu_stop,
+       .raw_event_mask         = 0xFF,
+       .max_period             = (1LLU << 32) - 1,
+};
+
+static u32 __init armv7_reset_read_pmnc(void)
+{
+       u32 nb_cnt;
+
+       /* Initialize & Reset PMNC: C and P bits */
+       armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+
+       /* Read the nb of CNTx counters supported from PMNC */
+       nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+
+       /* Add the CPU cycles counter and return */
+       return nb_cnt + 1;
+}
+
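armv7_reset_read_pmnc() derives the counter population from the PMNC.N field (bits 15:11) and adds one for the cycle counter. A quick standalone check of that decoding (editorial sketch, not part of the patch; the sample PMNC value is made up):

#include <stdio.h>

#define ARMV7_PMNC_N_SHIFT	11
#define ARMV7_PMNC_N_MASK	0x1f

int main(void)
{
	unsigned int pmnc = 4 << ARMV7_PMNC_N_SHIFT;	/* pretend the CPU reports 4 event counters */
	unsigned int nb_cnt = (pmnc >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* num_events counts the CCNT as well, hence the +1. */
	printf("event counters = %u, num_events = %u\n", nb_cnt, nb_cnt + 1);
	return 0;
}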
+static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+{
+       armv7pmu.id             = ARM_PERF_PMU_ID_CA8;
+       armv7pmu.name           = "ARMv7 Cortex-A8";
+       armv7pmu.cache_map      = &armv7_a8_perf_cache_map;
+       armv7pmu.event_map      = &armv7_a8_perf_map;
+       armv7pmu.num_events     = armv7_reset_read_pmnc();
+       return &armv7pmu;
+}
+
+static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+{
+       armv7pmu.id             = ARM_PERF_PMU_ID_CA9;
+       armv7pmu.name           = "ARMv7 Cortex-A9";
+       armv7pmu.cache_map      = &armv7_a9_perf_cache_map;
+       armv7pmu.event_map      = &armv7_a9_perf_map;
+       armv7pmu.num_events     = armv7_reset_read_pmnc();
+       return &armv7pmu;
+}
+#else
+static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+{
+       return NULL;
+}
+
+static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+{
+       return NULL;
+}
+#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
new file mode 100644 (file)
index 0000000..28cd3b0
--- /dev/null
@@ -0,0 +1,807 @@
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * Based on the previous xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ *     - xscale1pmu: 2 event counters and a cycle counter
+ *     - xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+#ifdef CONFIG_CPU_XSCALE
+enum xscale_perf_types {
+       XSCALE_PERFCTR_ICACHE_MISS              = 0x00,
+       XSCALE_PERFCTR_ICACHE_NO_DELIVER        = 0x01,
+       XSCALE_PERFCTR_DATA_STALL               = 0x02,
+       XSCALE_PERFCTR_ITLB_MISS                = 0x03,
+       XSCALE_PERFCTR_DTLB_MISS                = 0x04,
+       XSCALE_PERFCTR_BRANCH                   = 0x05,
+       XSCALE_PERFCTR_BRANCH_MISS              = 0x06,
+       XSCALE_PERFCTR_INSTRUCTION              = 0x07,
+       XSCALE_PERFCTR_DCACHE_FULL_STALL        = 0x08,
+       XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
+       XSCALE_PERFCTR_DCACHE_ACCESS            = 0x0A,
+       XSCALE_PERFCTR_DCACHE_MISS              = 0x0B,
+       XSCALE_PERFCTR_DCACHE_WRITE_BACK        = 0x0C,
+       XSCALE_PERFCTR_PC_CHANGED               = 0x0D,
+       XSCALE_PERFCTR_BCU_REQUEST              = 0x10,
+       XSCALE_PERFCTR_BCU_FULL                 = 0x11,
+       XSCALE_PERFCTR_BCU_DRAIN                = 0x12,
+       XSCALE_PERFCTR_BCU_ECC_NO_ELOG          = 0x14,
+       XSCALE_PERFCTR_BCU_1_BIT_ERR            = 0x15,
+       XSCALE_PERFCTR_RMW                      = 0x16,
+       /* XSCALE_PERFCTR_CCNT is not hardware defined */
+       XSCALE_PERFCTR_CCNT                     = 0xFE,
+       XSCALE_PERFCTR_UNUSED                   = 0xFF,
+};
+
+enum xscale_counters {
+       XSCALE_CYCLE_COUNTER    = 1,
+       XSCALE_COUNTER0,
+       XSCALE_COUNTER1,
+       XSCALE_COUNTER2,
+       XSCALE_COUNTER3,
+};
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = XSCALE_PERFCTR_CCNT,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = XSCALE_PERFCTR_INSTRUCTION,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = XSCALE_PERFCTR_BRANCH_MISS,
+       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                          [PERF_COUNT_HW_CACHE_OP_MAX]
+                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = XSCALE_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DCACHE_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = XSCALE_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DCACHE_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ICACHE_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ICACHE_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DTLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_DTLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+#define        XSCALE_PMU_ENABLE       0x001
+#define XSCALE_PMN_RESET       0x002
+#define        XSCALE_CCNT_RESET       0x004
+#define	XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64       0x008
+
+#define XSCALE1_OVERFLOWED_MASK        0x700
+#define XSCALE1_CCOUNT_OVERFLOW        0x400
+#define XSCALE1_COUNT0_OVERFLOW        0x100
+#define XSCALE1_COUNT1_OVERFLOW        0x200
+#define XSCALE1_CCOUNT_INT_EN  0x040
+#define XSCALE1_COUNT0_INT_EN  0x010
+#define XSCALE1_COUNT1_INT_EN  0x020
+#define XSCALE1_COUNT0_EVT_SHFT        12
+#define XSCALE1_COUNT0_EVT_MASK        (0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT        20
+#define XSCALE1_COUNT1_EVT_MASK        (0xff << XSCALE1_COUNT1_EVT_SHFT)
+
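On xscale1 everything lives in one PMNC word: each counter's event number occupies an 8-bit field and the per-counter interrupt enables sit in the low bits, which is why xscale1pmu_enable_event() below builds a mask/evt pair and read-modify-writes PMNC under pmu_lock. A standalone illustration of that packing for counter 0 (editorial sketch; the starting PMNC value and event 0x0B, DCACHE_MISS, are just examples):

#include <stdio.h>

#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)

int main(void)
{
	unsigned long pmnc = 0x00000001;	/* pretend the PMU is enabled, nothing else set */
	unsigned long evt  = (0x0BUL << XSCALE1_COUNT0_EVT_SHFT) | XSCALE1_COUNT0_INT_EN;

	pmnc &= ~(unsigned long)XSCALE1_COUNT0_EVT_MASK;	/* clear the old event field */
	pmnc |= evt;						/* install event 0x0B + IRQ enable */

	printf("PMNC = 0x%08lx\n", pmnc);	/* 0x0000b011 */
	return 0;
}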
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+       u32 val;
+       asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+       return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+       /* upper 4 bits and bits 7, 11 are write-as-0 */
+       val &= 0xffff77f;
+       asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+                                       enum xscale_counters counter)
+{
+       int ret = 0;
+
+       switch (counter) {
+       case XSCALE_CYCLE_COUNTER:
+               ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+               break;
+       case XSCALE_COUNTER0:
+               ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+               break;
+       case XSCALE_COUNTER1:
+               ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+               break;
+       default:
+               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+       }
+
+       return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(int irq_num, void *dev)
+{
+       unsigned long pmnc;
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc;
+       struct pt_regs *regs;
+       int idx;
+
+       /*
+        * NOTE: there's an A stepping erratum that states if an overflow
+        *       bit is already set and another overflow occurs, the previous
+        *       overflow bit gets cleared. There's no workaround.
+        *       Fixed in B stepping or later.
+        */
+       pmnc = xscale1pmu_read_pmnc();
+
+       /*
+        * Write the value back to clear the overflow flags. Overflow
+        * flags remain in pmnc for use below. We also disable the PMU
+        * while we process the interrupt.
+        */
+       xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+       if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+               return IRQ_NONE;
+
+       regs = get_irq_regs();
+
+       perf_sample_data_init(&data, 0);
+
+       cpuc = &__get_cpu_var(cpu_hw_events);
+       for (idx = 0; idx <= armpmu->num_events; ++idx) {
+               struct perf_event *event = cpuc->events[idx];
+               struct hw_perf_event *hwc;
+
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+                       continue;
+
+               hwc = &event->hw;
+               armpmu_event_update(event, hwc, idx);
+               data.period = event->hw.last_period;
+               if (!armpmu_event_set_period(event, hwc, idx))
+                       continue;
+
+               if (perf_event_overflow(event, 0, &data, regs))
+                       armpmu->disable(hwc, idx);
+       }
+
+       irq_work_run();
+
+       /*
+        * Re-enable the PMU.
+        */
+       pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+       xscale1pmu_write_pmnc(pmnc);
+
+       return IRQ_HANDLED;
+}
+
+static void
+xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+       unsigned long val, mask, evt, flags;
+
+       switch (idx) {
+       case XSCALE_CYCLE_COUNTER:
+               mask = 0;
+               evt = XSCALE1_CCOUNT_INT_EN;
+               break;
+       case XSCALE_COUNTER0:
+               mask = XSCALE1_COUNT0_EVT_MASK;
+               evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
+                       XSCALE1_COUNT0_INT_EN;
+               break;
+       case XSCALE_COUNTER1:
+               mask = XSCALE1_COUNT1_EVT_MASK;
+               evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
+                       XSCALE1_COUNT1_INT_EN;
+               break;
+       default:
+               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+               return;
+       }
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = xscale1pmu_read_pmnc();
+       val &= ~mask;
+       val |= evt;
+       xscale1pmu_write_pmnc(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+       unsigned long val, mask, evt, flags;
+
+       switch (idx) {
+       case XSCALE_CYCLE_COUNTER:
+               mask = XSCALE1_CCOUNT_INT_EN;
+               evt = 0;
+               break;
+       case XSCALE_COUNTER0:
+               mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
+               evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
+               break;
+       case XSCALE_COUNTER1:
+               mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
+               evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
+               break;
+       default:
+               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+               return;
+       }
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = xscale1pmu_read_pmnc();
+       val &= ~mask;
+       val |= evt;
+       xscale1pmu_write_pmnc(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+                       struct hw_perf_event *event)
+{
+       if (XSCALE_PERFCTR_CCNT == event->config_base) {
+               if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
+                       return -EAGAIN;
+
+               return XSCALE_CYCLE_COUNTER;
+       } else {
+               if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
+                       return XSCALE_COUNTER1;
+
+               if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
+                       return XSCALE_COUNTER0;
+
+               return -EAGAIN;
+       }
+}
+
+static void
+xscale1pmu_start(void)
+{
+       unsigned long flags, val;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = xscale1pmu_read_pmnc();
+       val |= XSCALE_PMU_ENABLE;
+       xscale1pmu_write_pmnc(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_stop(void)
+{
+       unsigned long flags, val;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = xscale1pmu_read_pmnc();
+       val &= ~XSCALE_PMU_ENABLE;
+       xscale1pmu_write_pmnc(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale1pmu_read_counter(int counter)
+{
+       u32 val = 0;
+
+       switch (counter) {
+       case XSCALE_CYCLE_COUNTER:
+               asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
+               break;
+       case XSCALE_COUNTER0:
+               asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
+               break;
+       case XSCALE_COUNTER1:
+               asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
+               break;
+       }
+
+       return val;
+}
+
+static inline void
+xscale1pmu_write_counter(int counter, u32 val)
+{
+       switch (counter) {
+       case XSCALE_CYCLE_COUNTER:
+               asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
+               break;
+       case XSCALE_COUNTER0:
+               asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
+               break;
+       case XSCALE_COUNTER1:
+               asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
+               break;
+       }
+}
+
+static const struct arm_pmu xscale1pmu = {
+       .id             = ARM_PERF_PMU_ID_XSCALE1,
+       .name           = "xscale1",
+       .handle_irq     = xscale1pmu_handle_irq,
+       .enable         = xscale1pmu_enable_event,
+       .disable        = xscale1pmu_disable_event,
+       .read_counter   = xscale1pmu_read_counter,
+       .write_counter  = xscale1pmu_write_counter,
+       .get_event_idx  = xscale1pmu_get_event_idx,
+       .start          = xscale1pmu_start,
+       .stop           = xscale1pmu_stop,
+       .cache_map      = &xscale_perf_cache_map,
+       .event_map      = &xscale_perf_map,
+       .raw_event_mask = 0xFF,
+       .num_events     = 3,
+       .max_period     = (1LLU << 32) - 1,
+};
+
+static const struct arm_pmu *__init xscale1pmu_init(void)
+{
+       return &xscale1pmu;
+}
+
+#define XSCALE2_OVERFLOWED_MASK        0x01f
+#define XSCALE2_CCOUNT_OVERFLOW        0x001
+#define XSCALE2_COUNT0_OVERFLOW        0x002
+#define XSCALE2_COUNT1_OVERFLOW        0x004
+#define XSCALE2_COUNT2_OVERFLOW        0x008
+#define XSCALE2_COUNT3_OVERFLOW        0x010
+#define XSCALE2_CCOUNT_INT_EN  0x001
+#define XSCALE2_COUNT0_INT_EN  0x002
+#define XSCALE2_COUNT1_INT_EN  0x004
+#define XSCALE2_COUNT2_INT_EN  0x008
+#define XSCALE2_COUNT3_INT_EN  0x010
+#define XSCALE2_COUNT0_EVT_SHFT        0
+#define XSCALE2_COUNT0_EVT_MASK        (0xff << XSCALE2_COUNT0_EVT_SHFT)
+#define XSCALE2_COUNT1_EVT_SHFT        8
+#define XSCALE2_COUNT1_EVT_MASK        (0xff << XSCALE2_COUNT1_EVT_SHFT)
+#define XSCALE2_COUNT2_EVT_SHFT        16
+#define XSCALE2_COUNT2_EVT_MASK        (0xff << XSCALE2_COUNT2_EVT_SHFT)
+#define XSCALE2_COUNT3_EVT_SHFT        24
+#define XSCALE2_COUNT3_EVT_MASK        (0xff << XSCALE2_COUNT3_EVT_SHFT)
+
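xscale2 splits the same information across registers: EVTSEL holds one 8-bit event field per counter and the interrupt-enable register holds one bit per counter, so xscale2pmu_enable_event() updates both instead of a single PMNC word. A standalone illustration for counter 2 (editorial sketch; the starting register values and event 0x07, INSTRUCTION, are just examples):

#include <stdio.h>

#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)

int main(void)
{
	unsigned long evtsel = 0xffffffff;	/* all counters parked on event 0xff (UNUSED) */
	unsigned long ien    = 0x001;		/* only the CCNT interrupt enabled so far */

	ien    |= XSCALE2_COUNT2_INT_EN;
	evtsel &= ~(unsigned long)XSCALE2_COUNT2_EVT_MASK;	/* clear counter 2's event field */
	evtsel |= 0x07UL << XSCALE2_COUNT2_EVT_SHFT;		/* select event 0x07 */

	printf("EVTSEL = 0x%08lx, INTEN = 0x%03lx\n", evtsel, ien);	/* 0xff07ffff, 0x009 */
	return 0;
}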
+static inline u32
+xscale2pmu_read_pmnc(void)
+{
+       u32 val;
+       asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
+       /* bits 1-2 and 4-23 are read-unpredictable */
+       return val & 0xff000009;
+}
+
+static inline void
+xscale2pmu_write_pmnc(u32 val)
+{
+       /* bits 4-23 are write-as-0, 24-31 are write ignored */
+       val &= 0xf;
+       asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_overflow_flags(void)
+{
+       u32 val;
+       asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
+       return val;
+}
+
+static inline void
+xscale2pmu_write_overflow_flags(u32 val)
+{
+       asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_event_select(void)
+{
+       u32 val;
+       asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
+       return val;
+}
+
+static inline void
+xscale2pmu_write_event_select(u32 val)
+{
+       asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
+}
+
+static inline u32
+xscale2pmu_read_int_enable(void)
+{
+       u32 val;
+       asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
+       return val;
+}
+
+static void
+xscale2pmu_write_int_enable(u32 val)
+{
+       asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline int
+xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
+                                       enum xscale_counters counter)
+{
+       int ret = 0;
+
+       switch (counter) {
+       case XSCALE_CYCLE_COUNTER:
+               ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
+               break;
+       case XSCALE_COUNTER0:
+               ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
+               break;
+       case XSCALE_COUNTER1:
+               ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
+               break;
+       case XSCALE_COUNTER2:
+               ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
+               break;
+       case XSCALE_COUNTER3:
+               ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
+               break;
+       default:
+               WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+       }
+
+       return ret;
+}
+
+static irqreturn_t
+xscale2pmu_handle_irq(int irq_num, void *dev)
+{
+       unsigned long pmnc, of_flags;
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc;
+       struct pt_regs *regs;
+       int idx;
+
+       /* Disable the PMU. */
+       pmnc = xscale2pmu_read_pmnc();
+       xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+       /* Check the overflow flag register. */
+       of_flags = xscale2pmu_read_overflow_flags();
+       if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+               return IRQ_NONE;
+
+       /* Clear the overflow bits. */
+       xscale2pmu_write_overflow_flags(of_flags);
+
+       regs = get_irq_regs();
+
+       perf_sample_data_init(&data, 0);
+
+       cpuc = &__get_cpu_var(cpu_hw_events);
+       for (idx = 0; idx <= armpmu->num_events; ++idx) {
+               struct perf_event *event = cpuc->events[idx];
+               struct hw_perf_event *hwc;
+
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+                       continue;
+
+               hwc = &event->hw;
+               armpmu_event_update(event, hwc, idx);
+               data.period = event->hw.last_period;
+               if (!armpmu_event_set_period(event, hwc, idx))
+                       continue;
+
+               if (perf_event_overflow(event, 0, &data, regs))
+                       armpmu->disable(hwc, idx);
+       }
+
+       irq_work_run();
+
+       /*
+        * Re-enable the PMU.
+        */
+       pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+       xscale2pmu_write_pmnc(pmnc);
+
+       return IRQ_HANDLED;
+}
+
+static void
+xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+       unsigned long flags, ien, evtsel;
+
+       ien = xscale2pmu_read_int_enable();
+       evtsel = xscale2pmu_read_event_select();
+
+       switch (idx) {
+       case XSCALE_CYCLE_COUNTER:
+               ien |= XSCALE2_CCOUNT_INT_EN;
+               break;
+       case XSCALE_COUNTER0:
+               ien |= XSCALE2_COUNT0_INT_EN;
+               evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+               evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+               break;
+       case XSCALE_COUNTER1:
+               ien |= XSCALE2_COUNT1_INT_EN;
+               evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+               evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+               break;
+       case XSCALE_COUNTER2:
+               ien |= XSCALE2_COUNT2_INT_EN;
+               evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+               evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+               break;
+       case XSCALE_COUNTER3:
+               ien |= XSCALE2_COUNT3_INT_EN;
+               evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+               evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+               break;
+       default:
+               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+               return;
+       }
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       xscale2pmu_write_event_select(evtsel);
+       xscale2pmu_write_int_enable(ien);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+       unsigned long flags, ien, evtsel;
+
+       ien = xscale2pmu_read_int_enable();
+       evtsel = xscale2pmu_read_event_select();
+
+       switch (idx) {
+       case XSCALE_CYCLE_COUNTER:
+               ien &= ~XSCALE2_CCOUNT_INT_EN;
+               break;
+       case XSCALE_COUNTER0:
+               ien &= ~XSCALE2_COUNT0_INT_EN;
+               evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+               break;
+       case XSCALE_COUNTER1:
+               ien &= ~XSCALE2_COUNT1_INT_EN;
+               evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+               break;
+       case XSCALE_COUNTER2:
+               ien &= ~XSCALE2_COUNT2_INT_EN;
+               evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+               break;
+       case XSCALE_COUNTER3:
+               ien &= ~XSCALE2_COUNT3_INT_EN;
+               evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+               evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+               break;
+       default:
+               WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+               return;
+       }
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       xscale2pmu_write_event_select(evtsel);
+       xscale2pmu_write_int_enable(ien);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+                       struct hw_perf_event *event)
+{
+       int idx = xscale1pmu_get_event_idx(cpuc, event);
+       if (idx >= 0)
+               goto out;
+
+       if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
+               idx = XSCALE_COUNTER3;
+       else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
+               idx = XSCALE_COUNTER2;
+out:
+       return idx;
+}
+
+static void
+xscale2pmu_start(void)
+{
+       unsigned long flags, val;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
+       val |= XSCALE_PMU_ENABLE;
+       xscale2pmu_write_pmnc(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_stop(void)
+{
+       unsigned long flags, val;
+
+       raw_spin_lock_irqsave(&pmu_lock, flags);
+       val = xscale2pmu_read_pmnc();
+       val &= ~XSCALE_PMU_ENABLE;
+       xscale2pmu_write_pmnc(val);
+       raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale2pmu_read_counter(int counter)
+{
+       u32 val = 0;
+
+       switch (counter) {
+       case XSCALE_CYCLE_COUNTER:
+               asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
+               break;
+       case XSCALE_COUNTER0:
+               asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
+               break;
+       case XSCALE_COUNTER1:
+               asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
+               break;
+       case XSCALE_COUNTER2:
+               asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
+               break;
+       case XSCALE_COUNTER3:
+               asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
+               break;
+       }
+
+       return val;
+}
+
+static inline void
+xscale2pmu_write_counter(int counter, u32 val)
+{
+       switch (counter) {
+       case XSCALE_CYCLE_COUNTER:
+               asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
+               break;
+       case XSCALE_COUNTER0:
+               asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
+               break;
+       case XSCALE_COUNTER1:
+               asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
+               break;
+       case XSCALE_COUNTER2:
+               asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
+               break;
+       case XSCALE_COUNTER3:
+               asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
+               break;
+       }
+}
+
+static const struct arm_pmu xscale2pmu = {
+       .id             = ARM_PERF_PMU_ID_XSCALE2,
+       .name           = "xscale2",
+       .handle_irq     = xscale2pmu_handle_irq,
+       .enable         = xscale2pmu_enable_event,
+       .disable        = xscale2pmu_disable_event,
+       .read_counter   = xscale2pmu_read_counter,
+       .write_counter  = xscale2pmu_write_counter,
+       .get_event_idx  = xscale2pmu_get_event_idx,
+       .start          = xscale2pmu_start,
+       .stop           = xscale2pmu_stop,
+       .cache_map      = &xscale_perf_cache_map,
+       .event_map      = &xscale_perf_map,
+       .raw_event_mask = 0xFF,
+       .num_events     = 5,
+       .max_period     = (1LLU << 32) - 1,
+};
+
+static const struct arm_pmu *__init xscale2pmu_init(void)
+{
+       return &xscale2pmu;
+}
+#else
+static const struct arm_pmu *__init xscale1pmu_init(void)
+{
+       return NULL;
+}
+
+static const struct arm_pmu *__init xscale2pmu_init(void)
+{
+       return NULL;
+}
+#endif /* CONFIG_CPU_XSCALE */
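For illustration only, not part of the patch above: the two *_init() helpers are the only entry points the rest of the kernel needs, and they return NULL when CONFIG_CPU_XSCALE is disabled. A minimal sketch of a probe routine that consumes them could look like the following; cpu_is_xscale2() is a hypothetical helper standing in for whatever CPU-ID check the core perf code actually performs.

/* Sketch only: the selection logic is assumed, not taken from this patch. */
static const struct arm_pmu *armpmu;

static int __init example_init_hw_perf_events(void)
{
	/* cpu_is_xscale2() is hypothetical; pick the matching backend. */
	armpmu = cpu_is_xscale2() ? xscale2pmu_init() : xscale1pmu_init();
	if (!armpmu)
		return -ENODEV;	/* both stubs return NULL without CONFIG_CPU_XSCALE */

	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return 0;
}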
index 3e97483..19c6816 100644 (file)
@@ -1060,8 +1060,8 @@ static int ptrace_sethbpregs(struct task_struct *tsk, long num,
                        goto out;
 
                if ((gen_type & implied_type) != gen_type) {
-                               ret = -EINVAL;
-                               goto out;
+                       ret = -EINVAL;
+                       goto out;
                }
 
                attr.bp_len     = gen_len;
index 8c19595..bbca898 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
+#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
@@ -457,7 +458,7 @@ static void ipi_timer(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();
@@ -544,7 +545,7 @@ static void ipi_cpu_stop(unsigned int cpu)
  *
  *  Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
 {
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
index cead889..897c1a8 100644 (file)
@@ -101,6 +101,7 @@ SECTIONS
                        __exception_text_start = .;
                        *(.exception.text)
                        __exception_text_end = .;
+                       IRQENTRY_TEXT
                        TEXT_TEXT
                        SCHED_TEXT
                        LOCK_TEXT
index 3b9a32a..a4ed390 100644 (file)
@@ -9,6 +9,12 @@ config MACH_DOVE_DB
          Say 'Y' here if you want your kernel to support the
          Marvell DB-MV88AP510 Development Board.
 
+config MACH_CM_A510
+       bool "CompuLab CM-A510 Board"
+       help
+         Say 'Y' here if you want your kernel to support the
+         CompuLab CM-A510 Board.
+
 endmenu
 
 endif
index 7ab3be5..fa0f018 100644 (file)
@@ -1,3 +1,4 @@
-obj-y                          += common.o addr-map.o irq.o pcie.o
+obj-y                          += common.o addr-map.o irq.o pcie.o mpp.o
 
 obj-$(CONFIG_MACH_DOVE_DB)     += dove-db-setup.o
+obj-$(CONFIG_MACH_CM_A510)     += cm-a510.o
diff --git a/arch/arm/mach-dove/cm-a510.c b/arch/arm/mach-dove/cm-a510.c
new file mode 100644 (file)
index 0000000..96e0e94
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * arch/arm/mach-dove/cm-a510.c
+ *
+ * Copyright (C) 2010 CompuLab, Ltd.
+ * Konstantin Sinyuk <kostyas@compulab.co.il>
+ *
+ * Based on Marvell DB-MV88AP510-BP Development Board Setup
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/dove.h>
+
+#include "common.h"
+
+static struct mv643xx_eth_platform_data cm_a510_ge00_data = {
+       .phy_addr       = MV643XX_ETH_PHY_ADDR_DEFAULT,
+};
+
+static struct mv_sata_platform_data cm_a510_sata_data = {
+       .n_ports        = 1,
+};
+
+/*
+ * SPI Devices:
+ * SPI0: 1M Flash Winbond w25q32bv
+ */
+static const struct flash_platform_data cm_a510_spi_flash_data = {
+       .type           = "w25q32bv",
+};
+
+static struct spi_board_info __initdata cm_a510_spi_flash_info[] = {
+       {
+               .modalias       = "m25p80",
+               .platform_data  = &cm_a510_spi_flash_data,
+               .irq            = -1,
+               .max_speed_hz   = 20000000,
+               .bus_num        = 0,
+               .chip_select    = 0,
+       },
+};
+
+static int __init cm_a510_pci_init(void)
+{
+       if (machine_is_cm_a510())
+               dove_pcie_init(1, 1);
+
+       return 0;
+}
+
+subsys_initcall(cm_a510_pci_init);
+
+/* Board Init */
+static void __init cm_a510_init(void)
+{
+       /*
+        * Basic Dove setup. Needs to be called early.
+        */
+       dove_init();
+
+       dove_ge00_init(&cm_a510_ge00_data);
+       dove_ehci0_init();
+       dove_ehci1_init();
+       dove_sata_init(&cm_a510_sata_data);
+       dove_sdio0_init();
+       dove_sdio1_init();
+       dove_spi0_init();
+       dove_spi1_init();
+       dove_uart0_init();
+       dove_uart1_init();
+       dove_i2c_init();
+       spi_register_board_info(cm_a510_spi_flash_info,
+                               ARRAY_SIZE(cm_a510_spi_flash_info));
+}
+
+MACHINE_START(CM_A510, "Compulab CM-A510 Board")
+       .boot_params    = 0x00000100,
+       .init_machine   = cm_a510_init,
+       .map_io         = dove_map_io,
+       .init_irq       = dove_init_irq,
+       .timer          = &dove_timer,
+MACHINE_END
index f6a0839..27b4145 100644 (file)
 #define DOVE_RESET_SAMPLE_LO   (DOVE_MPP_VIRT_BASE | 0x014)
 #define DOVE_RESET_SAMPLE_HI   (DOVE_MPP_VIRT_BASE | 0x018)
 #define DOVE_GPIO_VIRT_BASE    (DOVE_SB_REGS_VIRT_BASE | 0xd0400)
+#define DOVE_GPIO2_VIRT_BASE    (DOVE_SB_REGS_VIRT_BASE | 0xe8400)
 #define DOVE_MPP_GENERAL_VIRT_BASE     (DOVE_SB_REGS_VIRT_BASE | 0xe803c)
 #define  DOVE_AU1_SPDIFO_GPIO_EN       (1 << 1)
 #define  DOVE_NAND_GPIO_EN             (1 << 0)
 #define DOVE_MPP_CTRL4_VIRT_BASE       (DOVE_GPIO_VIRT_BASE + 0x40)
-
+#define  DOVE_SPI_GPIO_SEL             (1 << 5)
+#define  DOVE_UART1_GPIO_SEL           (1 << 4)
+#define  DOVE_AU1_GPIO_SEL             (1 << 3)
+#define  DOVE_CAM_GPIO_SEL             (1 << 2)
+#define  DOVE_SD1_GPIO_SEL             (1 << 1)
+#define  DOVE_SD0_GPIO_SEL             (1 << 0)
 
 /* Power Management */
 #define DOVE_PMU_VIRT_BASE     (DOVE_SB_REGS_VIRT_BASE | 0xd0000)
+#define DOVE_PMU_SIG_CTRL      (DOVE_PMU_VIRT_BASE + 0x802c)
 
 /* Real Time Clock */
 #define DOVE_RTC_PHYS_BASE     (DOVE_SB_REGS_PHYS_BASE | 0xd8500)
index 0ee70ff..340bb7a 100644 (file)
 #include <plat/gpio.h>
 #include <asm-generic/gpio.h>          /* cansleep wrappers */
 
-#define GPIO_MAX       64
+#define GPIO_MAX       72
 
 #define GPIO_BASE_LO           (DOVE_GPIO_VIRT_BASE + 0x00)
 #define GPIO_BASE_HI           (DOVE_GPIO_VIRT_BASE + 0x20)
 
-#define GPIO_BASE(pin)         ((pin < 32) ? GPIO_BASE_LO : GPIO_BASE_HI)
+#define GPIO_BASE(pin)         ((pin < 32) ? GPIO_BASE_LO :            \
+                                ((pin < 64) ? GPIO_BASE_HI :           \
+                                 DOVE_GPIO2_VIRT_BASE))
 
 #define GPIO_OUT(pin)          (GPIO_BASE(pin) + 0x00)
 #define GPIO_IO_CONF(pin)      (GPIO_BASE(pin) + 0x04)
diff --git a/arch/arm/mach-dove/mpp.c b/arch/arm/mach-dove/mpp.c
new file mode 100644 (file)
index 0000000..71db2bd
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * arch/arm/mach-dove/mpp.c
+ *
+ * MPP functions for Marvell Dove SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <mach/dove.h>
+
+#include "mpp.h"
+
+#define MPP_NR_REGS 4
+#define MPP_CTRL(i)    ((i) == 3 ?                             \
+                        DOVE_MPP_CTRL4_VIRT_BASE :             \
+                        DOVE_MPP_VIRT_BASE + (i) * 4)
+#define PMU_SIG_REGS 2
+#define PMU_SIG_CTRL(i)        (DOVE_PMU_SIG_CTRL + (i) * 4)
+
+struct dove_mpp_grp {
+       int start;
+       int end;
+};
+
+static struct dove_mpp_grp dove_mpp_grp[] = {
+       [MPP_24_39] = {
+               .start  = 24,
+               .end    = 39,
+       },
+       [MPP_40_45] = {
+               .start  = 40,
+               .end    = 45,
+       },
+       [MPP_46_51] = {
+               .start  = 46,
+               .end    = 51,
+       },
+       [MPP_58_61] = {
+               .start  = 58,
+               .end    = 61,
+       },
+       [MPP_62_63] = {
+               .start  = 62,
+               .end    = 63,
+       },
+};
+
+static void dove_mpp_gpio_mode(int start, int end, int gpio_mode)
+{
+       int i;
+
+       for (i = start; i <= end; i++)
+               orion_gpio_set_valid(i, gpio_mode);
+}
+
+static void dove_mpp_dump_regs(void)
+{
+#ifdef DEBUG
+       int i;
+
+       pr_debug("MPP_CTRL regs:");
+       for (i = 0; i < MPP_NR_REGS; i++)
+               printk(" %08x", readl(MPP_CTRL(i)));
+       printk("\n");
+
+       pr_debug("PMU_SIG_CTRL regs:");
+       for (i = 0; i < PMU_SIG_REGS; i++)
+               printk(" %08x", readl(PMU_SIG_CTRL(i)));
+       printk("\n");
+
+       pr_debug("PMU_MPP_GENERAL_CTRL: %08x\n", readl(DOVE_PMU_MPP_GENERAL_CTRL));
+       pr_debug("MPP_GENERAL: %08x\n", readl(DOVE_MPP_GENERAL_VIRT_BASE));
+#endif
+}
+
+static void dove_mpp_cfg_nfc(int sel)
+{
+       u32 mpp_gen_cfg = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+
+       mpp_gen_cfg &= ~0x1;
+       mpp_gen_cfg |= sel;
+       writel(mpp_gen_cfg, DOVE_MPP_GENERAL_VIRT_BASE);
+
+       dove_mpp_gpio_mode(64, 71, GPIO_OUTPUT_OK);
+}
+
+static void dove_mpp_cfg_au1(int sel)
+{
+       u32 mpp_ctrl4           = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+       u32 ssp_ctrl1 = readl(DOVE_SSP_CTRL_STATUS_1);
+       u32 mpp_gen_ctrl = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+       u32 global_cfg_2 = readl(DOVE_GLOBAL_CONFIG_2);
+
+       mpp_ctrl4 &= ~(DOVE_AU1_GPIO_SEL);
+       ssp_ctrl1 &= ~(DOVE_SSP_ON_AU1);
+       mpp_gen_ctrl &= ~(DOVE_AU1_SPDIFO_GPIO_EN);
+       global_cfg_2 &= ~(DOVE_TWSI_OPTION3_GPIO);
+
+       if (!sel || sel == 0x2)
+               dove_mpp_gpio_mode(52, 57, 0);
+       else
+               dove_mpp_gpio_mode(52, 57, GPIO_OUTPUT_OK | GPIO_INPUT_OK);
+
+       if (sel & 0x1) {
+               global_cfg_2 |= DOVE_TWSI_OPTION3_GPIO;
+               dove_mpp_gpio_mode(56, 57, 0);
+       }
+       if (sel & 0x2) {
+               mpp_gen_ctrl |= DOVE_AU1_SPDIFO_GPIO_EN;
+               dove_mpp_gpio_mode(57, 57, GPIO_OUTPUT_OK | GPIO_INPUT_OK);
+       }
+       if (sel & 0x4) {
+               ssp_ctrl1 |= DOVE_SSP_ON_AU1;
+               dove_mpp_gpio_mode(52, 55, 0);
+       }
+       if (sel & 0x8)
+               mpp_ctrl4 |= DOVE_AU1_GPIO_SEL;
+
+       writel(mpp_ctrl4, DOVE_MPP_CTRL4_VIRT_BASE);
+       writel(ssp_ctrl1, DOVE_SSP_CTRL_STATUS_1);
+       writel(mpp_gen_ctrl, DOVE_MPP_GENERAL_VIRT_BASE);
+       writel(global_cfg_2, DOVE_GLOBAL_CONFIG_2);
+}
+
+static void dove_mpp_conf_grp(int num, int sel, u32 *mpp_ctrl)
+{
+       int start = dove_mpp_grp[num].start;
+       int end = dove_mpp_grp[num].end;
+       int gpio_mode = sel ? GPIO_OUTPUT_OK | GPIO_INPUT_OK : 0;
+
+       *mpp_ctrl &= ~(0x1 << num);
+       *mpp_ctrl |= sel << num;
+
+       dove_mpp_gpio_mode(start, end, gpio_mode);
+}
+
+void __init dove_mpp_conf(unsigned int *mpp_list)
+{
+       u32 mpp_ctrl[MPP_NR_REGS];
+       u32 pmu_mpp_ctrl = 0;
+       u32 pmu_sig_ctrl[PMU_SIG_REGS];
+       int i;
+
+       /* Initialize gpiolib. */
+       orion_gpio_init();
+
+       for (i = 0; i < MPP_NR_REGS; i++)
+               mpp_ctrl[i] = readl(MPP_CTRL(i));
+
+       for (i = 0; i < PMU_SIG_REGS; i++)
+               pmu_sig_ctrl[i] = readl(PMU_SIG_CTRL(i));
+
+       pmu_mpp_ctrl = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+
+       dove_mpp_dump_regs();
+
+       for ( ; *mpp_list != MPP_END; mpp_list++) {
+               unsigned int num = MPP_NUM(*mpp_list);
+               unsigned int sel = MPP_SEL(*mpp_list);
+               int shift, gpio_mode;
+
+               if (num > MPP_MAX) {
+                       pr_err("dove: invalid MPP number (%u)\n", num);
+                       continue;
+               }
+
+               if (*mpp_list & MPP_NFC_MASK) {
+                       dove_mpp_cfg_nfc(sel);
+                       continue;
+               }
+
+               if (*mpp_list & MPP_AU1_MASK) {
+                       dove_mpp_cfg_au1(sel);
+                       continue;
+               }
+
+               if (*mpp_list & MPP_GRP_MASK) {
+                       dove_mpp_conf_grp(num, sel, &mpp_ctrl[3]);
+                       continue;
+               }
+
+               shift = (num & 7) << 2;
+               if (*mpp_list & MPP_PMU_MASK) {
+                       pmu_mpp_ctrl |= (0x1 << num);
+                       pmu_sig_ctrl[num / 8] &= ~(0xf << shift);
+                       pmu_sig_ctrl[num / 8] |= sel << shift;
+                       gpio_mode = 0;
+               } else {
+                       mpp_ctrl[num / 8] &= ~(0xf << shift);
+                       mpp_ctrl[num / 8] |= sel << shift;
+                       gpio_mode = GPIO_OUTPUT_OK | GPIO_INPUT_OK;
+               }
+
+               orion_gpio_set_valid(num, gpio_mode);
+       }
+
+       for (i = 0; i < MPP_NR_REGS; i++)
+               writel(mpp_ctrl[i], MPP_CTRL(i));
+
+       for (i = 0; i < PMU_SIG_REGS; i++)
+               writel(pmu_sig_ctrl[i], PMU_SIG_CTRL(i));
+
+       writel(pmu_mpp_ctrl, DOVE_PMU_MPP_GENERAL_CTRL);
+
+       dove_mpp_dump_regs();
+}
diff --git a/arch/arm/mach-dove/mpp.h b/arch/arm/mach-dove/mpp.h
new file mode 100644 (file)
index 0000000..2a43ce4
--- /dev/null
@@ -0,0 +1,220 @@
+#ifndef __ARCH_DOVE_MPP_CODED_H
+#define __ARCH_DOVE_MPP_CODED_H
+
+#define MPP(_num, _mode, _pmu, _grp, _au1, _nfc) (     \
+/* MPP/group number */         ((_num) & 0xff) |               \
+/* MPP select value */         (((_mode) & 0xf) << 8) |        \
+/* MPP PMU */                  ((!!(_pmu)) << 12) |            \
+/* group flag */               ((!!(_grp)) << 13) |            \
+/* AU1 flag */                 ((!!(_au1)) << 14) |            \
+/* NFCE flag */                        ((!!(_nfc)) << 15))
+
+#define MPP_MAX        71
+
+#define MPP_NUM(x)    ((x) & 0xff)
+#define MPP_SEL(x)    (((x) >> 8) & 0xf)
+
+#define MPP_PMU_MASK           MPP(0, 0x0, 1, 0, 0, 0)
+#define MPP_GRP_MASK           MPP(0, 0x0, 0, 1, 0, 0)
+#define MPP_AU1_MASK           MPP(0, 0x0, 0, 0, 1, 0)
+#define MPP_NFC_MASK           MPP(0, 0x0, 0, 0, 0, 1)
+
+#define MPP_END                        MPP(0xff, 0xf, 1, 1, 1, 1)
+
+#define MPP_PMU_DRIVE_0                0x1
+#define MPP_PMU_DRIVE_1                0x2
+#define MPP_PMU_SDI            0x3
+#define MPP_PMU_CPU_PWRDWN     0x4
+#define MPP_PMU_STBY_PWRDWN    0x5
+#define MPP_PMU_CORE_PWR_GOOD  0x8
+#define MPP_PMU_BAT_FAULT      0xa
+#define MPP_PMU_EXT0_WU                0xb
+#define MPP_PMU_EXT1_WU                0xc
+#define MPP_PMU_EXT2_WU                0xd
+#define MPP_PMU_BLINK          0xe
+#define MPP_PMU(_num, _mode)   MPP((_num), MPP_PMU_##_mode, 1, 0, 0, 0)
+
+#define MPP_PIN(_num, _mode)   MPP((_num), (_mode), 0, 0, 0, 0)
+#define MPP_GRP(_grp, _mode)   MPP((_grp), (_mode), 0, 1, 0, 0)
+#define MPP_GRP_AU1(_mode)     MPP(0, (_mode), 0, 0, 1, 0)
+#define MPP_GRP_NFC(_mode)     MPP(0, (_mode), 0, 0, 0, 1)
+
+#define MPP0_GPIO0             MPP_PIN(0, 0x0)
+#define MPP0_UA2_RTSn          MPP_PIN(0, 0x2)
+#define MPP0_SDIO0_CD          MPP_PIN(0, 0x3)
+#define MPP0_LCD0_PWM          MPP_PIN(0, 0xf)
+
+#define MPP1_GPIO1             MPP_PIN(1, 0x0)
+#define MPP1_UA2_CTSn          MPP_PIN(1, 0x2)
+#define MPP1_SDIO0_WP          MPP_PIN(1, 0x3)
+#define MPP1_LCD1_PWM          MPP_PIN(1, 0xf)
+
+#define MPP2_GPIO2             MPP_PIN(2, 0x0)
+#define MPP2_SATA_PRESENT      MPP_PIN(2, 0x1)
+#define MPP2_UA2_TXD           MPP_PIN(2, 0x2)
+#define MPP2_SDIO0_BUS_POWER   MPP_PIN(2, 0x3)
+#define MPP2_UA_RTSn1          MPP_PIN(2, 0x4)
+
+#define MPP3_GPIO3             MPP_PIN(3, 0x0)
+#define MPP3_SATA_ACT          MPP_PIN(3, 0x1)
+#define MPP3_UA2_RXD           MPP_PIN(3, 0x2)
+#define MPP3_SDIO0_LED_CTRL    MPP_PIN(3, 0x3)
+#define MPP3_UA_CTSn1          MPP_PIN(3, 0x4)
+#define MPP3_SPI_LCD_CS1       MPP_PIN(3, 0xf)
+
+#define MPP4_GPIO4             MPP_PIN(4, 0x0)
+#define MPP4_UA3_RTSn          MPP_PIN(4, 0x2)
+#define MPP4_SDIO1_CD          MPP_PIN(4, 0x3)
+#define MPP4_SPI_1_MISO                MPP_PIN(4, 0x4)
+
+#define MPP5_GPIO5             MPP_PIN(5, 0x0)
+#define MPP5_UA3_CTSn          MPP_PIN(5, 0x2)
+#define MPP5_SDIO1_WP          MPP_PIN(5, 0x3)
+#define MPP5_SPI_1_CS          MPP_PIN(5, 0x4)
+
+#define MPP6_GPIO6             MPP_PIN(6, 0x0)
+#define MPP6_UA3_TXD           MPP_PIN(6, 0x2)
+#define MPP6_SDIO1_BUS_POWER   MPP_PIN(6, 0x3)
+#define MPP6_SPI_1_MOSI                MPP_PIN(6, 0x4)
+
+#define MPP7_GPIO7             MPP_PIN(7, 0x0)
+#define MPP7_UA3_RXD           MPP_PIN(7, 0x2)
+#define MPP7_SDIO1_LED_CTRL    MPP_PIN(7, 0x3)
+#define MPP7_SPI_1_SCK         MPP_PIN(7, 0x4)
+
+#define MPP8_GPIO8             MPP_PIN(8, 0x0)
+#define MPP8_WD_RST_OUT                MPP_PIN(8, 0x1)
+
+#define MPP9_GPIO9             MPP_PIN(9, 0x0)
+#define MPP9_PEX1_CLKREQn      MPP_PIN(9, 0x5)
+
+#define MPP10_GPIO10           MPP_PIN(10, 0x0)
+#define MPP10_SSP_SCLK         MPP_PIN(10, 0x5)
+
+#define MPP11_GPIO11           MPP_PIN(11, 0x0)
+#define MPP11_SATA_PRESENT     MPP_PIN(11, 0x1)
+#define MPP11_SATA_ACT         MPP_PIN(11, 0x2)
+#define MPP11_SDIO0_LED_CTRL   MPP_PIN(11, 0x3)
+#define MPP11_SDIO1_LED_CTRL   MPP_PIN(11, 0x4)
+#define MPP11_PEX0_CLKREQn     MPP_PIN(11, 0x5)
+
+#define MPP12_GPIO12           MPP_PIN(12, 0x0)
+#define MPP12_SATA_ACT         MPP_PIN(12, 0x1)
+#define MPP12_UA2_RTSn         MPP_PIN(12, 0x2)
+#define MPP12_AD0_I2S_EXT_MCLK MPP_PIN(12, 0x3)
+#define MPP12_SDIO1_CD         MPP_PIN(12, 0x4)
+
+#define MPP13_GPIO13           MPP_PIN(13, 0x0)
+#define MPP13_UA2_CTSn         MPP_PIN(13, 0x2)
+#define MPP13_AD1_I2S_EXT_MCLK MPP_PIN(13, 0x3)
+#define MPP13_SDIO1WP          MPP_PIN(13, 0x4)
+#define MPP13_SSP_EXTCLK       MPP_PIN(13, 0x5)
+
+#define MPP14_GPIO14           MPP_PIN(14, 0x0)
+#define MPP14_UA2_TXD          MPP_PIN(14, 0x2)
+#define MPP14_SDIO1_BUS_POWER  MPP_PIN(14, 0x4)
+#define MPP14_SSP_RXD          MPP_PIN(14, 0x5)
+
+#define MPP15_GPIO15           MPP_PIN(15, 0x0)
+#define MPP15_UA2_RXD          MPP_PIN(15, 0x2)
+#define MPP15_SDIO1_LED_CTRL   MPP_PIN(15, 0x4)
+#define MPP15_SSP_SFRM         MPP_PIN(15, 0x5)
+
+#define MPP16_GPIO16           MPP_PIN(16, 0x0)
+#define MPP16_UA3_RTSn         MPP_PIN(16, 0x2)
+#define MPP16_SDIO0_CD         MPP_PIN(16, 0x3)
+#define MPP16_SPI_LCD_CS1      MPP_PIN(16, 0x4)
+#define MPP16_AC97_SDATA_IN1   MPP_PIN(16, 0x5)
+
+#define MPP17_GPIO17           MPP_PIN(17, 0x0)
+#define MPP17_AC97_SYSCLK_OUT  MPP_PIN(17, 0x1)
+#define MPP17_UA3_CTSn         MPP_PIN(17, 0x2)
+#define MPP17_SDIO0_WP         MPP_PIN(17, 0x3)
+#define MPP17_TW_SDA2          MPP_PIN(17, 0x4)
+#define MPP17_AC97_SDATA_IN2   MPP_PIN(17, 0x5)
+
+#define MPP18_GPIO18           MPP_PIN(18, 0x0)
+#define MPP18_UA3_TXD          MPP_PIN(18, 0x2)
+#define MPP18_SDIO0_BUS_POWER  MPP_PIN(18, 0x3)
+#define MPP18_LCD0_PWM         MPP_PIN(18, 0x4)
+#define MPP18_AC_SDATA_IN3     MPP_PIN(18, 0x5)
+
+#define MPP19_GPIO19           MPP_PIN(19, 0x0)
+#define MPP19_UA3_RXD          MPP_PIN(19, 0x2)
+#define MPP19_SDIO0_LED_CTRL   MPP_PIN(19, 0x3)
+#define MPP19_TW_SCK2          MPP_PIN(19, 0x4)
+
+#define MPP20_GPIO20           MPP_PIN(20, 0x0)
+#define MPP20_AC97_SYSCLK_OUT  MPP_PIN(20, 0x1)
+#define MPP20_SPI_LCD_MISO     MPP_PIN(20, 0x2)
+#define MPP20_SDIO1_CD         MPP_PIN(20, 0x3)
+#define MPP20_SDIO0_CD         MPP_PIN(20, 0x5)
+#define MPP20_SPI_1_MISO       MPP_PIN(20, 0x6)
+
+#define MPP21_GPIO21           MPP_PIN(21, 0x0)
+#define MPP21_UA1_RTSn         MPP_PIN(21, 0x1)
+#define MPP21_SPI_LCD_CS0      MPP_PIN(21, 0x2)
+#define MPP21_SDIO1_WP         MPP_PIN(21, 0x3)
+#define MPP21_SSP_SFRM         MPP_PIN(21, 0x4)
+#define MPP21_SDIO0_WP         MPP_PIN(21, 0x5)
+#define MPP21_SPI_1_CS         MPP_PIN(21, 0x6)
+
+#define MPP22_GPIO22           MPP_PIN(22, 0x0)
+#define MPP22_UA1_CTSn         MPP_PIN(22, 0x1)
+#define MPP22_SPI_LCD_MOSI     MPP_PIN(22, 0x2)
+#define MPP22_SDIO1_BUS_POWER  MPP_PIN(22, 0x3)
+#define MPP22_SSP_TXD          MPP_PIN(22, 0x4)
+#define MPP22_SDIO0_BUS_POWER  MPP_PIN(22, 0x5)
+#define MPP22_SPI_1_MOSI       MPP_PIN(22, 0x6)
+
+#define MPP23_GPIO23           MPP_PIN(23, 0x0)
+#define MPP23_SPI_LCD_SCK      MPP_PIN(23, 0x2)
+#define MPP23_SDIO1_LED_CTRL   MPP_PIN(23, 0x3)
+#define MPP23_SSP_SCLK         MPP_PIN(23, 0x4)
+#define MPP23_SDIO0_LED_CTRL   MPP_PIN(23, 0x5)
+#define MPP23_SPI_1_SCK                MPP_PIN(23, 0x6)
+
+/* for MPP groups _num is a group index */
+enum dove_mpp_grp_idx {
+       MPP_24_39 = 2,
+       MPP_40_45 = 0,
+       MPP_46_51 = 1,
+       MPP_58_61 = 5,
+       MPP_62_63 = 4,
+};
+
+#define MPP24_39_GPIO          MPP_GRP(MPP_24_39, 0x1)
+#define MPP24_39_CAM           MPP_GRP(MPP_24_39, 0x0)
+
+#define MPP40_45_GPIO          MPP_GRP(MPP_40_45, 0x1)
+#define MPP40_45_SD0           MPP_GRP(MPP_40_45, 0x0)
+
+#define MPP46_51_GPIO          MPP_GRP(MPP_46_51, 0x1)
+#define MPP46_51_SD1           MPP_GRP(MPP_46_51, 0x0)
+
+#define MPP58_61_GPIO          MPP_GRP(MPP_58_61, 0x1)
+#define MPP58_61_SPI           MPP_GRP(MPP_58_61, 0x0)
+
+#define MPP62_63_GPIO          MPP_GRP(MPP_62_63, 0x1)
+#define MPP62_63_UA1           MPP_GRP(MPP_62_63, 0x0)
+
+/* The MPP[64:71] control differs from other groups */
+#define MPP64_71_GPO           MPP_GRP_NFC(0x1)
+#define MPP64_71_NFC           MPP_GRP_NFC(0x0)
+
+/*
+ * The MPP[52:57] functionality is encoded by 4 bits in different
+ * registers. The _mode field in this case encodes those bits in
+ * correspondence with Table 135 of the 88AP510 Functional Specification.
+ */
+#define MPP52_57_AU1           MPP_GRP_AU1(0x0)
+#define MPP52_57_AU1_GPIO57    MPP_GRP_AU1(0x2)
+#define MPP52_57_GPIO          MPP_GRP_AU1(0xa)
+#define MPP52_57_TW_GPIO       MPP_GRP_AU1(0xb)
+#define MPP52_57_AU1_SSP       MPP_GRP_AU1(0xc)
+#define MPP52_57_SSP_GPIO      MPP_GRP_AU1(0xe)
+#define MPP52_57_SSP_TW                MPP_GRP_AU1(0xf)
+
+void dove_mpp_conf(unsigned int *mpp_list);
+
+#endif /* __ARCH_DOVE_MPP_CODED_H */
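For illustration only, not part of the patch above: a board support file is expected to describe its pin multiplexing with the helpers from this header and hand the table to dove_mpp_conf(). The pin selections below are invented for the example; only the macro names and the MPP_END terminator come from mpp.h.

/* Hypothetical board: pin choices are examples, not a real machine. */
static unsigned int example_board_mpp_list[] __initdata = {
	MPP0_GPIO0,		/* plain GPIO on pin 0 */
	MPP2_SATA_PRESENT,	/* SATA presence indication on pin 2 */
	MPP46_51_GPIO,		/* whole SD1 pin group switched to GPIO */
	MPP52_57_GPIO,		/* AU1 group as GPIO (encoding 0xa above) */
	MPP64_71_GPO,		/* NFC pins as general purpose outputs */
	MPP_END,		/* terminator expected by dove_mpp_conf() */
};

static void __init example_board_init(void)
{
	dove_init();				/* basic SoC setup first */
	dove_mpp_conf(example_board_mpp_list);	/* apply the mux table */
	/* ...then register on-board devices, as cm-a510.c does... */
}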
index 3410633..7fc603b 100644 (file)
@@ -45,18 +45,18 @@ config MACH_GURUPLUG
          Marvell GuruPlug Reference Board.
 
 config MACH_TS219
-       bool "QNAP TS-110, TS-119, TS-210, TS-219 and TS-219P Turbo NAS"
+       bool "QNAP TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and TS-219P+ Turbo NAS"
        help
          Say 'Y' here if you want your kernel to support the
-         QNAP TS-110, TS-119, TS-210, TS-219 and TS-219P Turbo NAS
-         devices.
+         QNAP TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and
+         TS-219P+ Turbo NAS devices.
 
 config MACH_TS41X
-       bool "QNAP TS-410, TS-410U, TS-419P and TS-419U Turbo NAS"
+       bool "QNAP TS-410, TS-410U, TS-419P, TS-419P+ and TS-419U Turbo NAS"
        help
          Say 'Y' here if you want your kernel to support the
-         QNAP TS-410, TS-410U, TS-419P and TS-419U Turbo NAS
-         devices.
+         QNAP TS-410, TS-410U, TS-419P, TS-419P+ and TS-419U Turbo
+         NAS devices.
 
 config MACH_DOCKSTAR
        bool "Seagate FreeAgent DockStar"
index 6710bd7..dc999c4 100644 (file)
@@ -80,15 +80,19 @@ static unsigned int qnap_ts219_mpp_config[] __initdata = {
        MPP11_UART0_RXD,
        MPP13_UART1_TXD,        /* PIC controller */
        MPP14_UART1_RXD,        /* PIC controller */
-       MPP15_GPIO,             /* USB Copy button */
-       MPP16_GPIO,             /* Reset button */
+       MPP15_GPIO,             /* USB Copy button (on devices with 88F6281) */
+       MPP16_GPIO,             /* Reset button (on devices with 88F6281) */
        MPP36_GPIO,             /* RAM: 0: 256 MB, 1: 512 MB */
+       MPP37_GPIO,             /* Reset button (on devices with 88F6282) */
+       MPP43_GPIO,             /* USB Copy button (on devices with 88F6282) */
        MPP44_GPIO,             /* Board ID: 0: TS-11x, 1: TS-21x */
        0
 };
 
 static void __init qnap_ts219_init(void)
 {
+       u32 dev, rev;
+
        /*
         * Basic setup. Needs to be called early.
         */
@@ -100,6 +104,14 @@ static void __init qnap_ts219_init(void)
        qnap_tsx1x_register_flash();
        kirkwood_i2c_init();
        i2c_register_board_info(0, &qnap_ts219_i2c_rtc, 1);
+
+       kirkwood_pcie_id(&dev, &rev);
+       if (dev == MV88F6282_DEV_ID) {
+               qnap_ts219_buttons[0].gpio = 43; /* USB Copy button */
+               qnap_ts219_buttons[1].gpio = 37; /* Reset button */
+               qnap_ts219_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
+       }
+
        kirkwood_ge00_init(&qnap_ts219_ge00_data);
        kirkwood_sata_init(&qnap_ts219_sata_data);
        kirkwood_ehci_init();
index 3587a28..9a44029 100644 (file)
@@ -119,6 +119,8 @@ static unsigned int qnap_ts41x_mpp_config[] __initdata = {
 
 static void __init qnap_ts41x_init(void)
 {
+       u32 dev, rev;
+
        /*
         * Basic setup. Needs to be called early.
         */
@@ -130,8 +132,15 @@ static void __init qnap_ts41x_init(void)
        qnap_tsx1x_register_flash();
        kirkwood_i2c_init();
        i2c_register_board_info(0, &qnap_ts41x_i2c_rtc, 1);
+
+       kirkwood_pcie_id(&dev, &rev);
+       if (dev == MV88F6282_DEV_ID) {
+               qnap_ts41x_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
+               qnap_ts41x_ge01_data.phy_addr = MV643XX_ETH_PHY_ADDR(1);
+       }
        kirkwood_ge00_init(&qnap_ts41x_ge00_data);
        kirkwood_ge01_init(&qnap_ts41x_ge01_data);
+
        kirkwood_sata_init(&qnap_ts41x_sata_data);
        kirkwood_ehci_init();
        platform_device_register(&qnap_ts41x_button_device);
index dbbcfeb..31e5fd6 100644 (file)
@@ -49,6 +49,8 @@ endchoice
 
 config MSM_SOC_REV_A
        bool
+config  ARCH_MSM_SCORPIONMP
+       bool
 
 config  ARCH_MSM_ARM11
        bool
index 788bdac..3eff399 100644 (file)
@@ -65,7 +65,7 @@
  */
 #define DDR_VIRT_BASE          (MV78XX0_REGS_VIRT_BASE | 0x00000)
 #define  DDR_WINDOW_CPU0_BASE  (DDR_VIRT_BASE | 0x1500)
-#define  DDR_WINDOW_CPU1_BASE  (DDR_VIRT_BASE | 0x1700)
+#define  DDR_WINDOW_CPU1_BASE  (DDR_VIRT_BASE | 0x1570)
 
 #define DEV_BUS_PHYS_BASE      (MV78XX0_REGS_PHYS_BASE | 0x10000)
 #define DEV_BUS_VIRT_BASE      (MV78XX0_REGS_VIRT_BASE | 0x10000)
index c897e03..6604fc6 100644 (file)
@@ -51,6 +51,13 @@ config MACH_LINKSTATION_PRO
          Buffalo Linkstation Pro/Live platform. Both v1 and
          v2 devices are supported.
 
+config MACH_LINKSTATION_LSCHL
+       bool "Buffalo Linkstation Live v3 (LS-CHL)"
+       select I2C_BOARDINFO
+       help
+         Say 'Y' here if you want your kernel to support the
+         Buffalo Linkstation Live v3 (LS-CHL) platform.
+
 config MACH_LINKSTATION_MINI
        bool "Buffalo Linkstation Mini"
        select I2C_BOARDINFO
index eb6eabc..7f18cda 100644 (file)
@@ -21,3 +21,4 @@ obj-$(CONFIG_MACH_WNR854T)    += wnr854t-setup.o
 obj-$(CONFIG_MACH_RD88F5181L_GE)       += rd88f5181l-ge-setup.o
 obj-$(CONFIG_MACH_RD88F5181L_FXO)      += rd88f5181l-fxo-setup.o
 obj-$(CONFIG_MACH_RD88F6183AP_GE)      += rd88f6183ap-ge-setup.o
+obj-$(CONFIG_MACH_LINKSTATION_LSCHL)   += ls-chl-setup.o
diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c
new file mode 100644 (file)
index 0000000..20a9b66
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * arch/arm/mach-orion5x/ls-chl-setup.c
+ *
+ * Maintainer: Ash Hughes <ashley.hughes@blueyonder.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/leds.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio-fan.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/ata_platform.h>
+#include <linux/gpio.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/system.h>
+#include <mach/orion5x.h>
+#include "common.h"
+#include "mpp.h"
+
+/*****************************************************************************
+ * Linkstation LS-CHL Info
+ ****************************************************************************/
+
+/*
+ * 256K NOR flash Device bus boot chip select
+ */
+
+#define LSCHL_NOR_BOOT_BASE    0xf4000000
+#define LSCHL_NOR_BOOT_SIZE    SZ_256K
+
+/*****************************************************************************
+ * 256KB NOR Flash on BOOT Device
+ ****************************************************************************/
+
+static struct physmap_flash_data lschl_nor_flash_data = {
+       .width = 1,
+};
+
+static struct resource lschl_nor_flash_resource = {
+       .flags  = IORESOURCE_MEM,
+       .start  = LSCHL_NOR_BOOT_BASE,
+       .end    = LSCHL_NOR_BOOT_BASE + LSCHL_NOR_BOOT_SIZE - 1,
+};
+
+static struct platform_device lschl_nor_flash = {
+       .name = "physmap-flash",
+       .id = 0,
+       .dev = {
+               .platform_data  = &lschl_nor_flash_data,
+       },
+       .num_resources = 1,
+       .resource = &lschl_nor_flash_resource,
+};
+
+/*****************************************************************************
+ * Ethernet
+ ****************************************************************************/
+
+static struct mv643xx_eth_platform_data lschl_eth_data = {
+       .phy_addr = MV643XX_ETH_PHY_ADDR(8),
+};
+
+/*****************************************************************************
+ * RTC 5C372a on I2C bus
+ ****************************************************************************/
+
+static struct i2c_board_info __initdata lschl_i2c_rtc = {
+       I2C_BOARD_INFO("rs5c372a", 0x32),
+};
+
+/*****************************************************************************
+ * LEDs attached to GPIO
+ ****************************************************************************/
+
+#define LSCHL_GPIO_LED_ALARM   2
+#define LSCHL_GPIO_LED_INFO    3
+#define LSCHL_GPIO_LED_FUNC    17
+#define LSCHL_GPIO_LED_PWR     0
+
+static struct gpio_led lschl_led_pins[] = {
+       {
+               .name = "alarm:red",
+               .gpio = LSCHL_GPIO_LED_ALARM,
+               .active_low = 1,
+       }, {
+               .name = "info:amber",
+               .gpio = LSCHL_GPIO_LED_INFO,
+               .active_low = 1,
+       }, {
+               .name = "func:blue:top",
+               .gpio = LSCHL_GPIO_LED_FUNC,
+               .active_low = 1,
+       }, {
+               .name = "power:blue:bottom",
+               .gpio = LSCHL_GPIO_LED_PWR,
+       },
+};
+
+static struct gpio_led_platform_data lschl_led_data = {
+       .leds = lschl_led_pins,
+       .num_leds = ARRAY_SIZE(lschl_led_pins),
+};
+
+static struct platform_device lschl_leds = {
+       .name = "leds-gpio",
+       .id = -1,
+       .dev = {
+               .platform_data = &lschl_led_data,
+       },
+};
+
+/*****************************************************************************
+ * SATA
+ ****************************************************************************/
+static struct mv_sata_platform_data lschl_sata_data = {
+       .n_ports = 2,
+};
+
+/*****************************************************************************
+ * LS-CHL specific power off method: reboot
+ ****************************************************************************/
+/*
+ * On the LS-CHL, the shutdown process is as follows:
+ * - Userland monitors key events until the power switch goes to off position
+ * - The board reboots
+ * - U-boot starts and goes into an idle mode waiting for the user
+ *   to move the switch to ON position
+ *
+ */
+
+static void lschl_power_off(void)
+{
+       arm_machine_restart('h', NULL);
+}
+
+/*****************************************************************************
+ * General Setup
+ ****************************************************************************/
+#define LSCHL_GPIO_USB_POWER   9
+#define LSCHL_GPIO_AUTO_POWER  17
+#define LSCHL_GPIO_POWER       18
+
+/****************************************************************************
+ * GPIO Attached Keys
+ ****************************************************************************/
+#define LSCHL_GPIO_KEY_FUNC            15
+#define LSCHL_GPIO_KEY_POWER           8
+#define LSCHL_GPIO_KEY_AUTOPOWER       10
+#define LSCHL_SW_POWER         0x00
+#define LSCHL_SW_AUTOPOWER     0x01
+#define LSCHL_SW_FUNC          0x02
+
+static struct gpio_keys_button lschl_buttons[] = {
+       {
+               .type = EV_SW,
+               .code = LSCHL_SW_POWER,
+               .gpio = LSCHL_GPIO_KEY_POWER,
+               .desc = "Power-on Switch",
+               .active_low = 1,
+       }, {
+               .type = EV_SW,
+               .code = LSCHL_SW_AUTOPOWER,
+               .gpio = LSCHL_GPIO_KEY_AUTOPOWER,
+               .desc = "Power-auto Switch",
+               .active_low = 1,
+       }, {
+               .type = EV_SW,
+               .code = LSCHL_SW_FUNC,
+               .gpio = LSCHL_GPIO_KEY_FUNC,
+               .desc = "Function Switch",
+               .active_low = 1,
+       },
+};
+
+static struct gpio_keys_platform_data lschl_button_data = {
+       .buttons = lschl_buttons,
+       .nbuttons = ARRAY_SIZE(lschl_buttons),
+};
+
+static struct platform_device lschl_button_device = {
+       .name = "gpio-keys",
+       .id = -1,
+       .num_resources = 0,
+       .dev = {
+               .platform_data = &lschl_button_data,
+       },
+};
+
+#define LSCHL_GPIO_HDD_POWER   1
+
+/****************************************************************************
+ * GPIO Fan
+ ****************************************************************************/
+
+#define LSCHL_GPIO_FAN_LOW     16
+#define LSCHL_GPIO_FAN_HIGH    14
+#define LSCHL_GPIO_FAN_LOCK    6
+
+static struct gpio_fan_alarm lschl_alarm = {
+       .gpio = LSCHL_GPIO_FAN_LOCK,
+};
+
+static struct gpio_fan_speed lschl_speeds[] = {
+       {
+               .rpm = 0,
+               .ctrl_val = 3,
+       }, {
+               .rpm = 1500,
+               .ctrl_val = 2,
+       }, {
+               .rpm = 3250,
+               .ctrl_val = 1,
+       }, {
+               .rpm = 5000,
+               .ctrl_val = 0,
+       },
+};
+
+static int lschl_gpio_list[] = {
+       LSCHL_GPIO_FAN_HIGH, LSCHL_GPIO_FAN_LOW,
+};
+
+static struct gpio_fan_platform_data lschl_fan_data = {
+       .num_ctrl = ARRAY_SIZE(lschl_gpio_list),
+       .ctrl = lschl_gpio_list,
+       .alarm = &lschl_alarm,
+       .num_speed = ARRAY_SIZE(lschl_speeds),
+       .speed = lschl_speeds,
+};
+
+static struct platform_device lschl_fan_device = {
+       .name = "gpio-fan",
+       .id = -1,
+       .num_resources = 0,
+       .dev = {
+               .platform_data = &lschl_fan_data,
+       },
+};
+
+/****************************************************************************
+ * GPIO Data
+ ****************************************************************************/
+
+static struct orion5x_mpp_mode lschl_mpp_modes[] __initdata = {
+       {  0, MPP_GPIO }, /* LED POWER */
+       {  1, MPP_GPIO }, /* HDD POWER */
+       {  2, MPP_GPIO }, /* LED ALARM */
+       {  3, MPP_GPIO }, /* LED INFO */
+       {  4, MPP_UNUSED },
+       {  5, MPP_UNUSED },
+       {  6, MPP_GPIO }, /* FAN LOCK */
+       {  7, MPP_GPIO }, /* SW INIT */
+       {  8, MPP_GPIO }, /* SW POWER */
+       {  9, MPP_GPIO }, /* USB POWER */
+       { 10, MPP_GPIO }, /* SW AUTO POWER */
+       { 11, MPP_UNUSED },
+       { 12, MPP_UNUSED },
+       { 13, MPP_UNUSED },
+       { 14, MPP_GPIO }, /* FAN HIGH */
+       { 15, MPP_GPIO }, /* SW FUNC */
+       { 16, MPP_GPIO }, /* FAN LOW */
+       { 17, MPP_GPIO }, /* LED FUNC */
+       { 18, MPP_UNUSED },
+       { 19, MPP_UNUSED },
+       { -1 },
+};
+
+static void __init lschl_init(void)
+{
+       /*
+        * Setup basic Orion functions. Needs to be called early.
+        */
+       orion5x_init();
+
+       orion5x_mpp_conf(lschl_mpp_modes);
+
+       /*
+        * Configure peripherals.
+        */
+       orion5x_ehci0_init();
+       orion5x_ehci1_init();
+       orion5x_eth_init(&lschl_eth_data);
+       orion5x_i2c_init();
+       orion5x_sata_init(&lschl_sata_data);
+       orion5x_uart0_init();
+       orion5x_xor_init();
+
+       orion5x_setup_dev_boot_win(LSCHL_NOR_BOOT_BASE,
+                                  LSCHL_NOR_BOOT_SIZE);
+       platform_device_register(&lschl_nor_flash);
+
+       platform_device_register(&lschl_leds);
+
+       platform_device_register(&lschl_button_device);
+
+       platform_device_register(&lschl_fan_device);
+
+       i2c_register_board_info(0, &lschl_i2c_rtc, 1);
+
+       /* usb power on */
+       gpio_set_value(LSCHL_GPIO_USB_POWER, 1);
+
+       /* register power-off method */
+       pm_power_off = lschl_power_off;
+
+       pr_info("%s: finished\n", __func__);
+}
+
+MACHINE_START(LINKSTATION_LSCHL, "Buffalo Linkstation LiveV3 (LS-CHL)")
+       /* Maintainer: Ash Hughes <ashley.hughes@blueyonder.co.uk> */
+       .boot_params    = 0x00000100,
+       .init_machine   = lschl_init,
+       .map_io         = orion5x_map_io,
+       .init_irq       = orion5x_init_irq,
+       .timer          = &orion5x_timer,
+       .fixup          = tag_fixup_mem32,
+MACHINE_END
index 9768cf7..9696ddc 100644 (file)
@@ -20,6 +20,7 @@
  */
 #include <linux/cnt32_to_63.h>
 #include <linux/io.h>
+#include <linux/sched.h>
 #include <asm/div64.h>
 
 #include <mach/hardware.h>