sparc64: Like x86 we should check current->mm during perf backtrace generation.
arch/sparc/kernel/perf_event.c
1 /* Performance event support for sparc64.
2  *
3  * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
4  *
5  * This code is based almost entirely upon the x86 perf event
6  * code, which is:
7  *
8  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
9  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
10  *  Copyright (C) 2009 Jaswinder Singh Rajput
11  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
12  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/kprobes.h>
17 #include <linux/ftrace.h>
18 #include <linux/kernel.h>
19 #include <linux/kdebug.h>
20 #include <linux/mutex.h>
21
22 #include <asm/stacktrace.h>
23 #include <asm/cpudata.h>
24 #include <asm/uaccess.h>
25 #include <linux/atomic.h>
26 #include <asm/nmi.h>
27 #include <asm/pcr.h>
28 #include <asm/cacheflush.h>
29
30 #include "kernel.h"
31 #include "kstack.h"
32
33 /* Two classes of sparc64 chips currently exist, all of which have
34  * 32-bit counters that can generate overflow interrupts on the
35  * transition from 0xffffffff to 0.
36  *
37  * All chips up to and including SPARC-T3 have two performance
38  * counters.  The two 32-bit counters are accessed in one go using a
39  * single 64-bit register.
40  *
41  * On these older chips both counters are controlled using a single
42  * control register.  The only way to stop all sampling is to clear
43  * all of the context (user, supervisor, hypervisor) sampling enable
44  * bits.  But these bits apply to both counters, thus the two counters
45  * can't be enabled/disabled individually.
46  *
47  * Furthermore, the control register on these older chips has two
48  * event fields, one for each of the two counters.  It's thus nearly
49  * impossible to have one counter going while keeping the other one
50  * stopped.  Therefore it is possible to get overflow interrupts for
51  * counters not currently "in use" and that condition must be checked
52  * in the overflow interrupt handler.
53  *
54  * So we use a hack, in that we program inactive counters with the
55  * "sw_count0" and "sw_count1" events.  These count how many times
56  * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
57  * unusual way to encode a NOP and therefore will not trigger in
58  * normal code.
59  *
60  * Starting with SPARC-T4 we have one control register per counter.
61  * And the counters are stored in individual registers.  The registers
62  * for the counters are 64-bit but only a 32-bit counter is
63  * implemented.  The event selections on SPARC-T4 lack any
64  * restrictions, therefore we can elide all of the complicated
65  * conflict resolution code we have for SPARC-T3 and earlier chips.
66  */
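/* As a concrete example of the older layout, the UltraSPARC-III
 * values defined below (upper_shift = 11, lower_shift = 4,
 * event_mask = 0x3f) put the upper PIC's event select in %pcr bits
 * 16:11 and the lower PIC's in bits 9:4, with the user/supervisor
 * trace enable bits shared by both counters.  On SPARC-T4 and later
 * each counter instead has its own PCR and PIC register, so no such
 * sharing exists.
 */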
67
68 #define MAX_HWEVENTS                    4
69 #define MAX_PCRS                        4
70 #define MAX_PERIOD                      ((1UL << 32) - 1)
71
72 #define PIC_UPPER_INDEX                 0
73 #define PIC_LOWER_INDEX                 1
74 #define PIC_NO_INDEX                    -1
75
76 struct cpu_hw_events {
77         /* Number of events currently scheduled onto this cpu.
78          * This tells how many entries in the arrays below
79          * are valid.
80          */
81         int                     n_events;
82
83         /* Number of new events added since the last hw_perf_disable().
84          * This works because the perf event layer always adds new
85          * events inside of a perf_{disable,enable}() sequence.
86          */
87         int                     n_added;
88
89         /* Array of events currently scheduled on this cpu.  */
90         struct perf_event       *event[MAX_HWEVENTS];
91
92         /* Array of encoded longs, specifying the %pcr register
93          * encoding and the mask of PIC counters this event can
94          * be scheduled on.  See perf_event_encode() et al.
95          */
96         unsigned long           events[MAX_HWEVENTS];
97
98         /* The current counter index assigned to an event.  When the
99          * event hasn't been programmed into the cpu yet, this will
100          * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
101          * we ought to schedule the event.
102          */
103         int                     current_idx[MAX_HWEVENTS];
104
105         /* Software copy of %pcr register(s) on this cpu.  */
106         u64                     pcr[MAX_HWEVENTS];
107
108         /* Enable/disable state.  */
109         int                     enabled;
110
111         unsigned int            group_flag;
112 };
113 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
114
115 /* An event map describes the characteristics of a performance
116  * counter event.  In particular it gives the encoding as well as
117  * a mask telling which counters the event can be measured on.
118  *
119  * The mask is unused on SPARC-T4 and later.
120  */
121 struct perf_event_map {
122         u16     encoding;
123         u8      pic_mask;
124 #define PIC_NONE        0x00
125 #define PIC_UPPER       0x01
126 #define PIC_LOWER       0x02
127 };
128
129 /* Encode a perf_event_map entry into a long.  */
130 static unsigned long perf_event_encode(const struct perf_event_map *pmap)
131 {
132         return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
133 }
134
135 static u8 perf_event_get_msk(unsigned long val)
136 {
137         return val & 0xff;
138 }
139
140 static u64 perf_event_get_enc(unsigned long val)
141 {
142         return val >> 16;
143 }
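/* For example, the ultra3 PERF_COUNT_HW_CACHE_REFERENCES entry below,
 * { 0x0009, PIC_LOWER }, encodes to (0x0009 << 16) | 0x02 ==
 * 0x00090002; perf_event_get_enc() recovers 0x0009 and
 * perf_event_get_msk() recovers 0x02 from that value.
 */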
144
145 #define C(x) PERF_COUNT_HW_CACHE_##x
146
147 #define CACHE_OP_UNSUPPORTED    0xfffe
148 #define CACHE_OP_NONSENSE       0xffff
149
150 typedef struct perf_event_map cache_map_t
151                                 [PERF_COUNT_HW_CACHE_MAX]
152                                 [PERF_COUNT_HW_CACHE_OP_MAX]
153                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
154
155 struct sparc_pmu {
156         const struct perf_event_map     *(*event_map)(int);
157         const cache_map_t               *cache_map;
158         int                             max_events;
159         u32                             (*read_pmc)(int);
160         void                            (*write_pmc)(int, u64);
161         int                             upper_shift;
162         int                             lower_shift;
163         int                             event_mask;
164         int                             user_bit;
165         int                             priv_bit;
166         int                             hv_bit;
167         int                             irq_bit;
168         int                             upper_nop;
169         int                             lower_nop;
170         unsigned int                    flags;
171 #define SPARC_PMU_ALL_EXCLUDES_SAME     0x00000001
172 #define SPARC_PMU_HAS_CONFLICTS         0x00000002
173         int                             max_hw_events;
174         int                             num_pcrs;
175         int                             num_pic_regs;
176 };
177
178 static u32 sparc_default_read_pmc(int idx)
179 {
180         u64 val;
181
182         val = pcr_ops->read_pic(0);
183         if (idx == PIC_UPPER_INDEX)
184                 val >>= 32;
185
186         return val & 0xffffffff;
187 }
188
189 static void sparc_default_write_pmc(int idx, u64 val)
190 {
191         u64 shift, mask, pic;
192
193         shift = 0;
194         if (idx == PIC_UPPER_INDEX)
195                 shift = 32;
196
197         mask = ((u64) 0xffffffff) << shift;
198         val <<= shift;
199
200         pic = pcr_ops->read_pic(0);
201         pic &= ~mask;
202         pic |= val;
203         pcr_ops->write_pic(0, pic);
204 }
205
206 static const struct perf_event_map ultra3_perfmon_event_map[] = {
207         [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
208         [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
209         [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
210         [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
211 };
212
213 static const struct perf_event_map *ultra3_event_map(int event_id)
214 {
215         return &ultra3_perfmon_event_map[event_id];
216 }
217
218 static const cache_map_t ultra3_cache_map = {
219 [C(L1D)] = {
220         [C(OP_READ)] = {
221                 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
222                 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
223         },
224         [C(OP_WRITE)] = {
225                 [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
226                 [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
227         },
228         [C(OP_PREFETCH)] = {
229                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
230                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
231         },
232 },
233 [C(L1I)] = {
234         [C(OP_READ)] = {
235                 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
236                 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
237         },
238         [ C(OP_WRITE) ] = {
239                 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
240                 [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
241         },
242         [ C(OP_PREFETCH) ] = {
243                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
244                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
245         },
246 },
247 [C(LL)] = {
248         [C(OP_READ)] = {
249                 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
250                 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
251         },
252         [C(OP_WRITE)] = {
253                 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
254                 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
255         },
256         [C(OP_PREFETCH)] = {
257                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
258                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
259         },
260 },
261 [C(DTLB)] = {
262         [C(OP_READ)] = {
263                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
264                 [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
265         },
266         [ C(OP_WRITE) ] = {
267                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
268                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
269         },
270         [ C(OP_PREFETCH) ] = {
271                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
272                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
273         },
274 },
275 [C(ITLB)] = {
276         [C(OP_READ)] = {
277                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
278                 [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
279         },
280         [ C(OP_WRITE) ] = {
281                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
282                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
283         },
284         [ C(OP_PREFETCH) ] = {
285                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
286                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
287         },
288 },
289 [C(BPU)] = {
290         [C(OP_READ)] = {
291                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
292                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
293         },
294         [ C(OP_WRITE) ] = {
295                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
296                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
297         },
298         [ C(OP_PREFETCH) ] = {
299                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
300                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
301         },
302 },
303 [C(NODE)] = {
304         [C(OP_READ)] = {
305                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
306                 [C(RESULT_MISS)  ] = { CACHE_OP_UNSUPPORTED },
307         },
308         [ C(OP_WRITE) ] = {
309                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
310                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
311         },
312         [ C(OP_PREFETCH) ] = {
313                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
314                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
315         },
316 },
317 };
318
319 static const struct sparc_pmu ultra3_pmu = {
320         .event_map      = ultra3_event_map,
321         .cache_map      = &ultra3_cache_map,
322         .max_events     = ARRAY_SIZE(ultra3_perfmon_event_map),
323         .read_pmc       = sparc_default_read_pmc,
324         .write_pmc      = sparc_default_write_pmc,
325         .upper_shift    = 11,
326         .lower_shift    = 4,
327         .event_mask     = 0x3f,
328         .user_bit       = PCR_UTRACE,
329         .priv_bit       = PCR_STRACE,
330         .upper_nop      = 0x1c,
331         .lower_nop      = 0x14,
332         .flags          = (SPARC_PMU_ALL_EXCLUDES_SAME |
333                            SPARC_PMU_HAS_CONFLICTS),
334         .max_hw_events  = 2,
335         .num_pcrs       = 1,
336         .num_pic_regs   = 1,
337 };
338
339 /* Niagara1 is very limited.  The upper PIC is hard-locked to count
340  * only instructions, so it is free running, which creates all kinds of
341  * problems.  Some hardware designs make one wonder if the creator
342  * even looked at how this stuff gets used by software.
343  */
344 static const struct perf_event_map niagara1_perfmon_event_map[] = {
345         [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
346         [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
347         [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
348         [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
349 };
350
351 static const struct perf_event_map *niagara1_event_map(int event_id)
352 {
353         return &niagara1_perfmon_event_map[event_id];
354 }
355
356 static const cache_map_t niagara1_cache_map = {
357 [C(L1D)] = {
358         [C(OP_READ)] = {
359                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
360                 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
361         },
362         [C(OP_WRITE)] = {
363                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
364                 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
365         },
366         [C(OP_PREFETCH)] = {
367                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
368                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
369         },
370 },
371 [C(L1I)] = {
372         [C(OP_READ)] = {
373                 [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
374                 [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
375         },
376         [ C(OP_WRITE) ] = {
377                 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
378                 [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
379         },
380         [ C(OP_PREFETCH) ] = {
381                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
382                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
383         },
384 },
385 [C(LL)] = {
386         [C(OP_READ)] = {
387                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
388                 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
389         },
390         [C(OP_WRITE)] = {
391                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
392                 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
393         },
394         [C(OP_PREFETCH)] = {
395                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
396                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
397         },
398 },
399 [C(DTLB)] = {
400         [C(OP_READ)] = {
401                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
402                 [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
403         },
404         [ C(OP_WRITE) ] = {
405                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
406                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
407         },
408         [ C(OP_PREFETCH) ] = {
409                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
410                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
411         },
412 },
413 [C(ITLB)] = {
414         [C(OP_READ)] = {
415                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
416                 [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
417         },
418         [ C(OP_WRITE) ] = {
419                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
420                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
421         },
422         [ C(OP_PREFETCH) ] = {
423                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
424                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
425         },
426 },
427 [C(BPU)] = {
428         [C(OP_READ)] = {
429                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
430                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
431         },
432         [ C(OP_WRITE) ] = {
433                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
434                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
435         },
436         [ C(OP_PREFETCH) ] = {
437                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
438                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
439         },
440 },
441 [C(NODE)] = {
442         [C(OP_READ)] = {
443                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
444                 [C(RESULT_MISS)  ] = { CACHE_OP_UNSUPPORTED },
445         },
446         [ C(OP_WRITE) ] = {
447                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
448                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
449         },
450         [ C(OP_PREFETCH) ] = {
451                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
452                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
453         },
454 },
455 };
456
457 static const struct sparc_pmu niagara1_pmu = {
458         .event_map      = niagara1_event_map,
459         .cache_map      = &niagara1_cache_map,
460         .max_events     = ARRAY_SIZE(niagara1_perfmon_event_map),
461         .read_pmc       = sparc_default_read_pmc,
462         .write_pmc      = sparc_default_write_pmc,
463         .upper_shift    = 0,
464         .lower_shift    = 4,
465         .event_mask     = 0x7,
466         .user_bit       = PCR_UTRACE,
467         .priv_bit       = PCR_STRACE,
468         .upper_nop      = 0x0,
469         .lower_nop      = 0x0,
470         .flags          = (SPARC_PMU_ALL_EXCLUDES_SAME |
471                            SPARC_PMU_HAS_CONFLICTS),
472         .max_hw_events  = 2,
473         .num_pcrs       = 1,
474         .num_pic_regs   = 1,
475 };
476
477 static const struct perf_event_map niagara2_perfmon_event_map[] = {
478         [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
479         [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
480         [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
481         [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
482         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
483         [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
484 };
485
486 static const struct perf_event_map *niagara2_event_map(int event_id)
487 {
488         return &niagara2_perfmon_event_map[event_id];
489 }
490
491 static const cache_map_t niagara2_cache_map = {
492 [C(L1D)] = {
493         [C(OP_READ)] = {
494                 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
495                 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
496         },
497         [C(OP_WRITE)] = {
498                 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
499                 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
500         },
501         [C(OP_PREFETCH)] = {
502                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
503                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
504         },
505 },
506 [C(L1I)] = {
507         [C(OP_READ)] = {
508                 [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
509                 [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
510         },
511         [ C(OP_WRITE) ] = {
512                 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
513                 [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
514         },
515         [ C(OP_PREFETCH) ] = {
516                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
517                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
518         },
519 },
520 [C(LL)] = {
521         [C(OP_READ)] = {
522                 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
523                 [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
524         },
525         [C(OP_WRITE)] = {
526                 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
527                 [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
528         },
529         [C(OP_PREFETCH)] = {
530                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
531                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
532         },
533 },
534 [C(DTLB)] = {
535         [C(OP_READ)] = {
536                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
537                 [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
538         },
539         [ C(OP_WRITE) ] = {
540                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
541                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
542         },
543         [ C(OP_PREFETCH) ] = {
544                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
545                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
546         },
547 },
548 [C(ITLB)] = {
549         [C(OP_READ)] = {
550                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
551                 [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
552         },
553         [ C(OP_WRITE) ] = {
554                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
555                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
556         },
557         [ C(OP_PREFETCH) ] = {
558                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
559                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
560         },
561 },
562 [C(BPU)] = {
563         [C(OP_READ)] = {
564                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
565                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
566         },
567         [ C(OP_WRITE) ] = {
568                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
569                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
570         },
571         [ C(OP_PREFETCH) ] = {
572                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
573                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
574         },
575 },
576 [C(NODE)] = {
577         [C(OP_READ)] = {
578                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
579                 [C(RESULT_MISS)  ] = { CACHE_OP_UNSUPPORTED },
580         },
581         [ C(OP_WRITE) ] = {
582                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
583                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
584         },
585         [ C(OP_PREFETCH) ] = {
586                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
587                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
588         },
589 },
590 };
591
592 static const struct sparc_pmu niagara2_pmu = {
593         .event_map      = niagara2_event_map,
594         .cache_map      = &niagara2_cache_map,
595         .max_events     = ARRAY_SIZE(niagara2_perfmon_event_map),
596         .read_pmc       = sparc_default_read_pmc,
597         .write_pmc      = sparc_default_write_pmc,
598         .upper_shift    = 19,
599         .lower_shift    = 6,
600         .event_mask     = 0xfff,
601         .user_bit       = PCR_UTRACE,
602         .priv_bit       = PCR_STRACE,
603         .hv_bit         = PCR_N2_HTRACE,
604         .irq_bit        = 0x30,
605         .upper_nop      = 0x220,
606         .lower_nop      = 0x220,
607         .flags          = (SPARC_PMU_ALL_EXCLUDES_SAME |
608                            SPARC_PMU_HAS_CONFLICTS),
609         .max_hw_events  = 2,
610         .num_pcrs       = 1,
611         .num_pic_regs   = 1,
612 };
613
614 static const struct perf_event_map niagara4_perfmon_event_map[] = {
615         [PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) },
616         [PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f },
617         [PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 },
618         [PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 },
619         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 },
620         [PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f },
621 };
622
623 static const struct perf_event_map *niagara4_event_map(int event_id)
624 {
625         return &niagara4_perfmon_event_map[event_id];
626 }
627
628 static const cache_map_t niagara4_cache_map = {
629 [C(L1D)] = {
630         [C(OP_READ)] = {
631                 [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
632                 [C(RESULT_MISS)] = { (16 << 6) | 0x07 },
633         },
634         [C(OP_WRITE)] = {
635                 [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
636                 [C(RESULT_MISS)] = { (16 << 6) | 0x07 },
637         },
638         [C(OP_PREFETCH)] = {
639                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
640                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
641         },
642 },
643 [C(L1I)] = {
644         [C(OP_READ)] = {
645                 [C(RESULT_ACCESS)] = { (3 << 6) | 0x3f },
646                 [C(RESULT_MISS)] = { (11 << 6) | 0x03 },
647         },
648         [ C(OP_WRITE) ] = {
649                 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
650                 [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
651         },
652         [ C(OP_PREFETCH) ] = {
653                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
654                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
655         },
656 },
657 [C(LL)] = {
658         [C(OP_READ)] = {
659                 [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
660                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
661         },
662         [C(OP_WRITE)] = {
663                 [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
664                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
665         },
666         [C(OP_PREFETCH)] = {
667                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
668                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
669         },
670 },
671 [C(DTLB)] = {
672         [C(OP_READ)] = {
673                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
674                 [C(RESULT_MISS)] = { (17 << 6) | 0x3f },
675         },
676         [ C(OP_WRITE) ] = {
677                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
678                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
679         },
680         [ C(OP_PREFETCH) ] = {
681                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
682                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
683         },
684 },
685 [C(ITLB)] = {
686         [C(OP_READ)] = {
687                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
688                 [C(RESULT_MISS)] = { (6 << 6) | 0x3f },
689         },
690         [ C(OP_WRITE) ] = {
691                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
692                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
693         },
694         [ C(OP_PREFETCH) ] = {
695                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
696                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
697         },
698 },
699 [C(BPU)] = {
700         [C(OP_READ)] = {
701                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
702                 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
703         },
704         [ C(OP_WRITE) ] = {
705                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
706                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
707         },
708         [ C(OP_PREFETCH) ] = {
709                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
710                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
711         },
712 },
713 [C(NODE)] = {
714         [C(OP_READ)] = {
715                 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
716                 [C(RESULT_MISS)  ] = { CACHE_OP_UNSUPPORTED },
717         },
718         [ C(OP_WRITE) ] = {
719                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
720                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
721         },
722         [ C(OP_PREFETCH) ] = {
723                 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
724                 [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
725         },
726 },
727 };
728
729 static u32 sparc_vt_read_pmc(int idx)
730 {
731         u64 val = pcr_ops->read_pic(idx);
732
733         return val & 0xffffffff;
734 }
735
736 static void sparc_vt_write_pmc(int idx, u64 val)
737 {
738         u64 pcr;
739
740         /* There seems to be an internal latch on the overflow event
741          * on SPARC-T4 that prevents it from triggering unless you
742          * update the PIC exactly as we do here.  The requirement
743          * seems to be that you have to turn off event counting in the
744          * PCR around the PIC update.
745          *
746          * For example, after the following sequence:
747          *
748          * 1) set PIC to -1
749          * 2) enable event counting and overflow reporting in PCR
750          * 3) overflow triggers, softint 15 handler invoked
751          * 4) clear OV bit in PCR
752          * 5) write PIC to -1
753          *
754          * a subsequent overflow event will not trigger.  This
755          * sequence works on SPARC-T3 and previous chips.
756          */
757         pcr = pcr_ops->read_pcr(idx);
758         pcr_ops->write_pcr(idx, PCR_N4_PICNPT);
759
760         pcr_ops->write_pic(idx, val & 0xffffffff);
761
762         pcr_ops->write_pcr(idx, pcr);
763 }
764
765 static const struct sparc_pmu niagara4_pmu = {
766         .event_map      = niagara4_event_map,
767         .cache_map      = &niagara4_cache_map,
768         .max_events     = ARRAY_SIZE(niagara4_perfmon_event_map),
769         .read_pmc       = sparc_vt_read_pmc,
770         .write_pmc      = sparc_vt_write_pmc,
771         .upper_shift    = 5,
772         .lower_shift    = 5,
773         .event_mask     = 0x7ff,
774         .user_bit       = PCR_N4_UTRACE,
775         .priv_bit       = PCR_N4_STRACE,
776
777         /* We explicitly don't support hypervisor tracing.  The T4
778          * generates the overflow event for precise events via a trap
779          * which will not be generated (i.e. it's completely lost) if
780          * we happen to be in the hypervisor when the event triggers.
781          * Essentially, the overflow event reporting is completely
782          * unusable when you have hypervisor mode tracing enabled.
783          */
784         .hv_bit         = 0,
785
786         .irq_bit        = PCR_N4_TOE,
787         .upper_nop      = 0,
788         .lower_nop      = 0,
789         .flags          = 0,
790         .max_hw_events  = 4,
791         .num_pcrs       = 4,
792         .num_pic_regs   = 4,
793 };
794
795 static const struct sparc_pmu *sparc_pmu __read_mostly;
796
797 static u64 event_encoding(u64 event_id, int idx)
798 {
799         if (idx == PIC_UPPER_INDEX)
800                 event_id <<= sparc_pmu->upper_shift;
801         else
802                 event_id <<= sparc_pmu->lower_shift;
803         return event_id;
804 }
805
806 static u64 mask_for_index(int idx)
807 {
808         return event_encoding(sparc_pmu->event_mask, idx);
809 }
810
811 static u64 nop_for_index(int idx)
812 {
813         return event_encoding(idx == PIC_UPPER_INDEX ?
814                               sparc_pmu->upper_nop :
815                               sparc_pmu->lower_nop, idx);
816 }
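/* With the ultra3 values defined above, for example,
 * mask_for_index(PIC_UPPER_INDEX) is 0x3f << 11 and
 * mask_for_index(PIC_LOWER_INDEX) is 0x3f << 4, i.e. the event select
 * field of the corresponding counter in the shared %pcr.
 * nop_for_index() returns the "sw_count" encoding (0x1c or 0x14)
 * shifted into that same field, which is how an idle counter is parked
 * on an event that never fires in normal code.
 */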
817
818 static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
819 {
820         u64 val, mask = mask_for_index(idx);
821         int pcr_index = 0;
822
823         if (sparc_pmu->num_pcrs > 1)
824                 pcr_index = idx;
825
826         val = cpuc->pcr[pcr_index];
827         val &= ~mask;
828         val |= hwc->config;
829         cpuc->pcr[pcr_index] = val;
830
831         pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
832 }
833
834 static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
835 {
836         u64 mask = mask_for_index(idx);
837         u64 nop = nop_for_index(idx);
838         int pcr_index = 0;
839         u64 val;
840
841         if (sparc_pmu->num_pcrs > 1)
842                 pcr_index = idx;
843
844         val = cpuc->pcr[pcr_index];
845         val &= ~mask;
846         val |= nop;
847         cpuc->pcr[pcr_index] = val;
848
849         pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
850 }
851
852 static u64 sparc_perf_event_update(struct perf_event *event,
853                                    struct hw_perf_event *hwc, int idx)
854 {
855         int shift = 64 - 32;
856         u64 prev_raw_count, new_raw_count;
857         s64 delta;
858
859 again:
860         prev_raw_count = local64_read(&hwc->prev_count);
861         new_raw_count = sparc_pmu->read_pmc(idx);
862
863         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
864                              new_raw_count) != prev_raw_count)
865                 goto again;
866
867         delta = (new_raw_count << shift) - (prev_raw_count << shift);
868         delta >>= shift;
869
870         local64_add(delta, &event->count);
871         local64_sub(delta, &hwc->period_left);
872
873         return new_raw_count;
874 }
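/* The shift dance above computes the delta modulo 2^32, since only
 * 32 bits of the counter are implemented.  For example,
 * prev_raw_count == 0xfffffff0 and new_raw_count == 0x00000010 give
 * ((0x10 << 32) - (0xfffffff0 << 32)) >> 32 == 0x20, so a wrap
 * through zero is accounted for correctly.
 */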
875
876 static int sparc_perf_event_set_period(struct perf_event *event,
877                                        struct hw_perf_event *hwc, int idx)
878 {
879         s64 left = local64_read(&hwc->period_left);
880         s64 period = hwc->sample_period;
881         int ret = 0;
882
883         if (unlikely(left <= -period)) {
884                 left = period;
885                 local64_set(&hwc->period_left, left);
886                 hwc->last_period = period;
887                 ret = 1;
888         }
889
890         if (unlikely(left <= 0)) {
891                 left += period;
892                 local64_set(&hwc->period_left, left);
893                 hwc->last_period = period;
894                 ret = 1;
895         }
896         if (left > MAX_PERIOD)
897                 left = MAX_PERIOD;
898
899         local64_set(&hwc->prev_count, (u64)-left);
900
901         sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);
902
903         perf_event_update_userpage(event);
904
905         return ret;
906 }
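/* The counter is programmed with the two's complement of the
 * remaining period so that the overflow interrupt fires after
 * exactly 'left' events.  For example, left == 1000 writes
 * 0xfffffc18 into the PIC, which wraps from 0xffffffff to 0 (and
 * raises the interrupt) after 1000 increments.
 */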
907
908 static void read_in_all_counters(struct cpu_hw_events *cpuc)
909 {
910         int i;
911
912         for (i = 0; i < cpuc->n_events; i++) {
913                 struct perf_event *cp = cpuc->event[i];
914
915                 if (cpuc->current_idx[i] != PIC_NO_INDEX &&
916                     cpuc->current_idx[i] != cp->hw.idx) {
917                         sparc_perf_event_update(cp, &cp->hw,
918                                                 cpuc->current_idx[i]);
919                         cpuc->current_idx[i] = PIC_NO_INDEX;
920                 }
921         }
922 }
923
924 /* On this PMU all PICs are programmed using a single PCR.  Calculate
925  * the combined control register value.
926  *
927  * For such chips we require that all of the events have the same
928  * configuration, so just fetch the settings from the first entry.
929  */
930 static void calculate_single_pcr(struct cpu_hw_events *cpuc)
931 {
932         int i;
933
934         if (!cpuc->n_added)
935                 goto out;
936
937         /* Assign to counters all unassigned events.  */
938         for (i = 0; i < cpuc->n_events; i++) {
939                 struct perf_event *cp = cpuc->event[i];
940                 struct hw_perf_event *hwc = &cp->hw;
941                 int idx = hwc->idx;
942                 u64 enc;
943
944                 if (cpuc->current_idx[i] != PIC_NO_INDEX)
945                         continue;
946
947                 sparc_perf_event_set_period(cp, hwc, idx);
948                 cpuc->current_idx[i] = idx;
949
950                 enc = perf_event_get_enc(cpuc->events[i]);
951                 cpuc->pcr[0] &= ~mask_for_index(idx);
952                 if (hwc->state & PERF_HES_STOPPED)
953                         cpuc->pcr[0] |= nop_for_index(idx);
954                 else
955                         cpuc->pcr[0] |= event_encoding(enc, idx);
956         }
957 out:
958         cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
959 }
960
961 /* On this PMU each PIC has its own PCR control register.  */
962 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
963 {
964         int i;
965
966         if (!cpuc->n_added)
967                 goto out;
968
969         for (i = 0; i < cpuc->n_events; i++) {
970                 struct perf_event *cp = cpuc->event[i];
971                 struct hw_perf_event *hwc = &cp->hw;
972                 int idx = hwc->idx;
973                 u64 enc;
974
975                 if (cpuc->current_idx[i] != PIC_NO_INDEX)
976                         continue;
977
978                 sparc_perf_event_set_period(cp, hwc, idx);
979                 cpuc->current_idx[i] = idx;
980
981                 enc = perf_event_get_enc(cpuc->events[i]);
982                 cpuc->pcr[idx] &= ~mask_for_index(idx);
983                 if (hwc->state & PERF_HES_STOPPED)
984                         cpuc->pcr[idx] |= nop_for_index(idx);
985                 else
986                         cpuc->pcr[idx] |= event_encoding(enc, idx);
987         }
988 out:
989         for (i = 0; i < cpuc->n_events; i++) {
990                 struct perf_event *cp = cpuc->event[i];
991                 int idx = cp->hw.idx;
992
993                 cpuc->pcr[idx] |= cp->hw.config_base;
994         }
995 }
996
997 /* If performance event entries have been added, move existing events
998  * around (if necessary) and then assign new entries to counters.
999  */
1000 static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
1001 {
1002         if (cpuc->n_added)
1003                 read_in_all_counters(cpuc);
1004
1005         if (sparc_pmu->num_pcrs == 1) {
1006                 calculate_single_pcr(cpuc);
1007         } else {
1008                 calculate_multiple_pcrs(cpuc);
1009         }
1010 }
1011
1012 static void sparc_pmu_enable(struct pmu *pmu)
1013 {
1014         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1015         int i;
1016
1017         if (cpuc->enabled)
1018                 return;
1019
1020         cpuc->enabled = 1;
1021         barrier();
1022
1023         if (cpuc->n_events)
1024                 update_pcrs_for_enable(cpuc);
1025
1026         for (i = 0; i < sparc_pmu->num_pcrs; i++)
1027                 pcr_ops->write_pcr(i, cpuc->pcr[i]);
1028 }
1029
1030 static void sparc_pmu_disable(struct pmu *pmu)
1031 {
1032         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1033         int i;
1034
1035         if (!cpuc->enabled)
1036                 return;
1037
1038         cpuc->enabled = 0;
1039         cpuc->n_added = 0;
1040
1041         for (i = 0; i < sparc_pmu->num_pcrs; i++) {
1042                 u64 val = cpuc->pcr[i];
1043
1044                 val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
1045                          sparc_pmu->hv_bit | sparc_pmu->irq_bit);
1046                 cpuc->pcr[i] = val;
1047                 pcr_ops->write_pcr(i, cpuc->pcr[i]);
1048         }
1049 }
1050
1051 static int active_event_index(struct cpu_hw_events *cpuc,
1052                               struct perf_event *event)
1053 {
1054         int i;
1055
1056         for (i = 0; i < cpuc->n_events; i++) {
1057                 if (cpuc->event[i] == event)
1058                         break;
1059         }
1060         BUG_ON(i == cpuc->n_events);
1061         return cpuc->current_idx[i];
1062 }
1063
1064 static void sparc_pmu_start(struct perf_event *event, int flags)
1065 {
1066         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1067         int idx = active_event_index(cpuc, event);
1068
1069         if (flags & PERF_EF_RELOAD) {
1070                 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1071                 sparc_perf_event_set_period(event, &event->hw, idx);
1072         }
1073
1074         event->hw.state = 0;
1075
1076         sparc_pmu_enable_event(cpuc, &event->hw, idx);
1077 }
1078
1079 static void sparc_pmu_stop(struct perf_event *event, int flags)
1080 {
1081         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1082         int idx = active_event_index(cpuc, event);
1083
1084         if (!(event->hw.state & PERF_HES_STOPPED)) {
1085                 sparc_pmu_disable_event(cpuc, &event->hw, idx);
1086                 event->hw.state |= PERF_HES_STOPPED;
1087         }
1088
1089         if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
1090                 sparc_perf_event_update(event, &event->hw, idx);
1091                 event->hw.state |= PERF_HES_UPTODATE;
1092         }
1093 }
1094
1095 static void sparc_pmu_del(struct perf_event *event, int _flags)
1096 {
1097         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1098         unsigned long flags;
1099         int i;
1100
1101         local_irq_save(flags);
1102         perf_pmu_disable(event->pmu);
1103
1104         for (i = 0; i < cpuc->n_events; i++) {
1105                 if (event == cpuc->event[i]) {
1106                         /* Absorb the final count and turn off the
1107                          * event.
1108                          */
1109                         sparc_pmu_stop(event, PERF_EF_UPDATE);
1110
1111                         /* Shift remaining entries down into
1112                          * the existing slot.
1113                          */
1114                         while (++i < cpuc->n_events) {
1115                                 cpuc->event[i - 1] = cpuc->event[i];
1116                                 cpuc->events[i - 1] = cpuc->events[i];
1117                                 cpuc->current_idx[i - 1] =
1118                                         cpuc->current_idx[i];
1119                         }
1120
1121                         perf_event_update_userpage(event);
1122
1123                         cpuc->n_events--;
1124                         break;
1125                 }
1126         }
1127
1128         perf_pmu_enable(event->pmu);
1129         local_irq_restore(flags);
1130 }
1131
1132 static void sparc_pmu_read(struct perf_event *event)
1133 {
1134         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1135         int idx = active_event_index(cpuc, event);
1136         struct hw_perf_event *hwc = &event->hw;
1137
1138         sparc_perf_event_update(event, hwc, idx);
1139 }
1140
1141 static atomic_t active_events = ATOMIC_INIT(0);
1142 static DEFINE_MUTEX(pmc_grab_mutex);
1143
1144 static void perf_stop_nmi_watchdog(void *unused)
1145 {
1146         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1147         int i;
1148
1149         stop_nmi_watchdog(NULL);
1150         for (i = 0; i < sparc_pmu->num_pcrs; i++)
1151                 cpuc->pcr[i] = pcr_ops->read_pcr(i);
1152 }
1153
1154 void perf_event_grab_pmc(void)
1155 {
1156         if (atomic_inc_not_zero(&active_events))
1157                 return;
1158
1159         mutex_lock(&pmc_grab_mutex);
1160         if (atomic_read(&active_events) == 0) {
1161                 if (atomic_read(&nmi_active) > 0) {
1162                         on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
1163                         BUG_ON(atomic_read(&nmi_active) != 0);
1164                 }
1165                 atomic_inc(&active_events);
1166         }
1167         mutex_unlock(&pmc_grab_mutex);
1168 }
1169
1170 void perf_event_release_pmc(void)
1171 {
1172         if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
1173                 if (atomic_read(&nmi_active) == 0)
1174                         on_each_cpu(start_nmi_watchdog, NULL, 1);
1175                 mutex_unlock(&pmc_grab_mutex);
1176         }
1177 }
1178
1179 static const struct perf_event_map *sparc_map_cache_event(u64 config)
1180 {
1181         unsigned int cache_type, cache_op, cache_result;
1182         const struct perf_event_map *pmap;
1183
1184         if (!sparc_pmu->cache_map)
1185                 return ERR_PTR(-ENOENT);
1186
1187         cache_type = (config >>  0) & 0xff;
1188         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
1189                 return ERR_PTR(-EINVAL);
1190
1191         cache_op = (config >>  8) & 0xff;
1192         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
1193                 return ERR_PTR(-EINVAL);
1194
1195         cache_result = (config >> 16) & 0xff;
1196         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1197                 return ERR_PTR(-EINVAL);
1198
1199         pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
1200
1201         if (pmap->encoding == CACHE_OP_UNSUPPORTED)
1202                 return ERR_PTR(-ENOENT);
1203
1204         if (pmap->encoding == CACHE_OP_NONSENSE)
1205                 return ERR_PTR(-EINVAL);
1206
1207         return pmap;
1208 }
1209
1210 static void hw_perf_event_destroy(struct perf_event *event)
1211 {
1212         perf_event_release_pmc();
1213 }
1214
1215 /* Make sure all events can be scheduled into the hardware at
1216  * the same time.  This is simplified by the fact that we only
1217  * need to support 2 simultaneous HW events.
1218  *
1219  * As a side effect, the evts[]->hw.idx values will be assigned
1220  * on success.  These are pending indexes.  When the events are
1221  * actually programmed into the chip, these values will propagate
1222  * to the per-cpu cpuc->current_idx[] slots, see the code in
1223  * maybe_change_configuration() for details.
1224  */
1225 static int sparc_check_constraints(struct perf_event **evts,
1226                                    unsigned long *events, int n_ev)
1227 {
1228         u8 msk0 = 0, msk1 = 0;
1229         int idx0 = 0;
1230
1231         /* This case is possible when we are invoked from
1232          * hw_perf_group_sched_in().
1233          */
1234         if (!n_ev)
1235                 return 0;
1236
1237         if (n_ev > sparc_pmu->max_hw_events)
1238                 return -1;
1239
1240         if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
1241                 int i;
1242
1243                 for (i = 0; i < n_ev; i++)
1244                         evts[i]->hw.idx = i;
1245                 return 0;
1246         }
1247
1248         msk0 = perf_event_get_msk(events[0]);
1249         if (n_ev == 1) {
1250                 if (msk0 & PIC_LOWER)
1251                         idx0 = 1;
1252                 goto success;
1253         }
1254         BUG_ON(n_ev != 2);
1255         msk1 = perf_event_get_msk(events[1]);
1256
1257         /* If both events can go on any counter, OK.  */
1258         if (msk0 == (PIC_UPPER | PIC_LOWER) &&
1259             msk1 == (PIC_UPPER | PIC_LOWER))
1260                 goto success;
1261
1262         /* If one event is limited to a specific counter,
1263          * and the other can go on both, OK.
1264          */
1265         if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
1266             msk1 == (PIC_UPPER | PIC_LOWER)) {
1267                 if (msk0 & PIC_LOWER)
1268                         idx0 = 1;
1269                 goto success;
1270         }
1271
1272         if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
1273             msk0 == (PIC_UPPER | PIC_LOWER)) {
1274                 if (msk1 & PIC_UPPER)
1275                         idx0 = 1;
1276                 goto success;
1277         }
1278
1279         /* If the events are fixed to different counters, OK.  */
1280         if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
1281             (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
1282                 if (msk0 & PIC_LOWER)
1283                         idx0 = 1;
1284                 goto success;
1285         }
1286
1287         /* Otherwise, there is a conflict.  */
1288         return -1;
1289
1290 success:
1291         evts[0]->hw.idx = idx0;
1292         if (n_ev == 2)
1293                 evts[1]->hw.idx = idx0 ^ 1;
1294         return 0;
1295 }
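/* Worked example of the conflict resolution above, using the ultra3
 * map: scheduling PERF_COUNT_HW_CACHE_REFERENCES ({ 0x0009, PIC_LOWER })
 * together with PERF_COUNT_HW_CPU_CYCLES ({ 0x0000, PIC_UPPER | PIC_LOWER })
 * hits the "one event is limited" case: msk0 == PIC_LOWER forces
 * idx0 = 1 (PIC_LOWER_INDEX), and the cycles event takes the other
 * counter via idx0 ^ 1 == PIC_UPPER_INDEX.
 */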
1296
1297 static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
1298 {
1299         int eu = 0, ek = 0, eh = 0;
1300         struct perf_event *event;
1301         int i, n, first;
1302
1303         if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
1304                 return 0;
1305
1306         n = n_prev + n_new;
1307         if (n <= 1)
1308                 return 0;
1309
1310         first = 1;
1311         for (i = 0; i < n; i++) {
1312                 event = evts[i];
1313                 if (first) {
1314                         eu = event->attr.exclude_user;
1315                         ek = event->attr.exclude_kernel;
1316                         eh = event->attr.exclude_hv;
1317                         first = 0;
1318                 } else if (event->attr.exclude_user != eu ||
1319                            event->attr.exclude_kernel != ek ||
1320                            event->attr.exclude_hv != eh) {
1321                         return -EAGAIN;
1322                 }
1323         }
1324
1325         return 0;
1326 }
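/* On chips with SPARC_PMU_ALL_EXCLUDES_SAME (everything before
 * SPARC-T4) the single set of user/priv/hv trace enable bits applies
 * to both counters, so grouping, say, one event with exclude_user = 1
 * and another with exclude_user = 0 is rejected by check_excludes()
 * above with -EAGAIN.
 */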
1327
1328 static int collect_events(struct perf_event *group, int max_count,
1329                           struct perf_event *evts[], unsigned long *events,
1330                           int *current_idx)
1331 {
1332         struct perf_event *event;
1333         int n = 0;
1334
1335         if (!is_software_event(group)) {
1336                 if (n >= max_count)
1337                         return -1;
1338                 evts[n] = group;
1339                 events[n] = group->hw.event_base;
1340                 current_idx[n++] = PIC_NO_INDEX;
1341         }
1342         list_for_each_entry(event, &group->sibling_list, group_entry) {
1343                 if (!is_software_event(event) &&
1344                     event->state != PERF_EVENT_STATE_OFF) {
1345                         if (n >= max_count)
1346                                 return -1;
1347                         evts[n] = event;
1348                         events[n] = event->hw.event_base;
1349                         current_idx[n++] = PIC_NO_INDEX;
1350                 }
1351         }
1352         return n;
1353 }
1354
1355 static int sparc_pmu_add(struct perf_event *event, int ef_flags)
1356 {
1357         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1358         int n0, ret = -EAGAIN;
1359         unsigned long flags;
1360
1361         local_irq_save(flags);
1362         perf_pmu_disable(event->pmu);
1363
1364         n0 = cpuc->n_events;
1365         if (n0 >= sparc_pmu->max_hw_events)
1366                 goto out;
1367
1368         cpuc->event[n0] = event;
1369         cpuc->events[n0] = event->hw.event_base;
1370         cpuc->current_idx[n0] = PIC_NO_INDEX;
1371
1372         event->hw.state = PERF_HES_UPTODATE;
1373         if (!(ef_flags & PERF_EF_START))
1374                 event->hw.state |= PERF_HES_STOPPED;
1375
1376         /*
1377          * If a group event scheduling transaction was started,
1378          * skip the schedulability test here; it will be performed
1379          * at commit time (->commit_txn) as a whole.
1380          */
1381         if (cpuc->group_flag & PERF_EVENT_TXN)
1382                 goto nocheck;
1383
1384         if (check_excludes(cpuc->event, n0, 1))
1385                 goto out;
1386         if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
1387                 goto out;
1388
1389 nocheck:
1390         cpuc->n_events++;
1391         cpuc->n_added++;
1392
1393         ret = 0;
1394 out:
1395         perf_pmu_enable(event->pmu);
1396         local_irq_restore(flags);
1397         return ret;
1398 }
1399
1400 static int sparc_pmu_event_init(struct perf_event *event)
1401 {
1402         struct perf_event_attr *attr = &event->attr;
1403         struct perf_event *evts[MAX_HWEVENTS];
1404         struct hw_perf_event *hwc = &event->hw;
1405         unsigned long events[MAX_HWEVENTS];
1406         int current_idx_dmy[MAX_HWEVENTS];
1407         const struct perf_event_map *pmap;
1408         int n;
1409
1410         if (atomic_read(&nmi_active) < 0)
1411                 return -ENODEV;
1412
1413         /* does not support taken branch sampling */
1414         if (has_branch_stack(event))
1415                 return -EOPNOTSUPP;
1416
1417         switch (attr->type) {
1418         case PERF_TYPE_HARDWARE:
1419                 if (attr->config >= sparc_pmu->max_events)
1420                         return -EINVAL;
1421                 pmap = sparc_pmu->event_map(attr->config);
1422                 break;
1423
1424         case PERF_TYPE_HW_CACHE:
1425                 pmap = sparc_map_cache_event(attr->config);
1426                 if (IS_ERR(pmap))
1427                         return PTR_ERR(pmap);
1428                 break;
1429
1430         case PERF_TYPE_RAW:
1431                 pmap = NULL;
1432                 break;
1433
1434         default:
1435                 return -ENOENT;
1436
1437         }
1438
1439         if (pmap) {
1440                 hwc->event_base = perf_event_encode(pmap);
1441         } else {
1442                 /*
1443                  * User gives us "(encoding << 16) | pic_mask" for
1444                  * PERF_TYPE_RAW events.
1445                  */
1446                 hwc->event_base = attr->config;
1447         }
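        /* With the perf tool this corresponds to a raw event spec
         * (e.g. something like "perf stat -e r00090002" for the ultra3
         * cache references event on the lower PIC); the low byte is
         * unused on SPARC-T4 and later since the mask is ignored there.
         */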
1448
1449         /* We save the enable bits in the config_base.  */
1450         hwc->config_base = sparc_pmu->irq_bit;
1451         if (!attr->exclude_user)
1452                 hwc->config_base |= sparc_pmu->user_bit;
1453         if (!attr->exclude_kernel)
1454                 hwc->config_base |= sparc_pmu->priv_bit;
1455         if (!attr->exclude_hv)
1456                 hwc->config_base |= sparc_pmu->hv_bit;
1457
1458         n = 0;
1459         if (event->group_leader != event) {
1460                 n = collect_events(event->group_leader,
1461                                    sparc_pmu->max_hw_events - 1,
1462                                    evts, events, current_idx_dmy);
1463                 if (n < 0)
1464                         return -EINVAL;
1465         }
1466         events[n] = hwc->event_base;
1467         evts[n] = event;
1468
1469         if (check_excludes(evts, n, 1))
1470                 return -EINVAL;
1471
1472         if (sparc_check_constraints(evts, events, n + 1))
1473                 return -EINVAL;
1474
1475         hwc->idx = PIC_NO_INDEX;
1476
1477         /* Try to do all error checking before this point, as unwinding
1478          * state after grabbing the PMC is difficult.
1479          */
1480         perf_event_grab_pmc();
1481         event->destroy = hw_perf_event_destroy;
1482
1483         if (!hwc->sample_period) {
1484                 hwc->sample_period = MAX_PERIOD;
1485                 hwc->last_period = hwc->sample_period;
1486                 local64_set(&hwc->period_left, hwc->sample_period);
1487         }
1488
1489         return 0;
1490 }
1491
1492 /*
1493  * Start group events scheduling transaction
1494  * Set the flag so that pmu::enable() does not perform the
1495  * schedulability test; it will be performed at commit time
1496  */
1497 static void sparc_pmu_start_txn(struct pmu *pmu)
1498 {
1499         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1500
1501         perf_pmu_disable(pmu);
1502         cpuhw->group_flag |= PERF_EVENT_TXN;
1503 }
1504
1505 /*
1506  * Stop group events scheduling transaction
1507  * Clear the flag and pmu::enable() will perform the
1508  * schedulability test.
1509  */
1510 static void sparc_pmu_cancel_txn(struct pmu *pmu)
1511 {
1512         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1513
1514         cpuhw->group_flag &= ~PERF_EVENT_TXN;
1515         perf_pmu_enable(pmu);
1516 }
1517
1518 /*
1519  * Commit a group-event scheduling transaction.
1520  * Perform the schedulability test for the group as a whole and
1521  * return 0 on success.
1522  */
1523 static int sparc_pmu_commit_txn(struct pmu *pmu)
1524 {
1525         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1526         int n;
1527
1528         if (!sparc_pmu)
1529                 return -EINVAL;
1530
1532         n = cpuc->n_events;
1533         if (check_excludes(cpuc->event, 0, n))
1534                 return -EINVAL;
1535         if (sparc_check_constraints(cpuc->event, cpuc->events, n))
1536                 return -EAGAIN;
1537
1538         cpuc->group_flag &= ~PERF_EVENT_TXN;
1539         perf_pmu_enable(pmu);
1540         return 0;
1541 }
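
     /*
      * Rough sketch (not literal perf core code) of how the hooks above
      * are used when scheduling an event group:
      *
      *	pmu->start_txn(pmu);             disable PMU, set PERF_EVENT_TXN
      *	for each event in the group:
      *		pmu->add(event, flags);  per-event check skipped
      *	if (pmu->commit_txn(pmu) == 0)
      *		the group was accepted as a whole
      *	else
      *		pmu->cancel_txn(pmu);    roll back and re-enable
      */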
1542
1543 static struct pmu pmu = {
1544         .pmu_enable     = sparc_pmu_enable,
1545         .pmu_disable    = sparc_pmu_disable,
1546         .event_init     = sparc_pmu_event_init,
1547         .add            = sparc_pmu_add,
1548         .del            = sparc_pmu_del,
1549         .start          = sparc_pmu_start,
1550         .stop           = sparc_pmu_stop,
1551         .read           = sparc_pmu_read,
1552         .start_txn      = sparc_pmu_start_txn,
1553         .cancel_txn     = sparc_pmu_cancel_txn,
1554         .commit_txn     = sparc_pmu_commit_txn,
1555 };
1556
1557 void perf_event_print_debug(void)
1558 {
1559         unsigned long flags;
1560         int cpu, i;
1561
1562         if (!sparc_pmu)
1563                 return;
1564
1565         local_irq_save(flags);
1566
1567         cpu = smp_processor_id();
1568
1569         pr_info("\n");
1570         for (i = 0; i < sparc_pmu->num_pcrs; i++)
1571                 pr_info("CPU#%d: PCR%d[%016llx]\n",
1572                         cpu, i, pcr_ops->read_pcr(i));
1573         for (i = 0; i < sparc_pmu->num_pic_regs; i++)
1574                 pr_info("CPU#%d: PIC%d[%016llx]\n",
1575                         cpu, i, pcr_ops->read_pic(i));
1576
1577         local_irq_restore(flags);
1578 }
1579
1580 static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
1581                                             unsigned long cmd, void *__args)
1582 {
1583         struct die_args *args = __args;
1584         struct perf_sample_data data;
1585         struct cpu_hw_events *cpuc;
1586         struct pt_regs *regs;
1587         int i;
1588
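             /* Unless at least one perf event is active somewhere, this
              * NMI cannot be ours; let other handlers see it.
              */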
1589         if (!atomic_read(&active_events))
1590                 return NOTIFY_DONE;
1591
1592         switch (cmd) {
1593         case DIE_NMI:
1594                 break;
1595
1596         default:
1597                 return NOTIFY_DONE;
1598         }
1599
1600         regs = args->regs;
1601
1602         cpuc = &__get_cpu_var(cpu_hw_events);
1603
1604         /* If the PMU has the TOE IRQ enable bits, we need to do a
1605          * dummy write to the %pcr to clear the overflow bits and thus
1606          * the interrupt.
1607          *
1608          * Do this before we peek at the counters to determine
1609          * overflow so we don't lose any events.
1610          */
1611         if (sparc_pmu->irq_bit &&
1612             sparc_pmu->num_pcrs == 1)
1613                 pcr_ops->write_pcr(0, cpuc->pcr[0]);
1614
1615         for (i = 0; i < cpuc->n_events; i++) {
1616                 struct perf_event *event = cpuc->event[i];
1617                 int idx = cpuc->current_idx[i];
1618                 struct hw_perf_event *hwc;
1619                 u64 val;
1620
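                     /* Chips with one PCR per counter need the
                      * overflow-clearing dummy write done on each
                      * counter's own PCR.
                      */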
1621                 if (sparc_pmu->irq_bit &&
1622                     sparc_pmu->num_pcrs > 1)
1623                         pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
1624
1625                 hwc = &event->hw;
1626                 val = sparc_perf_event_update(event, hwc, idx);
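                     /* If bit 31 is still set the 32-bit counter has not
                      * wrapped, so this event did not cause the interrupt.
                      */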
1627                 if (val & (1ULL << 31))
1628                         continue;
1629
1630                 perf_sample_data_init(&data, 0, hwc->last_period);
1631                 if (!sparc_perf_event_set_period(event, hwc, idx))
1632                         continue;
1633
1634                 if (perf_event_overflow(event, &data, regs))
1635                         sparc_pmu_stop(event, 0);
1636         }
1637
1638         return NOTIFY_STOP;
1639 }
1640
1641 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1642         .notifier_call          = perf_event_nmi_handler,
1643 };
1644
1645 static bool __init supported_pmu(void)
1646 {
1647         if (!strcmp(sparc_pmu_type, "ultra3") ||
1648             !strcmp(sparc_pmu_type, "ultra3+") ||
1649             !strcmp(sparc_pmu_type, "ultra3i") ||
1650             !strcmp(sparc_pmu_type, "ultra4+")) {
1651                 sparc_pmu = &ultra3_pmu;
1652                 return true;
1653         }
1654         if (!strcmp(sparc_pmu_type, "niagara")) {
1655                 sparc_pmu = &niagara1_pmu;
1656                 return true;
1657         }
1658         if (!strcmp(sparc_pmu_type, "niagara2") ||
1659             !strcmp(sparc_pmu_type, "niagara3")) {
1660                 sparc_pmu = &niagara2_pmu;
1661                 return true;
1662         }
1663         if (!strcmp(sparc_pmu_type, "niagara4")) {
1664                 sparc_pmu = &niagara4_pmu;
1665                 return true;
1666         }
1667         return false;
1668 }
1669
1670 int __init init_hw_perf_events(void)
1671 {
1672         pr_info("Performance events: ");
1673
1674         if (!supported_pmu()) {
1675                 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
1676                 return 0;
1677         }
1678
1679         pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1680
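             /* Expose the PMU to the perf core and hook the die-notifier
              * chain so that counter overflow NMIs reach
              * perf_event_nmi_handler().
              */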
1681         perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1682         register_die_notifier(&perf_event_nmi_notifier);
1683
1684         return 0;
1685 }
1686 early_initcall(init_hw_perf_events);
1687
1688 void perf_callchain_kernel(struct perf_callchain_entry *entry,
1689                            struct pt_regs *regs)
1690 {
1691         unsigned long ksp, fp;
1692 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1693         int graph = 0;
1694 #endif
1695
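             /* Flush the register windows to the stack so the saved frame
              * pointers can be followed in memory.
              */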
1696         stack_trace_flush();
1697
1698         perf_callchain_store(entry, regs->tpc);
1699
1700         ksp = regs->u_regs[UREG_I6];
1701         fp = ksp + STACK_BIAS;
1702         do {
1703                 struct sparc_stackf *sf;
1704                 struct pt_regs *regs;
1705                 unsigned long pc;
1706
1707                 if (!kstack_valid(current_thread_info(), fp))
1708                         break;
1709
1710                 sf = (struct sparc_stackf *) fp;
1711                 regs = (struct pt_regs *) (sf + 1);
1712
1713                 if (kstack_is_trap_frame(current_thread_info(), regs)) {
1714                         if (user_mode(regs))
1715                                 break;
1716                         pc = regs->tpc;
1717                         fp = regs->u_regs[UREG_I6] + STACK_BIAS;
1718                 } else {
1719                         pc = sf->callers_pc;
1720                         fp = (unsigned long)sf->fp + STACK_BIAS;
1721                 }
1722                 perf_callchain_store(entry, pc);
1723 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
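                     /* The function graph tracer patches return addresses
                      * to point at return_to_handler; recover the original
                      * return address from the task's ret_stack.  The saved
                      * value is the address of the call instruction, hence
                      * the +8 to reach the real return target.
                      */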
1724                 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
1725                         int index = current->curr_ret_stack;
1726                         if (current->ret_stack && index >= graph) {
1727                                 pc = current->ret_stack[index - graph].ret;
1728                                 perf_callchain_store(entry, pc);
1729                                 graph++;
1730                         }
1731                 }
1732 #endif
1733         } while (entry->nr < PERF_MAX_STACK_DEPTH);
1734 }
1735
1736 static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1737                                    struct pt_regs *regs)
1738 {
1739         unsigned long ufp;
1740
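             /* Walk the user stack by chasing saved frame pointers.  The
              * copies use the _inatomic variant because this may run from
              * NMI context; a fault simply ends the walk.
              */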
1741         ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1742         do {
1743                 struct sparc_stackf *usf, sf;
1744                 unsigned long pc;
1745
1746                 usf = (struct sparc_stackf *) ufp;
1747                 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1748                         break;
1749
1750                 pc = sf.callers_pc;
1751                 ufp = (unsigned long)sf.fp + STACK_BIAS;
1752                 perf_callchain_store(entry, pc);
1753         } while (entry->nr < PERF_MAX_STACK_DEPTH);
1754 }
1755
1756 static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1757                                    struct pt_regs *regs)
1758 {
1759         unsigned long ufp;
1760
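             /* Compat (32-bit) tasks use 32-bit stack frames and no
              * STACK_BIAS, so only the low 32 bits of the frame pointer
              * are meaningful.
              */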
1761         ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
1762         do {
1763                 struct sparc_stackf32 *usf, sf;
1764                 unsigned long pc;
1765
1766                 usf = (struct sparc_stackf32 *) ufp;
1767                 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1768                         break;
1769
1770                 pc = sf.callers_pc;
1771                 ufp = (unsigned long)sf.fp;
1772                 perf_callchain_store(entry, pc);
1773         } while (entry->nr < PERF_MAX_STACK_DEPTH);
1774 }
1775
1776 void
1777 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1778 {
1779         perf_callchain_store(entry, regs->tpc);
1780
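             /* A task with no mm (e.g. a kernel thread) has no user stack
              * to unwind.
              */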
1781         if (!current->mm)
1782                 return;
1783
1784         flushw_user();
1785         if (test_thread_flag(TIF_32BIT))
1786                 perf_callchain_user_32(entry, regs);
1787         else
1788                 perf_callchain_user_64(entry, regs);
1789 }