/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES            = 9,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,
        PERF_COUNT_HW_CACHE_NODE                = 6,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
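
/*
 * Example (illustrative): for PERF_TYPE_HW_CACHE events, attr.config is
 * composed from the three enums above as
 *
 *      config = (perf_hw_cache_id) |
 *               (perf_hw_cache_op_id << 8) |
 *               (perf_hw_cache_op_result_id << 16)
 *
 * e.g. counting L1 data-cache read misses:
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */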

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling them as
 * well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,

        PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
};
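
/*
 * Example (a user-space parsing sketch; struct group_read and buf are
 * hypothetical names): with read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID, one read() on the group leader returns every counter
 * in the group:
 *
 *      struct group_read {
 *              __u64 nr;
 *              struct { __u64 value, id; } cntr[];
 *      } *gr = (struct group_read *)buf;
 *
 *      for (__u64 i = 0; i < gr->nr; i++)
 *              printf("id %llu: %llu\n", gr->cntr[i].id, gr->cntr[i].value);
 *
 * The struct layout must mirror the read_format bits actually requested;
 * adding the TOTAL_TIME_* bits inserts time_enabled/time_running after nr.
 */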

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
                                mmap_data      :  1, /* non-exec mmap data    */
                                sample_id_all  :  1, /* sample_type all events */

                                exclude_host   :  1, /* don't count in host   */
                                exclude_guest  :  1, /* don't count in guest  */

                                __reserved_1   : 43;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        union {
                __u64           bp_addr;
                __u64           config1; /* extension of config */
        };
        union {
                __u64           bp_len;
                __u64           config2; /* extension of config1 */
        };
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
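
/*
 * Example (a minimal user-space sketch; perf_event_open() has no glibc
 * wrapper, so it is invoked via syscall(2)): count retired instructions
 * in the calling thread using the attr fields and ioctls above.
 *
 *      #include <linux/perf_event.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      struct perf_event_attr attr;
 *      long long count;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type           = PERF_TYPE_HARDWARE;
 *      attr.size           = sizeof(attr);
 *      attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled       = 1;
 *      attr.exclude_kernel = 1;
 *
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *      ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      // ... workload under measurement ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 */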

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier();
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */
        __u32   time_mult, time_shift;
        __u64   time_offset;

        /*
         * Hole for extension of the self monitor capabilities
         */

        __u64   __reserved[121];        /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(), on
         * SMP capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not over-write unread data.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};
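
/*
 * Example (a user-space sketch; "page", "data" and "mask" are assumed to
 * be set up by the caller): consuming records from the mmap()ed data
 * buffer. "page" points at the perf_event_mmap_page (first page of the
 * mapping), "data" at the data pages right after it, and "mask" is
 * data_size - 1 (the data area is a power of two in size).
 *
 *      __u64 head = page->data_head;
 *      rmb();  // pairs with the kernel's update of data_head
 *
 *      while (page->data_tail != head) {
 *              struct perf_event_header *hdr;
 *
 *              hdr = (void *)(data + (page->data_tail & mask));
 *              // decode hdr->type / hdr->size, see enum perf_event_type
 *              page->data_tail += hdr->size;
 *      }
 *
 * Records can wrap at the end of the buffer, so a robust reader copies
 * hdr->size bytes out before decoding.
 */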

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
         * as described in PERF_RECORD_SAMPLE below. These fields are stashed
         * just after the perf_event_header and the fields already present
         * for the existing record types, i.e. at the end of the payload.
         * That way a newer perf.data file will be supported by older perf
         * tools, with these new optional fields being ignored.
         *
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size]; } && PERF_SAMPLE_RAW
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        PERF_RECORD_MAX,                        /* non-ABI */
};
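
/*
 * Example (a decoding sketch, assuming the event was opened with
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID): sample fields appear
 * strictly in enum perf_event_sample_format bit order, so a matching
 * struct can overlay the record:
 *
 *      struct sample {
 *              struct perf_event_header header;
 *              __u64 ip;               // PERF_SAMPLE_IP
 *              __u32 pid, tid;         // PERF_SAMPLE_TID
 *      } *s = (struct sample *)hdr;
 *
 *      if (hdr->type == PERF_RECORD_SAMPLE)
 *              printf("ip %#llx pid %u\n", s->ip, s->pid);
 *
 * The overlay must match the exact sample_type used at open time.
 */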

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP           (1U << 0)
#define PERF_FLAG_FD_OUTPUT             (1U << 1)
#define PERF_FLAG_PID_CGROUP            (1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int                             (*is_in_guest)(void);
        int                             (*is_user_mode)(void);
        unsigned long                   (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH            255

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

struct perf_branch_entry {
        __u64                           from;
        __u64                           to;
        __u64                           flags;
};

struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64             config; /* register value */
        unsigned int    reg;    /* register address or index */
        int             alloc;  /* extra register already allocated */
        int             idx;    /* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             idx;
                        int             last_cpu;
                        struct hw_perf_event_extra extra_reg;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts_seq;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        struct device                   *dev;
        const struct attribute_group    **attr_groups;
        char                            *name;
        int                             type;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try to initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add the
         * group events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)               (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)              (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)              (struct pmu *pmu); /* optional */

        /*
         * Will return the value for perf_event_mmap_page::index for this
         * event; if no implementation is provided it will default to
         * event->hw.idx + 1.
         */
        int (*event_idx)                (struct perf_event *event); /* optional */
};
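
/*
 * Example (a minimal sketch of a software-style PMU driver; the names
 * my_pmu/my_* are hypothetical): event_init must reject events meant for
 * other PMUs with -ENOENT, and add/del/start/stop/read drive the counter.
 *
 *      static int my_event_init(struct perf_event *event)
 *      {
 *              if (event->attr.type != event->pmu->type)
 *                      return -ENOENT;
 *              return 0;
 *      }
 *
 *      static struct pmu my_pmu = {
 *              .event_init     = my_event_init,
 *              .add            = my_add,   // start if flags & PERF_EF_START
 *              .del            = my_del,
 *              .start          = my_start,
 *              .stop           = my_stop,  // update if flags & PERF_EF_UPDATE
 *              .read           = my_read,
 *      };
 *
 *      perf_pmu_register(&my_pmu, "my_pmu", -1);  // -1: allocate a type id
 */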

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE             = 0x1,
};

#define SWEVENT_HLIST_BITS              8
#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
        struct rcu_head                 rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
        u64                             time;
        u64                             timestamp;
};

struct perf_cgroup {
        struct cgroup_subsys_state      css;
        struct perf_cgroup_info         *info;  /* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        struct file                     *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct ring_buffer              *rb;
        struct list_head                rb_entry;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;
        void                            *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
        int                             cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu                      *pmu;
        enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             nr_freq;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        int                             nr_cgroups; /* cgroup events present */
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct list_head                rotation_list;
        int                             jiffies_interval;
        struct pmu                      *active_pmu;
        struct perf_cgroup              *cgrp;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct ring_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback,
                                void *context);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
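
/*
 * Example (a kernel-side sketch; error handling mostly elided): creating
 * an in-kernel cycle counter on a CPU with an overflow callback, in the
 * style used by e.g. the hard-lockup watchdog.
 *
 *      static void my_overflow(struct perf_event *event,
 *                              struct perf_sample_data *data,
 *                              struct pt_regs *regs)
 *      {
 *              // runs from NMI/IRQ context on counter overflow
 *      }
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .size           = sizeof(attr),
 *              .sample_period  = 1000000,
 *      };
 *      struct perf_event *event;
 *
 *      event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *                                               my_overflow, NULL);
 *      if (IS_ERR(event))
 *              return PTR_ERR(event);
 */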

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
};

static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
        data->addr = addr;
        data->raw  = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        if (static_branch(&perf_swevent_enabled[event_id])) {
                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, regs, addr);
        }
}
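
/*
 * Example (illustrative; this is how e.g. the page-fault path emits a
 * software event, cf. PERF_COUNT_SW_PAGE_FAULTS):
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * nr is the increment, regs the faulting register state (NULL means
 * "snapshot the caller's regs"), and addr the faulting address.
 */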

extern struct jump_label_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_branch(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

        if (static_branch(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}
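
/*
 * Example (a sketch of an arch perf_callchain_kernel(); the frame walk
 * itself is architecture specific and elided): an arch implementation
 * first stores a PERF_CONTEXT_KERNEL marker, then the captured addresses.
 *
 *      void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *                                 struct pt_regs *regs)
 *      {
 *              perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 *              perf_callchain_store(entry, instruction_pointer(regs));
 *              // ... walk stack frames, calling perf_callchain_store()
 *              //     for each return address ...
 *      }
 */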

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline void perf_event_task_tick(void)                           { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb __cpuinitdata =            \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)smp_processor_id());             \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
                (void *)(unsigned long)smp_processor_id());             \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)smp_processor_id());             \
        register_cpu_notifier(&fn##_nb);                                \
} while (0)

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */