perf_counter: uncouple data_head updates from wakeups
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 8d5d11b..17b6310 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -100,8 +100,10 @@ enum sw_event_ids {
 enum perf_counter_record_format {
        PERF_RECORD_IP          = 1U << 0,
        PERF_RECORD_TID         = 1U << 1,
-       PERF_RECORD_GROUP       = 1U << 2,
-       PERF_RECORD_CALLCHAIN   = 1U << 3,
+       PERF_RECORD_TIME        = 1U << 2,
+       PERF_RECORD_ADDR        = 1U << 3,
+       PERF_RECORD_GROUP       = 1U << 4,
+       PERF_RECORD_CALLCHAIN   = 1U << 5,
 };
 
 /*
@@ -141,8 +143,9 @@ struct perf_counter_hw_event {
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                munmap         :  1, /* include munmap data   */
+                               comm           :  1, /* include comm data     */
 
-                               __reserved_1   : 53;
+                               __reserved_1   : 52;
 
        __u32                   extra_config_len;
        __u32                   wakeup_events;  /* wakeup every n events */
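
Taken together, the two hunks above extend what a sampling counter can emit: record_type selects which PERF_RECORD_* fields each sample carries, and the new comm bit requests PERF_EVENT_COMM side-band records. A minimal configuration sketch (counter type/config selection is omitted, and the open-syscall wrapper is an assumption, not part of this header):

    struct perf_counter_hw_event hw_event = { 0 };

    hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID |
                           PERF_RECORD_TIME | PERF_RECORD_ADDR;
    hw_event.comm          = 1;  /* emit PERF_EVENT_COMM records */
    hw_event.mmap          = 1;  /* emit PERF_EVENT_MMAP records */
    hw_event.wakeup_events = 16; /* wake the reader every 16 events */

    /* fd = perf_counter_open(&hw_event, pid, cpu, group_fd, 0);  (wrapper assumed) */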
@@ -154,8 +157,9 @@ struct perf_counter_hw_event {
 /*
  * Ioctls that can be done on a perf counter fd:
  */
-#define PERF_COUNTER_IOC_ENABLE                _IO('$', 0)
-#define PERF_COUNTER_IOC_DISABLE       _IO('$', 1)
+#define PERF_COUNTER_IOC_ENABLE                _IO ('$', 0)
+#define PERF_COUNTER_IOC_DISABLE       _IO ('$', 1)
+#define PERF_COUNTER_IOC_REFRESH       _IOW('$', 2, u32)
 
 /*
  * Structure of the page that can be mapped via mmap
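
Before the mmap page details, an illustration of the extended ioctl set: REFRESH re-arms a counter for a given number of further overflow events (semantics inferred from the event_limit field added below; error handling omitted, fd obtained from the perf counter syscall):

    #include <sys/ioctl.h>

    ioctl(fd, PERF_COUNTER_IOC_REFRESH, 3);  /* allow 3 more overflow events */
    /* ... counter signals three overflows, then disables itself ... */
    ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);   /* or simply switch it back on */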
@@ -199,28 +203,67 @@ struct perf_counter_mmap_page {
        __u32   data_head;              /* head in the data section */
 };
 
+#define PERF_EVENT_MISC_KERNEL         (1 << 0)
+#define PERF_EVENT_MISC_USER           (1 << 1)
+#define PERF_EVENT_MISC_OVERFLOW       (1 << 2)
+
 struct perf_event_header {
        __u32   type;
-       __u32   size;
+       __u16   misc;
+       __u16   size;
 };
 
 enum perf_event_type {
 
+       /*
+        * The MMAP events record the PROT_EXEC mappings so that we can
+        * correlate userspace IPs to code. They have the following structure:
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      u32                             pid, tid;
+        *      u64                             addr;
+        *      u64                             len;
+        *      u64                             pgoff;
+        *      char                            filename[];
+        * };
+        */
        PERF_EVENT_MMAP                 = 1,
        PERF_EVENT_MUNMAP               = 2,
 
        /*
-        * Half the event type space is reserved for the counter overflow
-        * bitfields, as found in hw_event.record_type.
+        * struct {
+        *      struct perf_event_header        header;
         *
-        * These events will have types of the form:
-        *   PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } *
+        *      u32                             pid, tid;
+        *      char                            comm[];
+        * };
+        */
+       PERF_EVENT_COMM                 = 3,
+
+       /*
+        * When header.misc & PERF_EVENT_MISC_OVERFLOW the header.type field
+        * will be PERF_RECORD_*
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      { u64                   ip;       } && PERF_RECORD_IP
+        *      { u32                   pid, tid; } && PERF_RECORD_TID
+        *      { u64                   time;     } && PERF_RECORD_TIME
+        *      { u64                   addr;     } && PERF_RECORD_ADDR
+        *
+        *      { u64                   nr;
+        *        { u64 event, val; }   cnt[nr];  } && PERF_RECORD_GROUP
+        *
+        *      { u16                   nr,
+        *                              hv,
+        *                              kernel,
+        *                              user;
+        *        u64                   ips[nr];  } && PERF_RECORD_CALLCHAIN
+        * };
         */
-       PERF_EVENT_COUNTER_OVERFLOW     = 1UL << 31,
-       __PERF_EVENT_IP                 = PERF_RECORD_IP,
-       __PERF_EVENT_TID                = PERF_RECORD_TID,
-       __PERF_EVENT_GROUP              = PERF_RECORD_GROUP,
-       __PERF_EVENT_CALLCHAIN          = PERF_RECORD_CALLCHAIN,
 };
 
 #ifdef __KERNEL__
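
The layout comments above double as the wire format seen by userspace. A hedged sketch of walking the optional leading fields of one overflow sample (function and variable names are illustrative, not kernel API):

    #include <stdint.h>
    #include <string.h>

    /* Caller has already checked header.misc & PERF_EVENT_MISC_OVERFLOW,
     * so header.type holds the PERF_RECORD_* bits for this sample. */
    static const void *decode_sample(const struct perf_event_header *hdr)
    {
            const char *p = (const char *)(hdr + 1);
            uint64_t ip, time, addr;
            uint32_t pid, tid;

            if (hdr->type & PERF_RECORD_IP)   { memcpy(&ip, p, 8);   p += 8; }
            if (hdr->type & PERF_RECORD_TID)  { memcpy(&pid, p, 4);  p += 4;
                                                memcpy(&tid, p, 4);  p += 4; }
            if (hdr->type & PERF_RECORD_TIME) { memcpy(&time, p, 8); p += 8; }
            if (hdr->type & PERF_RECORD_ADDR) { memcpy(&addr, p, 8); p += 8; }

            /* PERF_RECORD_GROUP and PERF_RECORD_CALLCHAIN payloads follow here */
            return (const char *)hdr + hdr->size;   /* next record */
    }

Non-overflow records are dispatched on header.type itself instead: PERF_EVENT_MMAP, PERF_EVENT_MUNMAP and the new PERF_EVENT_COMM.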
@@ -275,7 +318,7 @@ struct hw_perf_counter {
                        unsigned long                   config_base;
                        unsigned long                   counter_base;
                        int                             nmi;
-                       unsigned int                    idx;
+                       int                             idx;
                };
                union { /* software */
                        atomic64_t                      count;
@@ -291,9 +334,9 @@ struct hw_perf_counter {
 struct perf_counter;
 
 /**
- * struct hw_perf_counter_ops - performance counter hw ops
+ * struct pmu - generic performance monitoring unit
  */
-struct hw_perf_counter_ops {
+struct pmu {
        int (*enable)                   (struct perf_counter *counter);
        void (*disable)                 (struct perf_counter *counter);
        void (*read)                    (struct perf_counter *counter);
@@ -313,16 +356,24 @@ struct file;
 
 struct perf_mmap_data {
        struct rcu_head                 rcu_head;
-       int                             nr_pages;
-       atomic_t                        wakeup;
-       atomic_t                        head;
-       atomic_t                        events;
+       int                             nr_pages;       /* nr of data pages  */
+
+       atomic_t                        poll;           /* POLL_ for wakeups */
+       atomic_t                        head;           /* write position    */
+       atomic_t                        events;         /* event limit       */
+
+       atomic_t                        done_head;      /* completed head    */
+       atomic_t                        lock;           /* concurrent writes */
+
+       atomic_t                        wakeup;         /* needs a wakeup    */
+
        struct perf_counter_mmap_page   *user_page;
        void                            *data_pages[0];
 };
 
-struct perf_wakeup_entry {
-       struct perf_wakeup_entry *next;
+struct perf_pending_entry {
+       struct perf_pending_entry *next;
+       void (*func)(struct perf_pending_entry *);
 };
 
 /**
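
These fields are the mechanism behind the commit title: writers advance head as they reserve space, but user_page->data_head only moves once the outermost writer completes, and wakeups are merely latched in wakeup/poll on the write path instead of being issued there. A simplified sketch of the unlock side (hedged; the in-tree version has to be more careful about races):

    static void perf_output_unlock_sketch(struct perf_mmap_data *data)
    {
            /* record how far our now-complete write reached */
            atomic_set(&data->done_head, atomic_read(&data->head));

            /* nested writers leave publication to the outermost one */
            if (!atomic_dec_and_test(&data->lock))
                    return;

            /* publish the completed head to userspace ... */
            data->user_page->data_head = atomic_read(&data->done_head);

            /* ... and only now pay for any latched wakeup */
            if (atomic_xchg(&data->wakeup, 0))
                    wakeup_readers(data);   /* helper name illustrative */
    }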
@@ -335,7 +386,7 @@ struct perf_counter {
        struct list_head                sibling_list;
        int                             nr_siblings;
        struct perf_counter             *group_leader;
-       const struct hw_perf_counter_ops *hw_ops;
+       const struct pmu                *pmu;
 
        enum perf_counter_active_state  state;
        enum perf_counter_active_state  prev_state;
@@ -400,8 +451,14 @@ struct perf_counter {
        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;
-       /* optional: for NMIs */
-       struct perf_wakeup_entry        wakeup;
+
+       /* delayed work for NMIs and such */
+       int                             pending_wakeup;
+       int                             pending_kill;
+       int                             pending_disable;
+       struct perf_pending_entry       pending;
+
+       atomic_t                        event_limit;
 
        void (*destroy)(struct perf_counter *);
        struct rcu_head                 rcu_head;
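
The pending machinery generalizes the old single-purpose wakeup entry: work that must not run in NMI context (wakeups, SIGIO via pending_kill, counter disables) is queued as a perf_pending_entry whose callback runs later from a safe context. A hedged sketch of the queue/drain pair (the list head is per-cpu and lock-free in reality; plain assignments here for brevity):

    static struct perf_pending_entry *pending_head;

    static void pending_queue_sketch(struct perf_pending_entry *entry,
                                     void (*func)(struct perf_pending_entry *))
    {
            entry->func = func;
            entry->next = pending_head;
            pending_head = entry;
            /* arm a self-interrupt so the list is drained outside NMI context */
    }

    static void pending_run_sketch(void)
    {
            struct perf_pending_entry *entry = pending_head;

            pending_head = NULL;
            while (entry) {
                    struct perf_pending_entry *next = entry->next;

                    entry->func(entry);     /* entry may be freed by func */
                    entry = next;
            }
    }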
@@ -435,14 +492,10 @@ struct perf_counter_context {
        struct task_struct      *task;
 
        /*
-        * time_now is the current time in nanoseconds since an arbitrary
-        * point in the past.  For per-task counters, this is based on the
-        * task clock, and for per-cpu counters it is based on the cpu clock.
-        * time_lost is an offset from the task/cpu clock, used to make it
-        * appear that time only passes while the context is scheduled in.
+        * Context clock, runs when context enabled.
         */
-       u64                     time_now;
-       u64                     time_lost;
+       u64                     time;
+       u64                     timestamp;
 #endif
 };
 
@@ -464,14 +517,14 @@ struct perf_cpu_context {
        int                     recursion[4];
 };
 
+#ifdef CONFIG_PERF_COUNTERS
+
 /*
  * Set by architecture code:
  */
 extern int perf_max_counters;
 
-#ifdef CONFIG_PERF_COUNTERS
-extern const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter);
+extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
 
 extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
 extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
@@ -490,8 +543,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern void perf_counter_output(struct perf_counter *counter,
-                               int nmi, struct pt_regs *regs);
+extern int perf_counter_overflow(struct perf_counter *counter,
+                                int nmi, struct pt_regs *regs, u64 addr);
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
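
perf_counter_overflow() replaces perf_counter_output() and returns nonzero once the new event_limit is exhausted. A sketch of how an interrupt handler might consume that, using only the pmu ops declared above:

    /* inside an arch overflow handler (sketch) */
    if (perf_counter_overflow(counter, /* nmi: */ 1, regs, addr))
            counter->pmu->disable(counter);  /* event_limit reached, stop it */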
@@ -501,7 +554,7 @@ static inline int is_software_counter(struct perf_counter *counter)
                perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
 }
 
-extern void perf_swcounter_event(u32, u64, int, struct pt_regs *);
+extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
 
 extern void perf_counter_mmap(unsigned long addr, unsigned long len,
                              unsigned long pgoff, struct file *file);
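
Software counter call sites grow a matching address argument; callers with nothing to report pass 0. A page-fault hook, for instance, could now forward the faulting address (call site illustrative):

    perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, nmi, regs, address);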
@@ -509,6 +562,8 @@ extern void perf_counter_mmap(unsigned long addr, unsigned long len,
 extern void perf_counter_munmap(unsigned long addr, unsigned long len,
                                unsigned long pgoff, struct file *file);
 
+extern void perf_counter_comm(struct task_struct *tsk);
+
 #define MAX_STACK_DEPTH                255
 
 struct perf_callchain_entry {
@@ -518,6 +573,10 @@ struct perf_callchain_entry {
 
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
+extern int sysctl_perf_counter_priv;
+
+extern void perf_counter_init(void);
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu)          { }
@@ -536,8 +595,8 @@ static inline int perf_counter_task_disable(void)   { return -EINVAL; }
 static inline int perf_counter_task_enable(void)       { return -EINVAL; }
 
 static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) { }
-
+perf_swcounter_event(u32 event, u64 nr, int nmi,
+                    struct pt_regs *regs, u64 addr)                    { }
 
 static inline void
 perf_counter_mmap(unsigned long addr, unsigned long len,
@@ -545,8 +604,10 @@ perf_counter_mmap(unsigned long addr, unsigned long len,
 
 static inline void
 perf_counter_munmap(unsigned long addr, unsigned long len,
-                   unsigned long pgoff, struct file *file)             { }
+                   unsigned long pgoff, struct file *file)             { }
 
+static inline void perf_counter_comm(struct task_struct *tsk)          { }
+static inline void perf_counter_init(void)                             { }
 #endif
 
 #endif /* __KERNEL__ */