Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 63f1987..29b685f 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -44,11 +44,12 @@ static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
 
 /*
- * 0 - not paranoid
- * 1 - disallow cpu counters to unpriv
- * 2 - disallow kernel profiling to unpriv
+ * perf counter paranoia level:
+ *  0 - not paranoid
+ *  1 - disallow cpu counters to unpriv
+ *  2 - disallow kernel profiling to unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_paranoid __read_mostly;
 
 static inline bool perf_paranoid_cpu(void)
 {
@@ -61,7 +62,11 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
-int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
+
+/*
+ * max perf counter sample rate
+ */
+int sysctl_perf_counter_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_counter_id;
 
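The cpu half of this helper pair is consulted at counter-creation time. A minimal sketch of the call site, assuming the find_get_context() path (the exact location is an assumption; the kernel-profiling twin of this check is visible in sys_perf_counter_open() at the end of this patch):

	/* sketch: per-cpu counters need privilege when paranoid */
	if (cpu != -1) {
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);
	}
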
@@ -1019,7 +1024,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
        int do_switch = 1;
 
        regs = task_pt_regs(task);
-       perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
+       perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
 
        if (likely(!ctx || !cpuctx->task_ctx))
                return;
@@ -1244,7 +1249,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(counter, 1);
                        counter->pmu->unthrottle(counter);
-                       interrupts = 2*sysctl_perf_counter_limit/HZ;
+                       interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
                }
 
                if (!counter->attr.freq || !counter->attr.sample_freq)
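Worked out with the defaults: sysctl_perf_counter_sample_rate = 100000 and HZ = 1000 charge an unthrottled counter 2*100000/1000 = 200 interrupts for the elapsed tick, twice the per-tick budget, so the frequency-adjustment logic below backs the sample period off sharply instead of letting the counter throttle again on the next tick.
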
@@ -1682,7 +1687,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
        spin_lock_irq(&ctx->lock);
        if (counter->attr.freq) {
-               if (value > sysctl_perf_counter_limit) {
+               if (value > sysctl_perf_counter_sample_rate) {
                        ret = -EINVAL;
                        goto unlock;
                }
@@ -2945,13 +2950,15 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
        struct {
                struct perf_event_header        header;
                u64                             time;
+               u64                             id;
        } throttle_event = {
                .header = {
                        .type = PERF_EVENT_THROTTLE + 1,
                        .misc = 0,
                        .size = sizeof(throttle_event),
                },
-               .time = sched_clock(),
+               .time   = sched_clock(),
+               .id     = counter->id,
        };
 
        ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
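User space now gets the counter id with every throttle record read from the mmap ring buffer. A sketch of the record as a reader would declare it (the struct name is illustrative; the layout mirrors the anonymous struct above):

	struct throttle_record {		/* illustrative name */
		struct perf_event_header header;
		__u64	time;	/* sched_clock() at (un)throttle */
		__u64	id;	/* counter->id: which counter it was */
	};
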
@@ -2979,7 +2986,8 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
        } else {
                if (hwc->interrupts != MAX_INTERRUPTS) {
                        hwc->interrupts++;
-                       if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) {
+                       if (HZ * hwc->interrupts >
+                                       (u64)sysctl_perf_counter_sample_rate) {
                                hwc->interrupts = MAX_INTERRUPTS;
                                perf_log_throttle(counter, 0);
                                ret = 1;
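In round numbers: with HZ = 1000 and the default sample rate of 100000, this check fires once a counter takes more than 100000/1000 = 100 interrupts within one tick. The counter is then parked at MAX_INTERRUPTS until the perf_ctx_adjust_freq() path shown earlier unthrottles it on a later tick.
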
@@ -3156,7 +3164,7 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
-                               enum perf_event_types type,
+                               enum perf_type_id type,
                                u32 event, struct pt_regs *regs)
 {
        if (!perf_swcounter_is_counting(counter))
@@ -3188,7 +3196,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 }
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
-                                    enum perf_event_types type, u32 event,
+                                    enum perf_type_id type, u32 event,
                                     u64 nr, int nmi, struct pt_regs *regs,
                                     u64 addr)
 {
@@ -3219,7 +3227,7 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
        return &cpuctx->recursion[0];
 }
 
-static void __perf_swcounter_event(enum perf_event_types type, u32 event,
+static void __perf_swcounter_event(enum perf_type_id type, u32 event,
                                   u64 nr, int nmi, struct pt_regs *regs,
                                   u64 addr)
 {
@@ -3405,13 +3413,13 @@ void perf_counter_task_migration(struct task_struct *task, int cpu)
        struct perf_counter_context *ctx;
 
        perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
-                                PERF_COUNT_CPU_MIGRATIONS,
+                                PERF_COUNT_SW_CPU_MIGRATIONS,
                                 1, 1, NULL, 0);
 
        ctx = perf_pin_task_context(task);
        if (ctx) {
                perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
-                                        PERF_COUNT_CPU_MIGRATIONS,
+                                        PERF_COUNT_SW_CPU_MIGRATIONS,
                                         1, 1, NULL, 0);
                perf_unpin_context(ctx);
        }
@@ -3469,11 +3477,11 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
         * events.
         */
        switch (counter->attr.config) {
-       case PERF_COUNT_CPU_CLOCK:
+       case PERF_COUNT_SW_CPU_CLOCK:
                pmu = &perf_ops_cpu_clock;
 
                break;
-       case PERF_COUNT_TASK_CLOCK:
+       case PERF_COUNT_SW_TASK_CLOCK:
                /*
                 * If the user instantiates this as a per-cpu counter,
                 * use the cpu_clock counter instead.
@@ -3484,11 +3492,11 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
                        pmu = &perf_ops_cpu_clock;
 
                break;
-       case PERF_COUNT_PAGE_FAULTS:
-       case PERF_COUNT_PAGE_FAULTS_MIN:
-       case PERF_COUNT_PAGE_FAULTS_MAJ:
-       case PERF_COUNT_CONTEXT_SWITCHES:
-       case PERF_COUNT_CPU_MIGRATIONS:
+       case PERF_COUNT_SW_PAGE_FAULTS:
+       case PERF_COUNT_SW_PAGE_FAULTS_MIN:
+       case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
+       case PERF_COUNT_SW_CONTEXT_SWITCHES:
+       case PERF_COUNT_SW_CPU_MIGRATIONS:
                pmu = &perf_ops_generic;
                break;
        }
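A hedged user-space sketch exercising one of the renamed software events end to end, assuming the exported linux/perf_counter.h from this tree; the syscall number is arch-specific (the 298 below is the x86-64 value of this era, an assumption worth checking against unistd.h) and error handling is trimmed:

#include <linux/perf_counter.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

#ifndef __NR_perf_counter_open
#define __NR_perf_counter_open 298	/* assumption: x86-64 */
#endif

/* count context switches of @pid on any cpu */
static int open_ctx_switch_counter(pid_t pid)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);	/* see perf_copy_attr() below */
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;

	/* attr, pid, cpu = -1 (any), group_fd = -1, flags = 0 */
	return syscall(__NR_perf_counter_open, &attr, pid, -1, -1, 0);
}
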
@@ -3562,12 +3570,8 @@ perf_counter_alloc(struct perf_counter_attr *attr,
        if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
                goto done;
 
-       if (attr->type == PERF_TYPE_RAW) {
-               pmu = hw_perf_counter_init(counter);
-               goto done;
-       }
-
        switch (attr->type) {
+       case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                pmu = hw_perf_counter_init(counter);
@@ -3580,6 +3584,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
        case PERF_TYPE_TRACEPOINT:
                pmu = tp_perf_counter_init(counter);
                break;
+
+       default:
+               break;
        }
 done:
        err = 0;
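With PERF_TYPE_RAW folded into the switch, a raw hardware event now takes the same hw_perf_counter_init() path as the symbolic types; only the way config is filled differs. A fragment, with a placeholder raw event code (its encoding is CPU-model specific):

	struct perf_counter_attr attr = {
		.size   = sizeof(attr),
		.type   = PERF_TYPE_RAW,
		.config = 0x00c0,	/* placeholder; model-specific encoding */
	};
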
@@ -3606,6 +3613,85 @@ done:
        return counter;
 }
 
+static int perf_copy_attr(struct perf_counter_attr __user *uattr,
+                         struct perf_counter_attr *attr)
+{
+       int ret;
+       u32 size;
+
+       if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
+               return -EFAULT;
+
+       /*
+        * Zero the full structure, so that a short copy from user space
+        * leaves the remaining, newer fields zeroed.
+        */
+       memset(attr, 0, sizeof(*attr));
+
+       ret = get_user(size, &uattr->size);
+       if (ret)
+               return ret;
+
+       if (size > PAGE_SIZE)   /* silly large */
+               goto err_size;
+
+       if (!size)              /* abi compat */
+               size = PERF_ATTR_SIZE_VER0;
+
+       if (size < PERF_ATTR_SIZE_VER0)
+               goto err_size;
+
+       /*
+        * If we're handed a bigger struct than we know of,
+        * ensure all the unknown bits are 0.
+        */
+       if (size > sizeof(*attr)) {
+               unsigned long val;
+               unsigned long __user *addr;
+               unsigned long __user *end;
+
+               addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
+                               sizeof(unsigned long));
+               end  = PTR_ALIGN((void __user *)uattr + size,
+                               sizeof(unsigned long));
+
+               for (; addr < end; addr++) {
+                       ret = get_user(val, addr);
+                       if (ret)
+                               return ret;
+                       if (val)
+                               goto err_size;
+               }
+               size = sizeof(*attr);
+       }
+
+       ret = copy_from_user(attr, uattr, size);
+       if (ret)
+               return -EFAULT;
+
+       /*
+        * If the type exists, the corresponding creation will verify
+        * the attr->config.
+        */
+       if (attr->type >= PERF_TYPE_MAX)
+               return -EINVAL;
+
+       if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
+               return -EINVAL;
+
+       if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
+               return -EINVAL;
+
+       if (attr->read_format & ~(PERF_FORMAT_MAX-1))
+               return -EINVAL;
+
+out:
+       return ret;
+
+err_size:
+       put_user(sizeof(*attr), &uattr->size);
+       ret = -E2BIG;
+       goto out;
+}
+
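perf_copy_attr() makes the attr ABI extensible in both directions: an old binary's short (or zero) size gets its missing tail from the memset(), while a new binary on an older kernel is refused with -E2BIG and the kernel's supported size written back through uattr->size. A sketch of the fallback that write-back enables, assuming the attr/pid/fd variables of an open attempt like the one sketched earlier, plus <errno.h>:

	fd = syscall(__NR_perf_counter_open, &attr, pid, -1, -1, 0);
	if (fd < 0 && errno == E2BIG) {
		/*
		 * attr.size now holds the kernel's struct size; retrying
		 * knowingly gives up whatever the newer fields past that
		 * size had asked for.
		 */
		fd = syscall(__NR_perf_counter_open, &attr, pid, -1, -1, 0);
	}
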
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
@@ -3615,7 +3701,7 @@ done:
  * @group_fd:          group leader counter fd
  */
 SYSCALL_DEFINE5(perf_counter_open,
-               const struct perf_counter_attr __user *, attr_uptr,
+               struct perf_counter_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
        struct perf_counter *counter, *group_leader;
@@ -3631,14 +3717,20 @@ SYSCALL_DEFINE5(perf_counter_open,
        if (flags)
                return -EINVAL;
 
-       if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
-               return -EFAULT;
+       ret = perf_copy_attr(attr_uptr, &attr);
+       if (ret)
+               return ret;
 
        if (!attr.exclude_kernel) {
                if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
                        return -EACCES;
        }
 
+       if (attr.freq) {
+               if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+                       return -EINVAL;
+       }
+
        /*
         * Get the target context (task or percpu):
         */
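Concretely: with the default sysctl_perf_counter_sample_rate of 100000, an open with attr.freq = 1 and attr.sample_freq = 250000 now fails up front with -EINVAL rather than being accepted and left to the throttling machinery.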