Merge branch 'perf/urgent' into perf/core
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5adce10..f8bddb5 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
+#include <linux/device.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -31,6 +32,7 @@
 #include <asm/compat.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
+#include <asm/timer.h>
 
 #include "perf_event.h"
 
@@ -1210,6 +1212,8 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
                break;
 
        case CPU_STARTING:
+               if (x86_pmu.attr_rdpmc)
+                       set_in_cr4(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;
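
CPU_STARTING runs on each processor as it comes online, so hot-plugged CPUs pick up CR4.PCE as well; the boot-time default set below only covers CPUs present at init. A minimal sketch of the effect, assuming the read_cr4()/write_cr4() accessors of this kernel generation (the real set_in_cr4() helper additionally records the bit in mmu_cr4_features):

    /* Sketch only: CR4.PCE simply permits the RDPMC instruction
     * to execute at CPL > 0. */
    static inline void allow_user_rdpmc(void)
    {
            write_cr4(read_cr4() | X86_CR4_PCE);
    }
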
@@ -1319,6 +1323,8 @@ static int __init init_hw_perf_events(void)
                }
        }
 
+       x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+
        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
@@ -1542,10 +1548,71 @@ static int x86_pmu_event_init(struct perf_event *event)
        return err;
 }
 
+static int x86_pmu_event_idx(struct perf_event *event)
+{
+       int idx = event->hw.idx;
+
+       if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
+               idx -= X86_PMC_IDX_FIXED;
+               idx |= 1 << 30;
+       }
+
+       return idx + 1;
+}
+
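
The value returned here is what the kernel publishes in the perf mmap control page: 0 means "the event is not currently on a hardware counter", hence the unconditional +1, while bit 30 mirrors the RDPMC ECX encoding that selects the fixed-function counter bank. A hedged userspace sketch of consuming that index (rdpmc_read() is illustrative, not part of this patch):

    #include <stdint.h>

    /* Read a counter via RDPMC given the index from the mmap page.
     * Subtracting 1 undoes the +1 above while leaving bit 30 intact,
     * which RDPMC interprets as "fixed-function counter". */
    static inline uint64_t rdpmc_read(int idx)
    {
            uint32_t lo, hi;

            asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx - 1));
            return (uint64_t)lo | ((uint64_t)hi << 32);
    }
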
+static ssize_t get_attr_rdpmc(struct device *cdev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
+}
+
+static void change_rdpmc(void *info)
+{
+       bool enable = !!(unsigned long)info;
+
+       if (enable)
+               set_in_cr4(X86_CR4_PCE);
+       else
+               clear_in_cr4(X86_CR4_PCE);
+}
+
+static ssize_t set_attr_rdpmc(struct device *cdev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       unsigned long val = simple_strtoul(buf, NULL, 0);
+
+       if (!!val != !!x86_pmu.attr_rdpmc) {
+               x86_pmu.attr_rdpmc = !!val;
+               on_each_cpu(change_rdpmc, (void *)val, 1);
+       }
+
+       return count;
+}
+
+static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
+
+static struct attribute *x86_pmu_attrs[] = {
+       &dev_attr_rdpmc.attr,
+       NULL,
+};
+
+static struct attribute_group x86_pmu_attr_group = {
+       .attrs = x86_pmu_attrs,
+};
+
+static const struct attribute_group *x86_pmu_attr_groups[] = {
+       &x86_pmu_attr_group,
+       NULL,
+};
+
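
Hooked up through .attr_groups below, the attribute surfaces as a writable sysfs file; for the x86 PMU that is conventionally /sys/bus/event_source/devices/cpu/rdpmc. A usage sketch, assuming that path:

    #include <fcntl.h>
    #include <unistd.h>

    /* Enable or disable userspace RDPMC system-wide via the new
     * attribute; the path assumes this PMU registers as "cpu". */
    static int set_rdpmc(int enable)
    {
            int fd = open("/sys/bus/event_source/devices/cpu/rdpmc",
                          O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, enable ? "1" : "0", 1) != 1) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }
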
 static struct pmu pmu = {
        .pmu_enable     = x86_pmu_enable,
        .pmu_disable    = x86_pmu_disable,
 
+       .attr_groups    = x86_pmu_attr_groups,
+
        .event_init     = x86_pmu_event_init,
 
        .add            = x86_pmu_add,
@@ -1557,8 +1624,23 @@ static struct pmu pmu = {
        .start_txn      = x86_pmu_start_txn,
        .cancel_txn     = x86_pmu_cancel_txn,
        .commit_txn     = x86_pmu_commit_txn,
+
+       .event_idx      = x86_pmu_event_idx,
 };
 
+void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+{
+       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+               return;
+
+       userpg->time_mult = this_cpu_read(cyc2ns);
+       userpg->time_shift = CYC2NS_SCALE_FACTOR;
+       userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
+}
+
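
With a constant, non-stop TSC these three fields let userspace turn its own RDTSC readings into the nanoseconds elapsed since the kernel last refreshed the page (the "now" timestamp), since time_offset folds cyc2ns_offset - now into a single term. A sketch of the consumer side, with the multiply split so tsc * time_mult stays within 64-bit arithmetic:

    #include <stdint.h>

    /* delta_ns = time_offset + (tsc * time_mult) >> time_shift,
     * evaluated in two halves to avoid overflowing the product. */
    static inline int64_t tsc_delta_ns(uint64_t tsc, uint32_t mult,
                                       unsigned int shift, int64_t offset)
    {
            uint64_t quot = tsc >> shift;
            uint64_t rem  = tsc & (((uint64_t)1 << shift) - 1);

            return offset + quot * mult + ((rem * mult) >> shift);
    }
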
 /*
  * callchain support
  */