1 #include "perf_event_intel_uncore.h"
3 static struct intel_uncore_type *empty_uncore[] = { NULL, };
4 static struct intel_uncore_type **msr_uncores = empty_uncore;
5 static struct intel_uncore_type **pci_uncores = empty_uncore;
6 /* pci bus to socket mapping */
7 static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
9 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
11 /* mask of cpus that collect uncore events */
12 static cpumask_t uncore_cpu_mask;
14 /* constraint for the fixed counter */
15 static struct event_constraint constraint_fixed =
16 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
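/*
 * The fixed constraint matches the ~0ULL pseudo config used for
 * fixed-counter events and allows only the UNCORE_PMC_IDX_FIXED slot;
 * see uncore_get_event_constraint() below.
 */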
17 static struct event_constraint constraint_empty =
18 EVENT_CONSTRAINT(0, 0, 0);
20 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
21 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
22 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
23 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
24 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
25 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
26 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
27 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
28 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
29 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
30 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
31 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
32 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
33 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
34 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
35 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
36 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
37 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
38 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
39 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
40 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
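/*
 * The format attributes above describe how the bits of
 * perf_event_attr::config/config1 are laid out; they are exported
 * through each PMU's "format" sysfs directory so that userspace can
 * encode events symbolically.
 */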
42 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
46 rdmsrl(event->hw.event_base, count);
52 * generic get constraint function for shared match/mask registers.
54 static struct event_constraint *
55 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
57 struct intel_uncore_extra_reg *er;
58 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
59 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
64 * reg->alloc can be set due to existing state, so for fake box we
65 * need to ignore this, otherwise we might fail to allocate proper
66 * fake state for this extra reg constraint.
68 if (reg1->idx == EXTRA_REG_NONE ||
69 (!uncore_box_is_fake(box) && reg1->alloc))
72 er = &box->shared_regs[reg1->idx];
73 raw_spin_lock_irqsave(&er->lock, flags);
74 if (!atomic_read(&er->ref) ||
75 (er->config1 == reg1->config && er->config2 == reg2->config)) {
77 er->config1 = reg1->config;
78 er->config2 = reg2->config;
81 raw_spin_unlock_irqrestore(&er->lock, flags);
84 if (!uncore_box_is_fake(box))
89 return &constraint_empty;
92 static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
94 struct intel_uncore_extra_reg *er;
95 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
98 * Only put constraint if extra reg was actually allocated. Also
99 * takes care of events which do not use an extra shared reg.
101 * Also, if this is a fake box we shouldn't touch any event state
102 * (reg->alloc) and we don't care about leaving inconsistent box
103 * state either since it will be thrown out.
105 if (uncore_box_is_fake(box) || !reg1->alloc)
108 er = &box->shared_regs[reg1->idx];
109 atomic_dec(&er->ref);
113 /* Sandy Bridge-EP uncore support */
114 static struct intel_uncore_type snbep_uncore_cbox;
115 static struct intel_uncore_type snbep_uncore_pcu;
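/*
 * SNB-EP boxes are stopped and restarted by toggling the FRZ (freeze)
 * bit in the per-box control register: disable_box sets it, enable_box
 * clears it, and init_box writes the reset value.
 */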
117 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
119 struct pci_dev *pdev = box->pci_dev;
120 int box_ctl = uncore_pci_box_ctl(box);
123 pci_read_config_dword(pdev, box_ctl, &config);
124 config |= SNBEP_PMON_BOX_CTL_FRZ;
125 pci_write_config_dword(pdev, box_ctl, config);
128 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
130 struct pci_dev *pdev = box->pci_dev;
131 int box_ctl = uncore_pci_box_ctl(box);
134 pci_read_config_dword(pdev, box_ctl, &config);
135 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
136 pci_write_config_dword(pdev, box_ctl, config);
139 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
141 struct pci_dev *pdev = box->pci_dev;
142 struct hw_perf_event *hwc = &event->hw;
144 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
147 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
149 struct pci_dev *pdev = box->pci_dev;
150 struct hw_perf_event *hwc = &event->hw;
152 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
155 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
157 struct pci_dev *pdev = box->pci_dev;
158 struct hw_perf_event *hwc = &event->hw;
161 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
162 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
167 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
169 struct pci_dev *pdev = box->pci_dev;
171 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
174 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
179 msr = uncore_msr_box_ctl(box);
182 config |= SNBEP_PMON_BOX_CTL_FRZ;
187 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
192 msr = uncore_msr_box_ctl(box);
195 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
200 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
202 struct hw_perf_event *hwc = &event->hw;
203 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
205 if (reg1->idx != EXTRA_REG_NONE)
206 wrmsrl(reg1->reg, reg1->config);
208 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
211 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
212 struct perf_event *event)
214 struct hw_perf_event *hwc = &event->hw;
216 wrmsrl(hwc->config_base, hwc->config);
219 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
221 unsigned msr = uncore_msr_box_ctl(box);
224 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
227 static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
229 struct hw_perf_event *hwc = &event->hw;
230 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
232 if (box->pmu->type == &snbep_uncore_cbox) {
233 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
234 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
235 reg1->config = event->attr.config1 &
236 SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
238 if (box->pmu->type == &snbep_uncore_pcu) {
239 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
240 reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
250 static struct attribute *snbep_uncore_formats_attr[] = {
251 &format_attr_event.attr,
252 &format_attr_umask.attr,
253 &format_attr_edge.attr,
254 &format_attr_inv.attr,
255 &format_attr_thresh8.attr,
259 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
260 &format_attr_event.attr,
261 &format_attr_umask.attr,
262 &format_attr_edge.attr,
263 &format_attr_inv.attr,
264 &format_attr_thresh5.attr,
268 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
269 &format_attr_event.attr,
270 &format_attr_umask.attr,
271 &format_attr_edge.attr,
272 &format_attr_tid_en.attr,
273 &format_attr_inv.attr,
274 &format_attr_thresh8.attr,
275 &format_attr_filter_tid.attr,
276 &format_attr_filter_nid.attr,
277 &format_attr_filter_state.attr,
278 &format_attr_filter_opc.attr,
282 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
283 &format_attr_event.attr,
284 &format_attr_occ_sel.attr,
285 &format_attr_edge.attr,
286 &format_attr_inv.attr,
287 &format_attr_thresh5.attr,
288 &format_attr_occ_invert.attr,
289 &format_attr_occ_edge.attr,
290 &format_attr_filter_band0.attr,
291 &format_attr_filter_band1.attr,
292 &format_attr_filter_band2.attr,
293 &format_attr_filter_band3.attr,
297 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
298 &format_attr_event_ext.attr,
299 &format_attr_umask.attr,
300 &format_attr_edge.attr,
301 &format_attr_inv.attr,
302 &format_attr_thresh8.attr,
306 static struct uncore_event_desc snbep_uncore_imc_events[] = {
307 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
308 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
309 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
310 { /* end: all zeroes */ },
313 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
314 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
315 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
316 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
317 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
318 { /* end: all zeroes */ },
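/*
 * INTEL_UNCORE_EVENT_DESC() entries like the ones above are exported
 * through each PMU's "events" sysfs directory, giving userspace
 * symbolic names for the raw event encodings.
 */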
321 static struct attribute_group snbep_uncore_format_group = {
323 .attrs = snbep_uncore_formats_attr,
326 static struct attribute_group snbep_uncore_ubox_format_group = {
328 .attrs = snbep_uncore_ubox_formats_attr,
331 static struct attribute_group snbep_uncore_cbox_format_group = {
333 .attrs = snbep_uncore_cbox_formats_attr,
336 static struct attribute_group snbep_uncore_pcu_format_group = {
338 .attrs = snbep_uncore_pcu_formats_attr,
341 static struct attribute_group snbep_uncore_qpi_format_group = {
343 .attrs = snbep_uncore_qpi_formats_attr,
346 static struct intel_uncore_ops snbep_uncore_msr_ops = {
347 .init_box = snbep_uncore_msr_init_box,
348 .disable_box = snbep_uncore_msr_disable_box,
349 .enable_box = snbep_uncore_msr_enable_box,
350 .disable_event = snbep_uncore_msr_disable_event,
351 .enable_event = snbep_uncore_msr_enable_event,
352 .read_counter = uncore_msr_read_counter,
353 .get_constraint = uncore_get_constraint,
354 .put_constraint = uncore_put_constraint,
355 .hw_config = snbep_uncore_hw_config,
358 static struct intel_uncore_ops snbep_uncore_pci_ops = {
359 .init_box = snbep_uncore_pci_init_box,
360 .disable_box = snbep_uncore_pci_disable_box,
361 .enable_box = snbep_uncore_pci_enable_box,
362 .disable_event = snbep_uncore_pci_disable_event,
363 .enable_event = snbep_uncore_pci_enable_event,
364 .read_counter = snbep_uncore_pci_read_counter,
367 static struct event_constraint snbep_uncore_cbox_constraints[] = {
368 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
369 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
370 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
371 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
372 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
373 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
374 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
375 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
376 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
377 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
378 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
379 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
380 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
381 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
382 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
383 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
384 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
385 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
386 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
387 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
388 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
389 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
390 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
391 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
392 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
396 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
397 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
398 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
399 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
400 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
401 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
402 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
403 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
404 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
405 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
406 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
410 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
411 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
412 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
413 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
414 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
415 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
416 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
417 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
418 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
419 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
420 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
421 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
422 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
423 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
427 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
428 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
432 static struct intel_uncore_type snbep_uncore_ubox = {
437 .fixed_ctr_bits = 48,
438 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
439 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
440 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
441 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
442 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
443 .ops = &snbep_uncore_msr_ops,
444 .format_group = &snbep_uncore_ubox_format_group,
447 static struct intel_uncore_type snbep_uncore_cbox = {
452 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
453 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
454 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
455 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
456 .msr_offset = SNBEP_CBO_MSR_OFFSET,
457 .num_shared_regs = 1,
458 .constraints = snbep_uncore_cbox_constraints,
459 .ops = &snbep_uncore_msr_ops,
460 .format_group = &snbep_uncore_cbox_format_group,
463 static struct intel_uncore_type snbep_uncore_pcu = {
468 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
469 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
470 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
471 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
472 .num_shared_regs = 1,
473 .ops = &snbep_uncore_msr_ops,
474 .format_group = &snbep_uncore_pcu_format_group,
477 static struct intel_uncore_type *snbep_msr_uncores[] = {
484 #define SNBEP_UNCORE_PCI_COMMON_INIT() \
485 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
486 .event_ctl = SNBEP_PCI_PMON_CTL0, \
487 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
488 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
489 .ops = &snbep_uncore_pci_ops, \
490 .format_group = &snbep_uncore_format_group
492 static struct intel_uncore_type snbep_uncore_ha = {
497 SNBEP_UNCORE_PCI_COMMON_INIT(),
500 static struct intel_uncore_type snbep_uncore_imc = {
505 .fixed_ctr_bits = 48,
506 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
507 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
508 .event_descs = snbep_uncore_imc_events,
509 SNBEP_UNCORE_PCI_COMMON_INIT(),
512 static struct intel_uncore_type snbep_uncore_qpi = {
517 .perf_ctr = SNBEP_PCI_PMON_CTR0,
518 .event_ctl = SNBEP_PCI_PMON_CTL0,
519 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
520 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
521 .ops = &snbep_uncore_pci_ops,
522 .event_descs = snbep_uncore_qpi_events,
523 .format_group = &snbep_uncore_qpi_format_group,
527 static struct intel_uncore_type snbep_uncore_r2pcie = {
532 .constraints = snbep_uncore_r2pcie_constraints,
533 SNBEP_UNCORE_PCI_COMMON_INIT(),
536 static struct intel_uncore_type snbep_uncore_r3qpi = {
541 .constraints = snbep_uncore_r3qpi_constraints,
542 SNBEP_UNCORE_PCI_COMMON_INIT(),
545 static struct intel_uncore_type *snbep_pci_uncores[] = {
549 &snbep_uncore_r2pcie,
554 static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
556 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
557 .driver_data = (unsigned long)&snbep_uncore_ha,
560 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
561 .driver_data = (unsigned long)&snbep_uncore_imc,
564 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
565 .driver_data = (unsigned long)&snbep_uncore_imc,
568 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
569 .driver_data = (unsigned long)&snbep_uncore_imc,
572 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
573 .driver_data = (unsigned long)&snbep_uncore_imc,
576 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
577 .driver_data = (unsigned long)&snbep_uncore_qpi,
580 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
581 .driver_data = (unsigned long)&snbep_uncore_qpi,
584 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
585 .driver_data = (unsigned long)&snbep_uncore_r2pcie,
588 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
589 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
592 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
593 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
595 { /* end: all zeroes */ }
598 static struct pci_driver snbep_uncore_pci_driver = {
599 .name = "snbep_uncore",
600 .id_table = snbep_uncore_pci_ids,
604 * build pci bus to socket mapping
606 static void snbep_pci2phy_map_init(void)
608 struct pci_dev *ubox_dev = NULL;
613 /* find the UBOX device */
614 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
615 PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
619 bus = ubox_dev->bus->number;
620 /* get the Node ID of the local register */
621 pci_read_config_dword(ubox_dev, 0x40, &config);
623 /* get the Node ID mapping */
624 pci_read_config_dword(ubox_dev, 0x54, &config);
626 * each three-bit field in the Node ID mapping register maps
627 * to a particular node.
629 for (i = 0; i < 8; i++) {
630 if (nodeid == ((config >> (3 * i)) & 0x7)) {
631 pcibus_to_physid[bus] = i;
638 /* end of Sandy Bridge-EP uncore support */
640 /* Sandy Bridge uncore support */
641 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
643 struct hw_perf_event *hwc = &event->hw;
645 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
646 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
648 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
651 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
653 wrmsrl(event->hw.config_base, 0);
656 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
658 if (box->pmu->pmu_idx == 0) {
659 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
660 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
664 static struct attribute *snb_uncore_formats_attr[] = {
665 &format_attr_event.attr,
666 &format_attr_umask.attr,
667 &format_attr_edge.attr,
668 &format_attr_inv.attr,
669 &format_attr_cmask5.attr,
673 static struct attribute_group snb_uncore_format_group = {
675 .attrs = snb_uncore_formats_attr,
678 static struct intel_uncore_ops snb_uncore_msr_ops = {
679 .init_box = snb_uncore_msr_init_box,
680 .disable_event = snb_uncore_msr_disable_event,
681 .enable_event = snb_uncore_msr_enable_event,
682 .read_counter = uncore_msr_read_counter,
685 static struct event_constraint snb_uncore_cbox_constraints[] = {
686 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
687 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
691 static struct intel_uncore_type snb_uncore_cbox = {
696 .fixed_ctr_bits = 48,
697 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
698 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
699 .fixed_ctr = SNB_UNC_FIXED_CTR,
700 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
702 .event_mask = SNB_UNC_RAW_EVENT_MASK,
703 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
704 .constraints = snb_uncore_cbox_constraints,
705 .ops = &snb_uncore_msr_ops,
706 .format_group = &snb_uncore_format_group,
709 static struct intel_uncore_type *snb_msr_uncores[] = {
713 /* end of Sandy Bridge uncore support */
715 /* Nehalem uncore support */
716 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
718 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
721 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
723 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
726 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
728 struct hw_perf_event *hwc = &event->hw;
730 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
731 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
733 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
736 static struct attribute *nhm_uncore_formats_attr[] = {
737 &format_attr_event.attr,
738 &format_attr_umask.attr,
739 &format_attr_edge.attr,
740 &format_attr_inv.attr,
741 &format_attr_cmask8.attr,
745 static struct attribute_group nhm_uncore_format_group = {
747 .attrs = nhm_uncore_formats_attr,
750 static struct uncore_event_desc nhm_uncore_events[] = {
751 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
752 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
753 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
754 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
755 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
756 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
757 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
758 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
759 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
760 { /* end: all zeroes */ },
763 static struct intel_uncore_ops nhm_uncore_msr_ops = {
764 .disable_box = nhm_uncore_msr_disable_box,
765 .enable_box = nhm_uncore_msr_enable_box,
766 .disable_event = snb_uncore_msr_disable_event,
767 .enable_event = nhm_uncore_msr_enable_event,
768 .read_counter = uncore_msr_read_counter,
771 static struct intel_uncore_type nhm_uncore = {
776 .fixed_ctr_bits = 48,
777 .event_ctl = NHM_UNC_PERFEVTSEL0,
778 .perf_ctr = NHM_UNC_UNCORE_PMC0,
779 .fixed_ctr = NHM_UNC_FIXED_CTR,
780 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
781 .event_mask = NHM_UNC_RAW_EVENT_MASK,
782 .event_descs = nhm_uncore_events,
783 .ops = &nhm_uncore_msr_ops,
784 .format_group = &nhm_uncore_format_group,
787 static struct intel_uncore_type *nhm_msr_uncores[] = {
791 /* end of Nehalem uncore support */
793 /* Nehalem-EX uncore support */
794 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
795 ((1ULL << (n)) - 1)))
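/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x,
 * e.g. __BITS_VALUE(reg1->idx, 1, 8) returns bits 8-15 of reg1->idx.
 */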
797 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
798 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
799 DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
800 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
801 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
803 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
805 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
808 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
810 unsigned msr = uncore_msr_box_ctl(box);
815 config &= ~((1ULL << uncore_num_counters(box)) - 1);
816 /* WBox has a fixed counter */
817 if (uncore_msr_fixed_ctl(box))
818 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
823 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
825 unsigned msr = uncore_msr_box_ctl(box);
830 config |= (1ULL << uncore_num_counters(box)) - 1;
831 /* WBox has a fixed counter */
832 if (uncore_msr_fixed_ctl(box))
833 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
838 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
840 wrmsrl(event->hw.config_base, 0);
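/*
 * NHM-EX boxes differ in which bit of the control register enables a
 * counter: boxes whose event mask already claims bit 0 use bit 22 as
 * the enable bit, all others (and the fixed counter) use bit 0.
 */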
843 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
845 struct hw_perf_event *hwc = &event->hw;
847 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
848 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
849 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
850 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
852 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
855 #define NHMEX_UNCORE_OPS_COMMON_INIT() \
856 .init_box = nhmex_uncore_msr_init_box, \
857 .disable_box = nhmex_uncore_msr_disable_box, \
858 .enable_box = nhmex_uncore_msr_enable_box, \
859 .disable_event = nhmex_uncore_msr_disable_event, \
860 .read_counter = uncore_msr_read_counter
862 static struct intel_uncore_ops nhmex_uncore_ops = {
863 NHMEX_UNCORE_OPS_COMMON_INIT(),
864 .enable_event = nhmex_uncore_msr_enable_event,
867 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
868 &format_attr_event.attr,
869 &format_attr_edge.attr,
873 static struct attribute_group nhmex_uncore_ubox_format_group = {
875 .attrs = nhmex_uncore_ubox_formats_attr,
878 static struct intel_uncore_type nhmex_uncore_ubox = {
883 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
884 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
885 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
886 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
887 .ops = &nhmex_uncore_ops,
888 .format_group = &nhmex_uncore_ubox_format_group
891 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
892 &format_attr_event.attr,
893 &format_attr_umask.attr,
894 &format_attr_edge.attr,
895 &format_attr_inv.attr,
896 &format_attr_thresh8.attr,
900 static struct attribute_group nhmex_uncore_cbox_format_group = {
902 .attrs = nhmex_uncore_cbox_formats_attr,
905 static struct intel_uncore_type nhmex_uncore_cbox = {
910 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
911 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
912 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
913 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
914 .msr_offset = NHMEX_C_MSR_OFFSET,
916 .ops = &nhmex_uncore_ops,
917 .format_group = &nhmex_uncore_cbox_format_group
920 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
921 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
922 { /* end: all zeroes */ },
925 static struct intel_uncore_type nhmex_uncore_wbox = {
930 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
931 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
932 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
933 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
934 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
935 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
937 .event_descs = nhmex_uncore_wbox_events,
938 .ops = &nhmex_uncore_ops,
939 .format_group = &nhmex_uncore_cbox_format_group
942 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
944 struct hw_perf_event *hwc = &event->hw;
945 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
946 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
949 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
950 NHMEX_B_PMON_CTR_SHIFT;
951 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
952 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
954 /* events that do not use the match/mask registers */
955 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
956 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
959 if (box->pmu->pmu_idx == 0)
960 reg1->reg = NHMEX_B0_MSR_MATCH;
962 reg1->reg = NHMEX_B1_MSR_MATCH;
964 reg1->config = event->attr.config1;
965 reg2->config = event->attr.config2;
969 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
971 struct hw_perf_event *hwc = &event->hw;
972 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
973 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
975 if (reg1->idx != EXTRA_REG_NONE) {
976 wrmsrl(reg1->reg, reg1->config);
977 wrmsrl(reg1->reg + 1, reg2->config);
979 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
980 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
984 * The Bbox has 4 counters, but each counter monitors different events.
985 * Use bits 6-7 in the event config to select the counter.
987 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
988 EVENT_CONSTRAINT(0, 1, 0xc0),
989 EVENT_CONSTRAINT(0x40, 2, 0xc0),
990 EVENT_CONSTRAINT(0x80, 4, 0xc0),
991 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
992 EVENT_CONSTRAINT_END,
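/*
 * The constraint codes 0x00/0x40/0x80/0xc0 are the counter-select
 * values from bits 6-7 of the event config; each one maps to exactly
 * one of the four counters.
 */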
995 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
996 &format_attr_event5.attr,
997 &format_attr_counter.attr,
998 &format_attr_match.attr,
999 &format_attr_mask.attr,
1003 static struct attribute_group nhmex_uncore_bbox_format_group = {
1005 .attrs = nhmex_uncore_bbox_formats_attr,
1008 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1009 NHMEX_UNCORE_OPS_COMMON_INIT(),
1010 .enable_event = nhmex_bbox_msr_enable_event,
1011 .hw_config = nhmex_bbox_hw_config,
1012 .get_constraint = uncore_get_constraint,
1013 .put_constraint = uncore_put_constraint,
1016 static struct intel_uncore_type nhmex_uncore_bbox = {
1020 .perf_ctr_bits = 48,
1021 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1022 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1023 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1024 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1025 .msr_offset = NHMEX_B_MSR_OFFSET,
1027 .num_shared_regs = 1,
1028 .constraints = nhmex_uncore_bbox_constraints,
1029 .ops = &nhmex_uncore_bbox_ops,
1030 .format_group = &nhmex_uncore_bbox_format_group
1033 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1035 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1036 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1038 if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) {
1039 reg1->config = event->attr.config1;
1040 reg2->config = event->attr.config2;
1042 reg1->config = ~0ULL;
1043 reg2->config = ~0ULL;
1046 if (box->pmu->pmu_idx == 0)
1047 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1049 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1056 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1058 struct hw_perf_event *hwc = &event->hw;
1059 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1060 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1062 wrmsrl(reg1->reg, 0);
1063 if (reg1->config != ~0ULL || reg2->config != ~0ULL) {
1064 wrmsrl(reg1->reg + 1, reg1->config);
1065 wrmsrl(reg1->reg + 2, reg2->config);
1066 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1068 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1071 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1072 &format_attr_event.attr,
1073 &format_attr_umask.attr,
1074 &format_attr_edge.attr,
1075 &format_attr_inv.attr,
1076 &format_attr_thresh8.attr,
1077 &format_attr_mm_cfg.attr,
1078 &format_attr_match.attr,
1079 &format_attr_mask.attr,
1083 static struct attribute_group nhmex_uncore_sbox_format_group = {
1085 .attrs = nhmex_uncore_sbox_formats_attr,
1088 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
1089 NHMEX_UNCORE_OPS_COMMON_INIT(),
1090 .enable_event = nhmex_sbox_msr_enable_event,
1091 .hw_config = nhmex_sbox_hw_config,
1092 .get_constraint = uncore_get_constraint,
1093 .put_constraint = uncore_put_constraint,
1096 static struct intel_uncore_type nhmex_uncore_sbox = {
1100 .perf_ctr_bits = 48,
1101 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
1102 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
1103 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1104 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
1105 .msr_offset = NHMEX_S_MSR_OFFSET,
1107 .num_shared_regs = 1,
1108 .ops = &nhmex_uncore_sbox_ops,
1109 .format_group = &nhmex_uncore_sbox_format_group
1113 EXTRA_REG_NHMEX_M_FILTER,
1114 EXTRA_REG_NHMEX_M_DSP,
1115 EXTRA_REG_NHMEX_M_ISS,
1116 EXTRA_REG_NHMEX_M_MAP,
1117 EXTRA_REG_NHMEX_M_MSC_THR,
1118 EXTRA_REG_NHMEX_M_PGT,
1119 EXTRA_REG_NHMEX_M_PLD,
1120 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
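/*
 * The extra register indices above double as indices into
 * box->shared_regs[]; the Mbox therefore declares num_shared_regs = 8.
 */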
1123 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1124 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
1125 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
1126 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
1127 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
1128 /* event 0xa uses two extra registers */
1129 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
1130 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
1131 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
1132 /* events 0xd ~ 0x10 use the same extra register */
1133 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
1134 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
1135 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
1136 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
1137 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
1138 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
1139 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
1140 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
1141 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
1145 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1147 struct intel_uncore_extra_reg *er;
1148 unsigned long flags;
1152 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1153 er = &box->shared_regs[idx];
1154 raw_spin_lock_irqsave(&er->lock, flags);
1155 if (!atomic_read(&er->ref) || er->config == config) {
1156 atomic_inc(&er->ref);
1157 er->config = config;
1160 raw_spin_unlock_irqrestore(&er->lock, flags);
1165 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
1166 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
1167 * fields which are shared.
1169 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1170 if (WARN_ON_ONCE(idx >= 4))
1173 /* mask of the shared fields */
1174 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1175 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1177 raw_spin_lock_irqsave(&er->lock, flags);
1178 /* add mask of the non-shared field if it's in use */
1179 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8))
1180 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1182 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1183 atomic_add(1 << (idx * 8), &er->ref);
1184 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
1185 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1186 er->config &= ~mask;
1187 er->config |= (config & mask);
1190 raw_spin_unlock_irqrestore(&er->lock, flags);
1195 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
1197 struct intel_uncore_extra_reg *er;
1199 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1200 er = &box->shared_regs[idx];
1201 atomic_dec(&er->ref);
1205 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1206 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1207 atomic_sub(1 << (idx * 8), &er->ref);
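/*
 * Move an event to an equivalent ZDP_CTL_FVC field: relocate the
 * non-shared control bits to the new field and adjust the inc_sel
 * value in the main event selector to match.
 */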
1210 u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1212 struct hw_perf_event *hwc = &event->hw;
1213 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1214 int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
1215 u64 config = reg1->config;
1217 /* get the non-shared control bits and shift them */
1218 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1219 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1220 if (new_idx > orig_idx) {
1221 idx = new_idx - orig_idx;
1224 idx = orig_idx - new_idx;
1228 /* add the shared control bits back */
1229 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1231 /* adjust the main event selector */
1232 if (new_idx > orig_idx)
1233 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1235 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1236 reg1->config = config;
1237 reg1->idx = ~0xff | new_idx;
1242 static struct event_constraint *
1243 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1245 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1246 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1247 int i, idx[2], alloc = 0;
1248 u64 config1 = reg1->config;
1250 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
1251 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
1253 for (i = 0; i < 2; i++) {
1254 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
1260 if (!nhmex_mbox_get_shared_reg(box, idx[i],
1261 __BITS_VALUE(config1, i, 32)))
1263 alloc |= (0x1 << i);
1266 /* for the match/mask registers */
1267 if ((uncore_box_is_fake(box) || !reg2->alloc) &&
1268 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
1272 * If it's a fake box -- as per validate_{group,event}() we
1273 * shouldn't touch event state -- we can avoid doing so,
1274 * since both will only call get_event_constraints() once
1275 * on each event; this avoids the need for reg->alloc.
1277 if (!uncore_box_is_fake(box)) {
1278 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
1279 nhmex_mbox_alter_er(event, idx[0], true);
1280 reg1->alloc |= alloc;
1285 if (idx[0] != 0xff && !(alloc & 0x1) &&
1286 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1288 * events 0xd ~ 0x10 are functionally identical, but are
1289 * controlled by different fields in the ZDP_CTL_FVC
1290 * register. If we failed to take one field, try the alternative.
1293 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
1294 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1295 idx[0] = (idx[0] + 1) % 4;
1296 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1297 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
1298 config1 = nhmex_mbox_alter_er(event, idx[0], false);
1304 nhmex_mbox_put_shared_reg(box, idx[0]);
1306 nhmex_mbox_put_shared_reg(box, idx[1]);
1307 return &constraint_empty;
1310 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1312 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1313 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1315 if (uncore_box_is_fake(box))
1318 if (reg1->alloc & 0x1)
1319 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
1320 if (reg1->alloc & 0x2)
1321 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
1325 nhmex_mbox_put_shared_reg(box, reg2->idx);
1330 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
1332 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1334 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
1337 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1339 struct intel_uncore_type *type = box->pmu->type;
1340 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1341 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1342 struct extra_reg *er;
1346 if (WARN_ON_ONCE(reg1->idx != -1))
1349 * The mbox events may require 2 extra MSRs at the most. But only
1350 * the lower 32 bits in these MSRs are significant, so we can use
1351 * config1 to pass two MSRs' config.
1353 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
1354 if (er->event != (event->hw.config & er->config_mask))
1356 if (event->attr.config1 & ~er->valid_mask)
1358 if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) ||
1359 er->idx == __BITS_VALUE(reg1->idx, 1, 8))
1361 if (WARN_ON_ONCE(reg_idx >= 2))
1364 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
1365 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
1368 /* always use the 32~63 bits to pass the PLD config */
1369 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
1372 reg1->idx &= ~(0xff << (reg_idx * 8));
1373 reg1->reg &= ~(0xffff << (reg_idx * 16));
1374 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
1375 reg1->reg |= msr << (reg_idx * 16);
1376 reg1->config = event->attr.config1;
1379 /* use config2 to pass the filter config */
1380 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
1381 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
1382 reg2->config = event->attr.config2;
1384 reg2->config = ~0ULL;
1385 if (box->pmu->pmu_idx == 0)
1386 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
1388 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
1393 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1395 struct intel_uncore_extra_reg *er;
1396 unsigned long flags;
1399 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1400 return box->shared_regs[idx].config;
1402 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1403 raw_spin_lock_irqsave(&er->lock, flags);
1404 config = er->config;
1405 raw_spin_unlock_irqrestore(&er->lock, flags);
1409 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1411 struct hw_perf_event *hwc = &event->hw;
1412 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1413 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1416 idx = __BITS_VALUE(reg1->idx, 0, 8);
1418 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
1419 nhmex_mbox_shared_reg_config(box, idx));
1420 idx = __BITS_VALUE(reg1->idx, 1, 8);
1422 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
1423 nhmex_mbox_shared_reg_config(box, idx));
1425 wrmsrl(reg2->reg, 0);
1426 if (reg2->config != ~0ULL) {
1427 wrmsrl(reg2->reg + 1,
1428 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
1429 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
1430 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
1431 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
1434 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1437 DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
1438 DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
1439 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
1440 DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
1441 DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
1442 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
1443 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63");
1444 DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
1445 DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
1446 DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
1447 DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
1448 DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
1449 DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
1450 DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
1451 DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
1452 DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
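/*
 * dsp/thr/fvc/pgt/map/iss all alias config1:0-31 and pld aliases
 * config1:32-63; which extra MSR a value is written to is decided by
 * the inc_sel/set_flag_sel fields via nhmex_uncore_mbox_extra_regs[].
 */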
1454 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1455 &format_attr_count_mode.attr,
1456 &format_attr_storage_mode.attr,
1457 &format_attr_wrap_mode.attr,
1458 &format_attr_flag_mode.attr,
1459 &format_attr_inc_sel.attr,
1460 &format_attr_set_flag_sel.attr,
1461 &format_attr_filter_cfg.attr,
1462 &format_attr_filter_match.attr,
1463 &format_attr_filter_mask.attr,
1464 &format_attr_dsp.attr,
1465 &format_attr_thr.attr,
1466 &format_attr_fvc.attr,
1467 &format_attr_pgt.attr,
1468 &format_attr_map.attr,
1469 &format_attr_iss.attr,
1470 &format_attr_pld.attr,
1474 static struct attribute_group nhmex_uncore_mbox_format_group = {
1476 .attrs = nhmex_uncore_mbox_formats_attr,
1479 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
1480 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
1481 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
1482 { /* end: all zeroes */ },
1485 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
1486 NHMEX_UNCORE_OPS_COMMON_INIT(),
1487 .enable_event = nhmex_mbox_msr_enable_event,
1488 .hw_config = nhmex_mbox_hw_config,
1489 .get_constraint = nhmex_mbox_get_constraint,
1490 .put_constraint = nhmex_mbox_put_constraint,
1493 static struct intel_uncore_type nhmex_uncore_mbox = {
1497 .perf_ctr_bits = 48,
1498 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
1499 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
1500 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
1501 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
1502 .msr_offset = NHMEX_M_MSR_OFFSET,
1504 .num_shared_regs = 8,
1505 .event_descs = nhmex_uncore_mbox_events,
1506 .ops = &nhmex_uncore_mbox_ops,
1507 .format_group = &nhmex_uncore_mbox_format_group,
1510 void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1512 struct hw_perf_event *hwc = &event->hw;
1513 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1516 /* adjust the main event selector */
1517 if (reg1->idx % 2) {
1519 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1522 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1525 /* adjust address or config of extra register */
1526 port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
1527 switch (reg1->idx % 6) {
1529 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
1532 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
1535 /* the 8~15 bits to the 0~7 bits */
1539 /* the 0~7 bits to the 8~15 bits */
1543 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
1546 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
1552 * Each rbox has 4 event sets which monitor QPI ports 0~3 or 4~7.
1553 * An event set consists of 6 events; the 3rd and 4th events in
1554 * an event set use the same extra register, so an event set uses
1555 * 5 extra registers.
1557 static struct event_constraint *
1558 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1560 struct hw_perf_event *hwc = &event->hw;
1561 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1562 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1563 struct intel_uncore_extra_reg *er;
1564 unsigned long flags;
1569 if (!uncore_box_is_fake(box) && reg1->alloc)
1572 idx = reg1->idx % 6;
1573 config1 = reg1->config;
1576 /* the 3rd and 4th events use the same extra register */
1579 er_idx += (reg1->idx / 6) * 5;
1581 er = &box->shared_regs[er_idx];
1582 raw_spin_lock_irqsave(&er->lock, flags);
1584 if (!atomic_read(&er->ref) || er->config == reg1->config) {
1585 atomic_inc(&er->ref);
1586 er->config = reg1->config;
1589 } else if (idx == 2 || idx == 3) {
1591 * these two events use different fields in an extra register,
1592 * the 0~7 bits and the 8~15 bits respectively.
1594 u64 mask = 0xff << ((idx - 2) * 8);
1595 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
1596 !((er->config ^ config1) & mask)) {
1597 atomic_add(1 << ((idx - 2) * 8), &er->ref);
1598 er->config &= ~mask;
1599 er->config |= config1 & mask;
1603 if (!atomic_read(&er->ref) ||
1604 (er->config == (hwc->config >> 32) &&
1605 er->config1 == reg1->config &&
1606 er->config2 == reg2->config)) {
1607 atomic_inc(&er->ref);
1608 er->config = (hwc->config >> 32);
1609 er->config1 = reg1->config;
1610 er->config2 = reg2->config;
1614 raw_spin_unlock_irqrestore(&er->lock, flags);
1618 * The Rbox events are always in pairs. The paired
1619 * events are functionally identical, but use different
1620 * extra registers. If we failed to take an extra
1621 * register, try the alternative.
1627 if (idx != reg1->idx % 6) {
1635 if (!uncore_box_is_fake(box)) {
1636 if (idx != reg1->idx % 6)
1637 nhmex_rbox_alter_er(box, event);
1642 return &constraint_empty;
1645 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1647 struct intel_uncore_extra_reg *er;
1648 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1651 if (uncore_box_is_fake(box) || !reg1->alloc)
1654 idx = reg1->idx % 6;
1658 er_idx += (reg1->idx / 6) * 5;
1660 er = &box->shared_regs[er_idx];
1661 if (idx == 2 || idx == 3)
1662 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
1664 atomic_dec(&er->ref);
1669 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1671 struct hw_perf_event *hwc = &event->hw;
1672 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1673 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1676 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
1677 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1682 reg1->config = event->attr.config1;
1684 port = idx / 6 + box->pmu->pmu_idx * 4;
1688 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
1691 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
1695 reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port);
1700 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
1702 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
1703 reg2->config = event->attr.config2;
1704 hwc->config |= event->attr.config & (~0ULL << 32);
1710 static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1712 struct intel_uncore_extra_reg *er;
1713 unsigned long flags;
1716 er = &box->shared_regs[idx];
1718 raw_spin_lock_irqsave(&er->lock, flags);
1719 config = er->config;
1720 raw_spin_unlock_irqrestore(&er->lock, flags);
1725 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1727 struct hw_perf_event *hwc = &event->hw;
1728 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1729 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1732 idx = reg1->idx % 6;
1736 er_idx += (reg1->idx / 6) * 5;
1741 wrmsrl(reg1->reg, reg1->config);
1745 wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx));
1749 wrmsrl(reg1->reg, reg1->config);
1750 wrmsrl(reg1->reg + 1, hwc->config >> 32);
1751 wrmsrl(reg1->reg + 2, reg2->config);
1755 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1756 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
1759 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63");
1760 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63");
1761 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
1762 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
1763 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
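/*
 * Which of these config1/config2 layouts applies depends on the event
 * number within an rbox event set; nhmex_rbox_hw_config() picks the
 * matching IPERF_CFG, QLX_CFG or XBR MM_CFG register accordingly.
 */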
1765 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
1766 &format_attr_event5.attr,
1767 &format_attr_xbr_mm_cfg.attr,
1768 &format_attr_xbr_match.attr,
1769 &format_attr_xbr_mask.attr,
1770 &format_attr_qlx_cfg.attr,
1771 &format_attr_iperf_cfg.attr,
1775 static struct attribute_group nhmex_uncore_rbox_format_group = {
1777 .attrs = nhmex_uncore_rbox_formats_attr,
1780 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
1781 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
1782 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
1783 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
1784 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
1785 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
1786 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
1787 { /* end: all zeroes */ },
1790 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
1791 NHMEX_UNCORE_OPS_COMMON_INIT(),
1792 .enable_event = nhmex_rbox_msr_enable_event,
1793 .hw_config = nhmex_rbox_hw_config,
1794 .get_constraint = nhmex_rbox_get_constraint,
1795 .put_constraint = nhmex_rbox_put_constraint,
1798 static struct intel_uncore_type nhmex_uncore_rbox = {
1802 .perf_ctr_bits = 48,
1803 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
1804 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
1805 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
1806 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
1807 .msr_offset = NHMEX_R_MSR_OFFSET,
1809 .num_shared_regs = 20,
1810 .event_descs = nhmex_uncore_rbox_events,
1811 .ops = &nhmex_uncore_rbox_ops,
1812 .format_group = &nhmex_uncore_rbox_format_group
1815 static struct intel_uncore_type *nhmex_msr_uncores[] = {
1825 /* end of Nehalem-EX uncore support */
1827 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
1829 struct hw_perf_event *hwc = &event->hw;
1832 hwc->last_tag = ++box->tags[idx];
1834 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
1835 hwc->event_base = uncore_fixed_ctr(box);
1836 hwc->config_base = uncore_fixed_ctl(box);
1840 hwc->config_base = uncore_event_ctl(box, hwc->idx);
1841 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
1844 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
1846 u64 prev_count, new_count, delta;
1849 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
1850 shift = 64 - uncore_fixed_ctr_bits(box);
1852 shift = 64 - uncore_perf_ctr_bits(box);
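/*
 * Shifting both counts up by (64 - counter width) below makes the
 * subtraction wrap correctly when the narrower hardware counter
 * overflows.
 */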
1854 /* the hrtimer might modify the previous event value */
1856 prev_count = local64_read(&event->hw.prev_count);
1857 new_count = uncore_read_counter(box, event);
1858 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
1861 delta = (new_count << shift) - (prev_count << shift);
1864 local64_add(delta, &event->count);
1868 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
1869 * for SandyBridge, so we use a hrtimer to periodically poll the counters
1870 * to avoid losing counts to overflow.
1872 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
1874 struct intel_uncore_box *box;
1875 unsigned long flags;
1878 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
1879 if (!box->n_active || box->cpu != smp_processor_id())
1880 return HRTIMER_NORESTART;
1882 * disable local interrupts to prevent uncore_pmu_event_start/stop
1883 * from interrupting the update process
1885 local_irq_save(flags);
1887 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
1888 uncore_perf_event_update(box, box->events[bit]);
1890 local_irq_restore(flags);
1892 hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
1893 return HRTIMER_RESTART;
1896 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
1898 __hrtimer_start_range_ns(&box->hrtimer,
1899 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
1900 HRTIMER_MODE_REL_PINNED, 0);
1903 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
1905 hrtimer_cancel(&box->hrtimer);
1908 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
1910 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1911 box->hrtimer.function = uncore_pmu_hrtimer;
1914 struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
1916 struct intel_uncore_box *box;
1919 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
1921 box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
1925 for (i = 0; i < type->num_shared_regs; i++)
1926 raw_spin_lock_init(&box->shared_regs[i].lock);
1928 uncore_pmu_init_hrtimer(box);
1929 atomic_set(&box->refcnt, 1);
1936 static struct intel_uncore_box *
1937 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
1939 static struct intel_uncore_box *box;
1941 box = *per_cpu_ptr(pmu->box, cpu);
1945 raw_spin_lock(&uncore_box_lock);
1946 list_for_each_entry(box, &pmu->box_list, list) {
1947 if (box->phys_id == topology_physical_package_id(cpu)) {
1948 atomic_inc(&box->refcnt);
1949 *per_cpu_ptr(pmu->box, cpu) = box;
1953 raw_spin_unlock(&uncore_box_lock);
1955 return *per_cpu_ptr(pmu->box, cpu);
1958 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
1960 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
1963 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
1966 * perf core schedules events on the basis of cpu; uncore events are
1967 * collected by one of the cpus inside a physical package.
1969 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
1973 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
1975 struct perf_event *event;
1978 max_count = box->pmu->type->num_counters;
1979 if (box->pmu->type->fixed_ctl)
1982 if (box->n_events >= max_count)
1986 box->event_list[n] = leader;
1991 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1992 if (event->state <= PERF_EVENT_STATE_OFF)
1998 box->event_list[n] = event;
2004 static struct event_constraint *
2005 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2007 struct intel_uncore_type *type = box->pmu->type;
2008 struct event_constraint *c;
2010 if (type->ops->get_constraint) {
2011 c = type->ops->get_constraint(box, event);
2016 if (event->hw.config == ~0ULL)
2017 return &constraint_fixed;
2019 if (type->constraints) {
2020 for_each_event_constraint(c, type->constraints) {
2021 if ((event->hw.config & c->cmask) == c->code)
2026 return &type->unconstrainted;
2029 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2031 if (box->pmu->type->ops->put_constraint)
2032 box->pmu->type->ops->put_constraint(box, event);
2035 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2037 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2038 struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
2039 int i, wmin, wmax, ret = 0;
2040 struct hw_perf_event *hwc;
2042 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2044 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
2045 c = uncore_get_event_constraint(box, box->event_list[i]);
2047 wmin = min(wmin, c->weight);
2048 wmax = max(wmax, c->weight);
2051 /* fastpath, try to reuse previous register */
2052 for (i = 0; i < n; i++) {
2053 hwc = &box->event_list[i]->hw;
2056 /* never assigned */
2060 /* constraint still honored */
2061 if (!test_bit(hwc->idx, c->idxmsk))
2064 /* not already used */
2065 if (test_bit(hwc->idx, used_mask))
2068 __set_bit(hwc->idx, used_mask);
2070 assign[i] = hwc->idx;
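/* slow path: let perf_assign_events() solve the remaining constraints */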
2074 ret = perf_assign_events(constraints, n, wmin, wmax, assign);
2076 if (!assign || ret) {
2077 for (i = 0; i < n; i++)
2078 uncore_put_event_constraint(box, box->event_list[i]);
2080 return ret ? -EINVAL : 0;
2083 static void uncore_pmu_event_start(struct perf_event *event, int flags)
2085 struct intel_uncore_box *box = uncore_event_to_box(event);
2086 int idx = event->hw.idx;
2088 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2091 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2094 event->hw.state = 0;
2095 box->events[idx] = event;
2097 __set_bit(idx, box->active_mask);
2099 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2100 uncore_enable_event(box, event);
2102 if (box->n_active == 1) {
2103 uncore_enable_box(box);
2104 uncore_pmu_start_hrtimer(box);
2108 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2110 struct intel_uncore_box *box = uncore_event_to_box(event);
2111 struct hw_perf_event *hwc = &event->hw;
2113 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2114 uncore_disable_event(box, event);
2116 box->events[hwc->idx] = NULL;
2117 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2118 hwc->state |= PERF_HES_STOPPED;
2120 if (box->n_active == 0) {
2121 uncore_disable_box(box);
2122 uncore_pmu_cancel_hrtimer(box);
2126 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2128	 * Drain the remaining delta count out of an event
2129 * that we are disabling:
2131 uncore_perf_event_update(box, event);
2132 hwc->state |= PERF_HES_UPTODATE;
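/*
 * For reference, the update above follows the usual free-running counter
 * pattern (a sketch; the real work is done by uncore_perf_event_update):
 *
 *	prev  = local64_read(&hwc->prev_count);
 *	cur   = uncore_read_counter(box, event);
 *	local64_set(&hwc->prev_count, cur);
 *	delta = (cur << shift) - (prev << shift);	(shift = 64 - counter width)
 *	delta >>= shift;
 *	local64_add(delta, &event->count);
 */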
2136 static int uncore_pmu_event_add(struct perf_event *event, int flags)
2138 struct intel_uncore_box *box = uncore_event_to_box(event);
2139 struct hw_perf_event *hwc = &event->hw;
2140 int assign[UNCORE_PMC_IDX_MAX];
2146 ret = n = uncore_collect_events(box, event, false);
2150 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2151 if (!(flags & PERF_EF_START))
2152 hwc->state |= PERF_HES_ARCH;
2154 ret = uncore_assign_events(box, assign, n);
2158 /* save events moving to new counters */
2159 for (i = 0; i < box->n_events; i++) {
2160 event = box->event_list[i];
2163 if (hwc->idx == assign[i] &&
2164 hwc->last_tag == box->tags[assign[i]])
2167 * Ensure we don't accidentally enable a stopped
2168 * counter simply because we rescheduled.
2170 if (hwc->state & PERF_HES_STOPPED)
2171 hwc->state |= PERF_HES_ARCH;
2173 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2176 /* reprogram moved events into new counters */
2177 for (i = 0; i < n; i++) {
2178 event = box->event_list[i];
2181 if (hwc->idx != assign[i] ||
2182 hwc->last_tag != box->tags[assign[i]])
2183 uncore_assign_hw_event(box, event, assign[i]);
2184 else if (i < box->n_events)
2187 if (hwc->state & PERF_HES_ARCH)
2190 uncore_pmu_event_start(event, 0);
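/*
 * Example of the two passes above: an event whose counter and last_tag are
 * unchanged by the new assignment is left running untouched; an event that
 * moves to a different counter is first stopped with PERF_EF_UPDATE (so its
 * pending delta is drained) and then reprogrammed and restarted on the new
 * counter.
 */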
2197 static void uncore_pmu_event_del(struct perf_event *event, int flags)
2199 struct intel_uncore_box *box = uncore_event_to_box(event);
2202 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2204 for (i = 0; i < box->n_events; i++) {
2205 if (event == box->event_list[i]) {
2206 uncore_put_event_constraint(box, event);
2208 while (++i < box->n_events)
2209 box->event_list[i - 1] = box->event_list[i];
2217 event->hw.last_tag = ~0ULL;
2220 static void uncore_pmu_event_read(struct perf_event *event)
2222 struct intel_uncore_box *box = uncore_event_to_box(event);
2223 uncore_perf_event_update(box, event);
2227 * validation ensures the group can be loaded onto the
2228 * PMU if it was the only group available.
2230 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
2231 struct perf_event *event)
2233 struct perf_event *leader = event->group_leader;
2234 struct intel_uncore_box *fake_box;
2235 int ret = -EINVAL, n;
2237 fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
2241 fake_box->pmu = pmu;
2243	 * the event is not yet connected with its
2244	 * siblings; therefore we must first collect
2245	 * existing siblings, then add the new event
2246	 * before we can simulate the scheduling.
2248 n = uncore_collect_events(fake_box, leader, true);
2252 fake_box->n_events = n;
2253 n = uncore_collect_events(fake_box, event, false);
2257 fake_box->n_events = n;
2259 ret = uncore_assign_events(fake_box, NULL, n);
2265 int uncore_pmu_event_init(struct perf_event *event)
2267 struct intel_uncore_pmu *pmu;
2268 struct intel_uncore_box *box;
2269 struct hw_perf_event *hwc = &event->hw;
2272 if (event->attr.type != event->pmu->type)
2275 pmu = uncore_event_to_pmu(event);
2276 /* no device found for this pmu */
2277 if (pmu->func_id < 0)
2281	 * The uncore PMU measures at all privilege levels all the time,
2282	 * so it doesn't make sense to specify any exclude bits.
2284 if (event->attr.exclude_user || event->attr.exclude_kernel ||
2285 event->attr.exclude_hv || event->attr.exclude_idle)
2288 /* Sampling not supported yet */
2289 if (hwc->sample_period)
2293	 * Place all uncore events for a particular physical package onto a single cpu.
2298 box = uncore_pmu_to_box(pmu, event->cpu);
2299 if (!box || box->cpu < 0)
2301 event->cpu = box->cpu;
2304 event->hw.last_tag = ~0ULL;
2305 event->hw.extra_reg.idx = EXTRA_REG_NONE;
2307 if (event->attr.config == UNCORE_FIXED_EVENT) {
2308 /* no fixed counter */
2309 if (!pmu->type->fixed_ctl)
2312 * if there is only one fixed counter, only the first pmu
2313 * can access the fixed counter
2315 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
2317 hwc->config = ~0ULL;
2319 hwc->config = event->attr.config & pmu->type->event_mask;
2320 if (pmu->type->ops->hw_config) {
2321 ret = pmu->type->ops->hw_config(box, event);
2327 if (event->group_leader != event)
2328 ret = uncore_validate_group(pmu, event);
2335 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
2339 pmu->pmu = (struct pmu) {
2340 .attr_groups = pmu->type->attr_groups,
2341 .task_ctx_nr = perf_invalid_context,
2342 .event_init = uncore_pmu_event_init,
2343 .add = uncore_pmu_event_add,
2344 .del = uncore_pmu_event_del,
2345 .start = uncore_pmu_event_start,
2346 .stop = uncore_pmu_event_stop,
2347 .read = uncore_pmu_event_read,
2350 if (pmu->type->num_boxes == 1) {
2351 if (strlen(pmu->type->name) > 0)
2352 sprintf(pmu->name, "uncore_%s", pmu->type->name);
2354 sprintf(pmu->name, "uncore");
2356 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
2360 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
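/*
 * The registered names appear under /sys/bus/event_source/devices/, e.g.
 * "uncore" for a single anonymous box or "uncore_cbox_0" .. "uncore_cbox_3"
 * for a multi-box type (names illustrative; they depend on the uncore type),
 * and can be used directly as the pmu part of a perf event specifier.
 */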
2364 static void __init uncore_type_exit(struct intel_uncore_type *type)
2368 for (i = 0; i < type->num_boxes; i++)
2369 free_percpu(type->pmus[i].box);
2372 kfree(type->attr_groups[1]);
2373 type->attr_groups[1] = NULL;
2376 static void uncore_types_exit(struct intel_uncore_type **types)
2379 for (i = 0; types[i]; i++)
2380 uncore_type_exit(types[i]);
2383 static int __init uncore_type_init(struct intel_uncore_type *type)
2385 struct intel_uncore_pmu *pmus;
2386 struct attribute_group *events_group;
2387 struct attribute **attrs;
2390 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
2394 type->unconstrainted = (struct event_constraint)
2395 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
2396 0, type->num_counters, 0);
2398 for (i = 0; i < type->num_boxes; i++) {
2399 pmus[i].func_id = -1;
2400 pmus[i].pmu_idx = i;
2401 pmus[i].type = type;
2402 INIT_LIST_HEAD(&pmus[i].box_list);
2403 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
2408 if (type->event_descs) {
2410 while (type->event_descs[i].attr.attr.name)
2413 events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
2414 sizeof(*events_group), GFP_KERNEL);
2418 attrs = (struct attribute **)(events_group + 1);
2419 events_group->name = "events";
2420 events_group->attrs = attrs;
2422 for (j = 0; j < i; j++)
2423 attrs[j] = &type->event_descs[j].attr.attr;
2425 type->attr_groups[1] = events_group;
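/*
 * Each entry of type->event_descs becomes a read-only file in the pmu's
 * "events" sysfs directory.  A hypothetical descriptor such as
 *
 *	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *
 * would show up as .../events/clockticks containing "event=0xff", which the
 * perf tool expands against the "format" attribute group.
 */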
2431 uncore_type_exit(type);
2435 static int __init uncore_types_init(struct intel_uncore_type **types)
2439 for (i = 0; types[i]; i++) {
2440 ret = uncore_type_init(types[i]);
2447 uncore_type_exit(types[i]);
2451 static struct pci_driver *uncore_pci_driver;
2452 static bool pcidrv_registered;
2455 * add a pci uncore device
2457 static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
2459 struct intel_uncore_pmu *pmu;
2460 struct intel_uncore_box *box;
2463 phys_id = pcibus_to_physid[pdev->bus->number];
2467 box = uncore_alloc_box(type, 0);
2472	 * for a performance monitoring unit with multiple boxes,
2473 * each box has a different function id.
2475 for (i = 0; i < type->num_boxes; i++) {
2476 pmu = &type->pmus[i];
2477 if (pmu->func_id == pdev->devfn)
2479 if (pmu->func_id < 0) {
2480 pmu->func_id = pdev->devfn;
2491 box->phys_id = phys_id;
2492 box->pci_dev = pdev;
2494 uncore_box_init(box);
2495 pci_set_drvdata(pdev, box);
2497 raw_spin_lock(&uncore_box_lock);
2498 list_add_tail(&box->list, &pmu->box_list);
2499 raw_spin_unlock(&uncore_box_lock);
2504 static void uncore_pci_remove(struct pci_dev *pdev)
2506 struct intel_uncore_box *box = pci_get_drvdata(pdev);
2507 struct intel_uncore_pmu *pmu = box->pmu;
2508 int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
2510 if (WARN_ON_ONCE(phys_id != box->phys_id))
2513 raw_spin_lock(&uncore_box_lock);
2514 list_del(&box->list);
2515 raw_spin_unlock(&uncore_box_lock);
2517 for_each_possible_cpu(cpu) {
2518 if (*per_cpu_ptr(pmu->box, cpu) == box) {
2519 *per_cpu_ptr(pmu->box, cpu) = NULL;
2520 atomic_dec(&box->refcnt);
2524 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
2528 static int __devinit uncore_pci_probe(struct pci_dev *pdev,
2529 const struct pci_device_id *id)
2531 struct intel_uncore_type *type;
2533 type = (struct intel_uncore_type *)id->driver_data;
2535 return uncore_pci_add(type, pdev);
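/*
 * The uncore type reaches the probe routine through the id table's
 * driver_data.  An entry looks roughly like this (device id and type name
 * hypothetical):
 *
 *	{
 *		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c44),
 *		.driver_data = (unsigned long)&snbep_uncore_ha,
 *	},
 *
 * so one pci_driver can service every uncore device on the platform.
 */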
2538 static int __init uncore_pci_init(void)
2542 switch (boot_cpu_data.x86_model) {
2543 case 45: /* Sandy Bridge-EP */
2544 pci_uncores = snbep_pci_uncores;
2545 uncore_pci_driver = &snbep_uncore_pci_driver;
2546 snbep_pci2phy_map_init();
2552 ret = uncore_types_init(pci_uncores);
2556 uncore_pci_driver->probe = uncore_pci_probe;
2557 uncore_pci_driver->remove = uncore_pci_remove;
2559 ret = pci_register_driver(uncore_pci_driver);
2561 pcidrv_registered = true;
2563 uncore_types_exit(pci_uncores);
2568 static void __init uncore_pci_exit(void)
2570 if (pcidrv_registered) {
2571 pcidrv_registered = false;
2572 pci_unregister_driver(uncore_pci_driver);
2573 uncore_types_exit(pci_uncores);
2577 static void __cpuinit uncore_cpu_dying(int cpu)
2579 struct intel_uncore_type *type;
2580 struct intel_uncore_pmu *pmu;
2581 struct intel_uncore_box *box;
2584 for (i = 0; msr_uncores[i]; i++) {
2585 type = msr_uncores[i];
2586 for (j = 0; j < type->num_boxes; j++) {
2587 pmu = &type->pmus[j];
2588 box = *per_cpu_ptr(pmu->box, cpu);
2589 *per_cpu_ptr(pmu->box, cpu) = NULL;
2590 if (box && atomic_dec_and_test(&box->refcnt))
2596 static int __cpuinit uncore_cpu_starting(int cpu)
2598 struct intel_uncore_type *type;
2599 struct intel_uncore_pmu *pmu;
2600 struct intel_uncore_box *box, *exist;
2601 int i, j, k, phys_id;
2603 phys_id = topology_physical_package_id(cpu);
2605 for (i = 0; msr_uncores[i]; i++) {
2606 type = msr_uncores[i];
2607 for (j = 0; j < type->num_boxes; j++) {
2608 pmu = &type->pmus[j];
2609 box = *per_cpu_ptr(pmu->box, cpu);
2610	/* box already prepared with a valid phys_id by uncore_cpu_init()? */
2611 if (box && box->phys_id >= 0) {
2612 uncore_box_init(box);
2616 for_each_online_cpu(k) {
2617 exist = *per_cpu_ptr(pmu->box, k);
2618 if (exist && exist->phys_id == phys_id) {
2619 atomic_inc(&exist->refcnt);
2620 *per_cpu_ptr(pmu->box, cpu) = exist;
2628 box->phys_id = phys_id;
2629 uncore_box_init(box);
2636 static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
2638 struct intel_uncore_type *type;
2639 struct intel_uncore_pmu *pmu;
2640 struct intel_uncore_box *box;
2643 for (i = 0; msr_uncores[i]; i++) {
2644 type = msr_uncores[i];
2645 for (j = 0; j < type->num_boxes; j++) {
2646 pmu = &type->pmus[j];
2647 if (pmu->func_id < 0)
2650 box = uncore_alloc_box(type, cpu);
2655 box->phys_id = phys_id;
2656 *per_cpu_ptr(pmu->box, cpu) = box;
2662 static void __cpuinit
2663 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
2665 struct intel_uncore_type *type;
2666 struct intel_uncore_pmu *pmu;
2667 struct intel_uncore_box *box;
2670 for (i = 0; uncores[i]; i++) {
2672 for (j = 0; j < type->num_boxes; j++) {
2673 pmu = &type->pmus[j];
2675 box = uncore_pmu_to_box(pmu, new_cpu);
2677 box = uncore_pmu_to_box(pmu, old_cpu);
2682 WARN_ON_ONCE(box->cpu != -1);
2687 WARN_ON_ONCE(box->cpu != old_cpu);
2689 uncore_pmu_cancel_hrtimer(box);
2690 perf_pmu_migrate_context(&pmu->pmu,
2700 static void __cpuinit uncore_event_exit_cpu(int cpu)
2702 int i, phys_id, target;
2704	/* if the exiting cpu was used for collecting uncore events */
2705 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
2708 /* find a new cpu to collect uncore events */
2709 phys_id = topology_physical_package_id(cpu);
2711 for_each_online_cpu(i) {
2714 if (phys_id == topology_physical_package_id(i)) {
2720 /* migrate uncore events to the new cpu */
2722 cpumask_set_cpu(target, &uncore_cpu_mask);
2724 uncore_change_context(msr_uncores, cpu, target);
2725 uncore_change_context(pci_uncores, cpu, target);
2728 static void __cpuinit uncore_event_init_cpu(int cpu)
2732 phys_id = topology_physical_package_id(cpu);
2733 for_each_cpu(i, &uncore_cpu_mask) {
2734 if (phys_id == topology_physical_package_id(i))
2738 cpumask_set_cpu(cpu, &uncore_cpu_mask);
2740 uncore_change_context(msr_uncores, -1, cpu);
2741 uncore_change_context(pci_uncores, -1, cpu);
2745 __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
2747 unsigned int cpu = (long)hcpu;
2749 /* allocate/free data structure for uncore box */
2750 switch (action & ~CPU_TASKS_FROZEN) {
2751 case CPU_UP_PREPARE:
2752 uncore_cpu_prepare(cpu, -1);
2755 uncore_cpu_starting(cpu);
2757 case CPU_UP_CANCELED:
2759 uncore_cpu_dying(cpu);
2765 /* select the cpu that collects uncore events */
2766 switch (action & ~CPU_TASKS_FROZEN) {
2767 case CPU_DOWN_FAILED:
2769 uncore_event_init_cpu(cpu);
2771 case CPU_DOWN_PREPARE:
2772 uncore_event_exit_cpu(cpu);
2781 static struct notifier_block uncore_cpu_nb __cpuinitdata = {
2782 .notifier_call = uncore_cpu_notifier,
2784 * to migrate uncore events, our notifier should be executed
2785 * before perf core's notifier.
2787 .priority = CPU_PRI_PERF + 1,
2790 static void __init uncore_cpu_setup(void *dummy)
2792 uncore_cpu_starting(smp_processor_id());
2795 static int __init uncore_cpu_init(void)
2797 int ret, cpu, max_cores;
2799 max_cores = boot_cpu_data.x86_max_cores;
2800 switch (boot_cpu_data.x86_model) {
2801 case 26: /* Nehalem */
2803 case 37: /* Westmere */
2805 msr_uncores = nhm_msr_uncores;
2807 case 42: /* Sandy Bridge */
2808 if (snb_uncore_cbox.num_boxes > max_cores)
2809 snb_uncore_cbox.num_boxes = max_cores;
2810 msr_uncores = snb_msr_uncores;
2812	case 45: /* Sandy Bridge-EP */
2813 if (snbep_uncore_cbox.num_boxes > max_cores)
2814 snbep_uncore_cbox.num_boxes = max_cores;
2815 msr_uncores = snbep_msr_uncores;
2818 msr_uncores = nhmex_msr_uncores;
2824 ret = uncore_types_init(msr_uncores);
2830 for_each_online_cpu(cpu) {
2831 int i, phys_id = topology_physical_package_id(cpu);
2833 for_each_cpu(i, &uncore_cpu_mask) {
2834 if (phys_id == topology_physical_package_id(i)) {
2842 uncore_cpu_prepare(cpu, phys_id);
2843 uncore_event_init_cpu(cpu);
2845 on_each_cpu(uncore_cpu_setup, NULL, 1);
2847 register_cpu_notifier(&uncore_cpu_nb);
2854 static int __init uncore_pmus_register(void)
2856 struct intel_uncore_pmu *pmu;
2857 struct intel_uncore_type *type;
2860 for (i = 0; msr_uncores[i]; i++) {
2861 type = msr_uncores[i];
2862 for (j = 0; j < type->num_boxes; j++) {
2863 pmu = &type->pmus[j];
2864 uncore_pmu_register(pmu);
2868 for (i = 0; pci_uncores[i]; i++) {
2869 type = pci_uncores[i];
2870 for (j = 0; j < type->num_boxes; j++) {
2871 pmu = &type->pmus[j];
2872 uncore_pmu_register(pmu);
2879 static int __init intel_uncore_init(void)
2883 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2886 ret = uncore_pci_init();
2889 ret = uncore_cpu_init();
2895 uncore_pmus_register();
2900 device_initcall(intel_uncore_init);