// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC		0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC		0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC		0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC		0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC		0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC		0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC		0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC		0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC		0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC		0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC		0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC		0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC		0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC		0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC		0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC		0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC		0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC		0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC		0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC		0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC		0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC		0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC		0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC		0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC		0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC		0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC		0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC		0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC		0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC		0x4670
#define PCI_DEVICE_ID_INTEL_RPL_1_IMC		0xA700
#define PCI_DEVICE_ID_INTEL_RPL_2_IMC		0xA702
#define PCI_DEVICE_ID_INTEL_RPL_3_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_4_IMC		0xA709

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG			0x396
#define ICL_UNC_NUM_CBO_MASK			0xf
#define ICL_UNC_CBO_0_PER_CTR0			0x702
#define ICL_UNC_CBO_MSR_OFFSET			0x8

/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR			0x3b1
#define ICL_UNC_ARB_PERFEVTSEL			0x3b3

/* ADL uncore global control */
#define ADL_UNC_PERF_GLOBAL_CTL			0x2ff0
#define ADL_UNC_FIXED_CTR_CTRL			0x2fde
#define ADL_UNC_FIXED_CTR			0x2fdf

/* ADL Cbo register */
#define ADL_UNC_CBO_0_PER_CTR0			0x2002
#define ADL_UNC_CBO_0_PERFEVTSEL0		0x2000
#define ADL_UNC_CTL_THRESHOLD			0x3f000000
#define ADL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 ADL_UNC_CTL_THRESHOLD)

/* ADL ARB register */
#define ADL_UNC_ARB_PER_CTR0			0x2FD2
#define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
#define ADL_UNC_ARB_MSR_OFFSET			0x8

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
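
/*
 * The format attributes above are exported through sysfs
 * (/sys/bus/event_source/devices/<pmu>/format/) so that user-space tools can
 * assemble raw event encodings from named fields. As a sketch (the event and
 * umask values here are hypothetical, not taken from this file):
 *
 *	perf stat -e 'uncore_cbox_0/event=0x80,umask=0x01/' -a -- sleep 1
 *
 * places 0x80 into config:0-7 and 0x01 into config:8-15 per the bit ranges
 * declared above.
 */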

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	/* There is one C-box per core; never expose more boxes than cores. */
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};

static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static int icl_get_cbox_num(void)
{
	u64 num_boxes;

	/* The number of available C-boxes is enumerated by the CBO_CONFIG MSR. */
	rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

	return num_boxes & ICL_UNC_NUM_CBO_MASK;
}

void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}
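
/*
 * Tiger Lake reuses the Ice Lake C-box and clock-box types, but (as wired up
 * in tgl_uncore_cpu_init() below) drives them through the SKL-style MSR ops
 * with a Rocket Lake/Tiger Lake variant of the global-control init.
 */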

static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}

static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box	= adl_uncore_msr_init_box,
	.enable_box	= adl_uncore_msr_enable_box,
	.disable_box	= adl_uncore_msr_disable_box,
	.exit_box	= adl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,
	NULL,
};

static const struct attribute_group adl_uncore_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_formats_attr,
};

static struct intel_uncore_type adl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= ADL_UNC_FIXED_CTR,
	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &adl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};

void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
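
/*
 * The IMC counters above count 64-byte cache lines, so the scale factor that
 * converts a raw count into MiB is 64 / 2^20 = 6.103515625e-5:
 *
 *	bytes = count * 64
 *	MiB   = bytes / (1024 * 1024)
 */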

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW breakdown - legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS		0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE	0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS		0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE	0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS		0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE	0x5048

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
};
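
/*
 * Each entry above is { counter base, counter offset, box offset, number of
 * counters, counter width }. These are free-running 32-bit MMIO counters:
 * they cannot be started or stopped, so the driver samples them periodically
 * (see the hrtimer interval set in snb_uncore_imc_init_box() below) to catch
 * wraparound.
 */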

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

/*
 * The IMC counters are free running; there is nothing to enable, disable,
 * start or stop, so these callbacks are intentionally empty.
 */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/*
	 * Convert to standard encoding format for freerunning counters,
	 * e.g. legacy cfg 0x1 (data_reads) becomes 0x10ff (event=0xff,
	 * umask=0x10) and cfg 0x2 (data_writes) becomes 0x11ff.
	 */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_dieid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size	= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};

#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),  /* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{ /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */

/* Tiger Lake MMIO uncore support */

static const struct pci_device_id tgl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_5_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_6_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_7_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_8_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_9_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_10_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_11_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_12_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_13_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_14_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_15_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_16_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ }
};

enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};

static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};

static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,       "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,  "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,        "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,   "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,       "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,  "MiB"),

	{ /* end: all zeroes */ }
};

static struct pci_dev *tgl_uncore_get_mc_dev(void)
{
	const struct pci_device_id *ids = tgl_uncore_pci_ids;
	struct pci_dev *mc_dev = NULL;

	while (ids && ids->vendor) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
		if (mc_dev)
			return mc_dev;
		ids++;
	}

	return mc_dev;
}

#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000
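
/*
 * Each memory controller's register block sits TGL_UNCORE_MMIO_IMC_MEM_OFFSET
 * (64KiB) beyond the previous one inside the MCHBAR window, so
 * __uncore_imc_init_box() below selects the controller by scaling that offset
 * with pmu->pmu_idx.
 */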

static void __uncore_imc_init_box(struct intel_uncore_box *box,
				  unsigned int base_offset)
{
	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
	struct intel_uncore_pmu *pmu = box->pmu;
	struct intel_uncore_type *type = pmu->type;
	resource_size_t addr;
	u32 mch_bar;

	if (!pdev) {
		pr_warn("perf uncore: Cannot find matched IMC device.\n");
		return;
	}

	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
	/* MCHBAR is disabled */
	if (!(mch_bar & BIT(0))) {
		pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
		return;
	}
	mch_bar &= ~BIT(0);
	addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
	addr |= ((resource_size_t)mch_bar << 32);
#endif

	addr += base_offset;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}

static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}

static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};

static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL,
};

void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)
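
/*
 * As used below: adl_uncore_imc_init_box() resets both the control and the
 * counter registers through the MC1 global control, while the box
 * disable/enable callbacks toggle the FRZ (freeze) bit.
 */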

static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

	/* The global control in MC1 can control both MCs. */
	if (box->io_addr && (box->pmu->pmu_idx == 1))
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}

static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= adl_uncore_mmio_disable_box,
	.enable_box	= adl_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

#define ADL_UNC_CTL_CHMASK_MASK			0x00000f00
#define ADL_UNC_IMC_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 ADL_UNC_CTL_CHMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET)

static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_imc_formats_attr,
};

static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};

enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xA0, 0x0, 0x0, 1, 64 },
};

static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL,
};

void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */