// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC             0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC             0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC          0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC             0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC           0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC             0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC           0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC           0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC          0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC          0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC          0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC          0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC          0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC           0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC           0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC          0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC          0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC          0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC          0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC          0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC          0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC          0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC          0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC          0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC        0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC        0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC        0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC        0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC        0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC        0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC        0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC        0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC        0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC        0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC          0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC          0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC          0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC        0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC          0x3e35
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC          0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC          0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC          0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC          0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC          0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC          0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC          0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC          0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC          0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC          0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC          0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC           0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC          0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC          0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC          0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC          0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC          0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC           0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC           0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC           0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC           0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC           0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC           0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC           0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC           0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC           0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC           0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC           0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC           0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC          0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC          0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC          0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC          0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC          0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC          0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC          0x4670
#define PCI_DEVICE_ID_INTEL_RPL_1_IMC           0xA700
#define PCI_DEVICE_ID_INTEL_RPL_2_IMC           0xA702
#define PCI_DEVICE_ID_INTEL_RPL_3_IMC           0xA706
#define PCI_DEVICE_ID_INTEL_RPL_4_IMC           0xA709

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
#define SNB_UNC_CTL_EN                          (1 << 22)
#define SNB_UNC_CTL_INVERT                      (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
#define NHM_UNC_CTL_CMASK_MASK                  0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)
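/*
 * For reference, SNB_UNC_GLOBAL_CTL_CORE_ALL is (1 << 4) - 1 = 0xf, i.e.
 * the four low enable bits set at once (per the CORE_ALL name, presumably
 * one per core; the SKL variant further down widens this to five bits).
 */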

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10
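/*
 * Box N's counter/control MSRs sit at the box-0 addresses plus
 * N * SNB_UNC_CBO_MSR_OFFSET; the generic uncore code uses this stride
 * to locate each Cbo.
 */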

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0                    0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
#define SNB_UNC_ARB_MSR_OFFSET                  0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL                 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG                      0x396
#define ICL_UNC_NUM_CBO_MASK                    0xf
#define ICL_UNC_CBO_0_PER_CTR0                  0x702
#define ICL_UNC_CBO_MSR_OFFSET                  0x8

/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR                     0x3b1
#define ICL_UNC_ARB_PERFEVTSEL                  0x3b3

/* ADL uncore global control */
#define ADL_UNC_PERF_GLOBAL_CTL                 0x2ff0
#define ADL_UNC_FIXED_CTR_CTRL                  0x2fde
#define ADL_UNC_FIXED_CTR                       0x2fdf

/* ADL Cbo register */
#define ADL_UNC_CBO_0_PER_CTR0                  0x2002
#define ADL_UNC_CBO_0_PERFEVTSEL0               0x2000
#define ADL_UNC_CTL_THRESHOLD                   0x3f000000
#define ADL_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 ADL_UNC_CTL_THRESHOLD)

/* ADL ARB register */
#define ADL_UNC_ARB_PER_CTR0                    0x2FD2
#define ADL_UNC_ARB_PERFEVTSEL0                 0x2FD0
#define ADL_UNC_ARB_MSR_OFFSET                  0x8

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
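/*
 * These attributes surface in sysfs, e.g.
 * /sys/bus/event_source/devices/uncore_cbox_0/format/event, so tooling
 * can compose raw configs by field name. An illustrative invocation
 * (event/umask values are examples, not a recommendation):
 *
 *   perf stat -a -e 'uncore_cbox_0/event=0x00,umask=0x01/'
 *
 * where "event" lands in config:0-7 and "umask" in config:8-15 as
 * declared above.
 */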

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static const struct attribute_group snb_uncore_format_group = {
        .name           = "format",
        .attrs          = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
        .enable_box     = snb_uncore_msr_enable_box,
        .exit_box       = snb_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .num_boxes      = 4,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
        .name           = "arb",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .perf_ctr       = SNB_UNC_ARB_PER_CTR0,
        .event_ctl      = SNB_UNC_ARB_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_ARB_MSR_OFFSET,
        .constraints    = snb_uncore_arb_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void snb_uncore_cpu_init(void)
{
        uncore_msr_uncores = snb_msr_uncores;
        if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
        }

        /* The 8th CBOX has different MSR space */
        if (box->pmu->pmu_idx == 7)
                __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box       = skl_uncore_msr_init_box,
        .enable_box     = skl_uncore_msr_enable_box,
        .exit_box       = skl_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &skl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
        &skl_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void skl_uncore_cpu_init(void)
{
        uncore_msr_uncores = skl_msr_uncores;
        if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

static struct intel_uncore_ops icl_uncore_msr_ops = {
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct intel_uncore_type icl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .perf_ctr_bits  = 44,
        .perf_ctr       = ICL_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = ICL_UNC_CBO_MSR_OFFSET,
        .ops            = &icl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
        { /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
        .name = "format",
        .attrs = icl_uncore_clock_formats_attr,
};

static struct intel_uncore_type icl_uncore_clockbox = {
        .name           = "clock",
        .num_counters   = 1,
        .num_boxes      = 1,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_CTL_EV_SEL_MASK,
        .format_group   = &icl_uncore_clock_format_group,
        .ops            = &icl_uncore_msr_ops,
        .event_descs    = icl_uncore_events,
};

static struct intel_uncore_type icl_uncore_arb = {
        .name           = "arb",
        .num_counters   = 1,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .perf_ctr       = ICL_UNC_ARB_PER_CTR,
        .event_ctl      = ICL_UNC_ARB_PERFEVTSEL,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .ops            = &icl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
        &icl_uncore_cbox,
        &icl_uncore_arb,
        &icl_uncore_clockbox,
        NULL,
};

static int icl_get_cbox_num(void)
{
        u64 num_boxes;

        rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

        return num_boxes & ICL_UNC_NUM_CBO_MASK;
}

void icl_uncore_cpu_init(void)
{
        uncore_msr_uncores = icl_msr_uncores;
        icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}

static struct intel_uncore_type *tgl_msr_uncores[] = {
        &icl_uncore_cbox,
        &snb_uncore_arb,
        &icl_uncore_clockbox,
        NULL,
};

static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

void tgl_uncore_cpu_init(void)
{
        uncore_msr_uncores = tgl_msr_uncores;
        icl_uncore_cbox.num_boxes = icl_get_cbox_num();
        icl_uncore_cbox.ops = &skl_uncore_msr_ops;
        icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
        skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}

static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops adl_uncore_msr_ops = {
        .init_box       = adl_uncore_msr_init_box,
        .enable_box     = adl_uncore_msr_enable_box,
        .disable_box    = adl_uncore_msr_disable_box,
        .exit_box       = adl_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct attribute *adl_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_threshold.attr,
        NULL,
};

static const struct attribute_group adl_uncore_format_group = {
        .name           = "format",
        .attrs          = adl_uncore_formats_attr,
};

static struct intel_uncore_type adl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .perf_ctr_bits  = 44,
        .perf_ctr       = ADL_UNC_CBO_0_PER_CTR0,
        .event_ctl      = ADL_UNC_CBO_0_PERFEVTSEL0,
        .event_mask     = ADL_UNC_RAW_EVENT_MASK,
        .msr_offset     = ICL_UNC_CBO_MSR_OFFSET,
        .ops            = &adl_uncore_msr_ops,
        .format_group   = &adl_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_arb = {
        .name           = "arb",
        .num_counters   = 2,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .perf_ctr       = ADL_UNC_ARB_PER_CTR0,
        .event_ctl      = ADL_UNC_ARB_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = ADL_UNC_ARB_MSR_OFFSET,
        .constraints    = snb_uncore_arb_constraints,
        .ops            = &adl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_clockbox = {
        .name           = "clock",
        .num_counters   = 1,
        .num_boxes      = 1,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = ADL_UNC_FIXED_CTR,
        .fixed_ctl      = ADL_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_CTL_EV_SEL_MASK,
        .format_group   = &icl_uncore_clock_format_group,
        .ops            = &adl_uncore_msr_ops,
        .event_descs    = icl_uncore_events,
};

static struct intel_uncore_type *adl_msr_uncores[] = {
        &adl_uncore_cbox,
        &adl_uncore_arb,
        &adl_uncore_clockbox,
        NULL,
};

void adl_uncore_cpu_init(void)
{
        adl_uncore_cbox.num_boxes = icl_get_cbox_num();
        uncore_msr_uncores = adl_msr_uncores;
}

enum {
        SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
        INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
        INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
        INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

        { /* end: all zeroes */ },
};
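/*
 * The scale factor above turns a count of 64-byte cache lines into MiB:
 * 64 / 2^20 = 6.103515625e-5.
 */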

#define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW break down- legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS          0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE     0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS          0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE     0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS          0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE     0x5048

enum perf_snb_uncore_imc_freerunning_types {
        SNB_PCI_UNCORE_IMC_DATA_READS           = 0,
        SNB_PCI_UNCORE_IMC_DATA_WRITES,
        SNB_PCI_UNCORE_IMC_GT_REQUESTS,
        SNB_PCI_UNCORE_IMC_IA_REQUESTS,
        SNB_PCI_UNCORE_IMC_IO_REQUESTS,

        SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snb_uncore_imc_freerunning[] = {
        [SNB_PCI_UNCORE_IMC_DATA_READS]         = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
                                                        0x0, 0x0, 1, 32 },
        [SNB_PCI_UNCORE_IMC_DATA_WRITES]        = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
                                                        0x0, 0x0, 1, 32 },
        [SNB_PCI_UNCORE_IMC_GT_REQUESTS]        = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
                                                        0x0, 0x0, 1, 32 },
        [SNB_PCI_UNCORE_IMC_IA_REQUESTS]        = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
                                                        0x0, 0x0, 1, 32 },
        [SNB_PCI_UNCORE_IMC_IO_REQUESTS]        = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
                                                        0x0, 0x0, 1, 32 },
};
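/*
 * Each initializer above reads as { counter_base, counter_offset,
 * box_offset, num_counters, bits } per struct freerunning_counters in
 * uncore.h: one 32-bit free-running counter per type, with no
 * per-counter or per-box stride.
 */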

static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
        struct intel_uncore_type *type = box->pmu->type;
        struct pci_dev *pdev = box->pci_dev;
        int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
        resource_size_t addr;
        u32 pci_dword;

        pci_read_config_dword(pdev, where, &pci_dword);
        addr = pci_dword;

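        /*
         * On 64-bit phys-addr configs the IMC BAR spans two dwords; fold
         * in the high half before using the address.
         */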
#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, where + 4, &pci_dword);
        addr |= ((resource_size_t)pci_dword << 32);
#endif

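        /* Align down to a page boundary; the low BAR bits carry flags, not address. */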
        addr &= ~(PAGE_SIZE - 1);

        box->io_addr = ioremap(addr, type->mmio_map_size);
        if (!box->io_addr)
                pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

        box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        /*
         * check event is known (whitelist, determines counter)
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
                base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
                base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
                base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.idx = idx;

        /* Convert to standard encoding format for freerunning counters */
        event->hw.config = ((cfg - 1) << 8) | 0x10ff;
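        /*
         * e.g. data_reads (cfg == 0x1) becomes 0x10ff: event select 0xff
         * with umask 0x10; data_writes (cfg == 0x2) becomes 0x11ff, and
         * so on down the whitelist above.
         */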

        /* no group validation needed, we have free running counters */

        return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}

int snb_pci2phy_map_init(int devid)
{
        struct pci_dev *dev = NULL;
        struct pci2phy_map *map;
        int bus, segment;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;

        bus = dev->bus->number;
        segment = pci_domain_nr(dev->bus);

        raw_spin_lock(&pci2phy_map_lock);
        map = __find_pci2phy_map(segment);
        if (!map) {
                raw_spin_unlock(&pci2phy_map_lock);
                pci_dev_put(dev);
                return -ENOMEM;
        }
        map->pbus_to_dieid[bus] = 0;
        raw_spin_unlock(&pci2phy_map_lock);

        pci_dev_put(dev);

        return 0;
}

static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
        .add            = uncore_pmu_event_add,
        .del            = uncore_pmu_event_del,
        .start          = uncore_pmu_event_start,
        .stop           = uncore_pmu_event_stop,
        .read           = uncore_pmu_event_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box       = snb_uncore_imc_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .enable_box     = snb_uncore_imc_enable_box,
        .disable_box    = snb_uncore_imc_disable_box,
        .disable_event  = snb_uncore_imc_disable_event,
        .enable_event   = snb_uncore_imc_enable_event,
        .hw_config      = snb_uncore_imc_hw_config,
        .read_counter   = uncore_mmio_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
        .name           = "imc",
        .num_counters   = 5,
        .num_boxes      = 1,
        .num_freerunning_types  = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
        .mmio_map_size  = SNB_UNCORE_PCI_IMC_MAP_SIZE,
        .freerunning    = snb_uncore_imc_freerunning,
        .event_descs    = snb_uncore_imc_events,
        .format_group   = &snb_uncore_imc_format_group,
        .ops            = &snb_uncore_imc_ops,
        .pmu            = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
        NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id icl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_1_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
        .name           = "ivb_uncore",
        .id_table       = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
        .name           = "hsw_uncore",
        .id_table       = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
        .name           = "bdw_uncore",
        .id_table       = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
        .name           = "skl_uncore",
        .id_table       = skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
        .name           = "icl_uncore",
        .id_table       = icl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
        { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
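/*
 * For reference, IMC_DEV(SNB_IMC, &snb_uncore_pci_driver) expands to
 * { .pci_id = PCI_DEVICE_ID_INTEL_SNB_IMC, .driver = &snb_uncore_pci_driver }.
 */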

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
        IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
        IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
        IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
        IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
        IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
        IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
        IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
        IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
        IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
        IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
        IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
        IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
        IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
        IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
        IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
        IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
        IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
        IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
        IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
        IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
        IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
        IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
        IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
        IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
        IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
        IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
        IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Dual Core */
        IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Quad Core */
        IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Quad Core */
        IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U Mobile Quad Core */
        IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Dual Core */
        IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
        IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
        IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),     /* 10th Gen Core Mobile */
        IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),    /* 10th Gen Core Mobile */
        IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
        IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
        {  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
        for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
        const struct imc_uncore_pci_dev *p;
        int ret;

        for_each_imc_pci_id(p, desktop_imc_pci_ids) {
                ret = snb_pci2phy_map_init(p->pci_id);
                if (ret == 0)
                        return p->driver;
        }
        return NULL;
}

static int imc_uncore_pci_init(void)
{
        struct pci_driver *imc_drv = imc_uncore_find_dev();

        if (!imc_drv)
                return -ENODEV;

        uncore_pci_uncores = snb_pci_uncores;
        uncore_pci_driver = imc_drv;

        return 0;
}

int snb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask8.attr,
        NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
        .name = "format",
        .attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
        .disable_box    = nhm_uncore_msr_disable_box,
        .enable_box     = nhm_uncore_msr_enable_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = nhm_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

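/*
 * Note the empty type name below: with a single box and no name, the
 * generic uncore code registers this PMU as plain "uncore" rather than
 * "uncore_<name>".
 */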
1298 static struct intel_uncore_type nhm_uncore = {
1299         .name           = "",
1300         .num_counters   = 8,
1301         .num_boxes      = 1,
1302         .perf_ctr_bits  = 48,
1303         .fixed_ctr_bits = 48,
1304         .event_ctl      = NHM_UNC_PERFEVTSEL0,
1305         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
1306         .fixed_ctr      = NHM_UNC_FIXED_CTR,
1307         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
1308         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
1309         .event_descs    = nhm_uncore_events,
1310         .ops            = &nhm_uncore_msr_ops,
1311         .format_group   = &nhm_uncore_format_group,
1312 };

static struct intel_uncore_type *nhm_msr_uncores[] = {
        &nhm_uncore,
        NULL,
};

void nhm_uncore_cpu_init(void)
{
        uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */

/* Tiger Lake MMIO uncore support */

static const struct pci_device_id tgl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_1_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_4_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_5_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_6_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_7_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_8_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_9_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_10_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_11_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_12_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_13_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_14_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_15_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_16_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_1_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_2_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_4_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ }
};
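/*
 * Besides normal PCI matching, this table doubles as the search list
 * that tgl_uncore_get_mc_dev() below walks to find whichever IMC
 * device variant is present in the running system.
 */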

enum perf_tgl_uncore_imc_freerunning_types {
        TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
        TGL_MMIO_UNCORE_IMC_DATA_READ,
        TGL_MMIO_UNCORE_IMC_DATA_WRITE,
        TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
        [TGL_MMIO_UNCORE_IMC_DATA_TOTAL]        = { 0x5040, 0x0, 0x0, 1, 64 },
        [TGL_MMIO_UNCORE_IMC_DATA_READ]         = { 0x5058, 0x0, 0x0, 1, 64 },
        [TGL_MMIO_UNCORE_IMC_DATA_WRITE]        = { 0x50A0, 0x0, 0x0, 1, 64 },
};

static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
        [TGL_MMIO_UNCORE_IMC_DATA_TOTAL]        = { 0xd840, 0x0, 0x0, 1, 64 },
        [TGL_MMIO_UNCORE_IMC_DATA_READ]         = { 0xd858, 0x0, 0x0, 1, 64 },
        [TGL_MMIO_UNCORE_IMC_DATA_WRITE]        = { 0xd8A0, 0x0, 0x0, 1, 64 },
};
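/*
 * Each entry reads as { counter base, counter offset, box offset,
 * number of counters, width in bits } per struct freerunning_counters:
 * one 64-bit free-running counter per event type, with the MCHBAR
 * offsets differing between the -L parts and the rest of the family.
 */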

static struct uncore_event_desc tgl_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
        INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),

        { /* end: all zeroes */ }
};
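/*
 * The scale 6.103515625e-5 is 64/2^20: each counter increment stands
 * for one 64-byte cache line, and the scale converts raw counts to the
 * advertised MiB unit. With two IMC boxes (see num_boxes below), the
 * PMUs register with an index suffix, so a count can be read with e.g.:
 *
 *   perf stat -a -e uncore_imc_free_running_0/data_read/ sleep 1
 */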

static struct pci_dev *tgl_uncore_get_mc_dev(void)
{
        const struct pci_device_id *ids = tgl_uncore_pci_ids;
        struct pci_dev *mc_dev = NULL;

        while (ids && ids->vendor) {
                mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
                if (mc_dev)
                        return mc_dev;
                ids++;
        }

        return mc_dev;
}

#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET          0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE             0xe000

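/*
 * Map the IMC counter block that lives behind MCHBAR: read the
 * (possibly 64-bit) MCHBAR base from PCI config space, step to this
 * box's region (boxes sit TGL_UNCORE_MMIO_IMC_MEM_OFFSET apart), and
 * ioremap base_offset bytes into it.
 */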
static void __uncore_imc_init_box(struct intel_uncore_box *box,
                                  unsigned int base_offset)
{
        struct pci_dev *pdev = tgl_uncore_get_mc_dev();
        struct intel_uncore_pmu *pmu = box->pmu;
        struct intel_uncore_type *type = pmu->type;
        resource_size_t addr;
        u32 mch_bar;

        if (!pdev) {
                pr_warn("perf uncore: Cannot find matched IMC device.\n");
                return;
        }

        pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
        /* MCHBAR is disabled */
        if (!(mch_bar & BIT(0))) {
                pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
                pci_dev_put(pdev);
                return;
        }
        mch_bar &= ~BIT(0);
        addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
        addr |= ((resource_size_t)mch_bar << 32);
#endif

        /* Drop the reference taken by pci_get_device() in tgl_uncore_get_mc_dev(). */
        pci_dev_put(pdev);

        addr += base_offset;
        box->io_addr = ioremap(addr, type->mmio_map_size);
        if (!box->io_addr)
                pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}

static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
        __uncore_imc_init_box(box, 0);
}

static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
        .init_box       = tgl_uncore_imc_freerunning_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .read_counter   = uncore_mmio_read_counter,
        .hw_config      = uncore_freerunning_hw_config,
};

static struct attribute *tgl_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        NULL
};

static const struct attribute_group tgl_uncore_imc_format_group = {
        .name = "format",
        .attrs = tgl_uncore_imc_formats_attr,
};

static struct intel_uncore_type tgl_uncore_imc_free_running = {
        .name                   = "imc_free_running",
        .num_counters           = 3,
        .num_boxes              = 2,
        .num_freerunning_types  = TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
        .mmio_map_size          = TGL_UNCORE_PCI_IMC_MAP_SIZE,
        .freerunning            = tgl_uncore_imc_freerunning,
        .ops                    = &tgl_uncore_imc_freerunning_ops,
        .event_descs            = tgl_uncore_imc_events,
        .format_group           = &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *tgl_mmio_uncores[] = {
        &tgl_uncore_imc_free_running,
        NULL
};

void tgl_l_uncore_mmio_init(void)
{
        tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
        uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
        uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE                     0xd900
#define ADL_UNCORE_IMC_MAP_SIZE                 0x200
#define ADL_UNCORE_IMC_CTR                      0xe8
#define ADL_UNCORE_IMC_CTRL                     0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL               0xc0
#define ADL_UNCORE_IMC_BOX_CTL                  0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE         0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE     0x100

#define ADL_UNCORE_IMC_CTL_FRZ                  (1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL             (1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS             (1 << 2)
#define ADL_UNCORE_IMC_CTL_INT                  (ADL_UNCORE_IMC_CTL_RST_CTRL | \
                                                ADL_UNCORE_IMC_CTL_RST_CTRS)
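/*
 * Judging by their use below: FRZ freezes counting while the box is
 * being reprogrammed, and the two RST bits (combined into
 * ADL_UNCORE_IMC_CTL_INT) reset the control registers and counters to
 * a known state at init time.
 */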

static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
        __uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

        /* The global control in MC1 can control both MCs. */
        if (box->io_addr && (box->pmu->pmu_idx == 1))
                writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}

static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
        if (!box->io_addr)
                return;

        writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
        if (!box->io_addr)
                return;

        writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

static struct intel_uncore_ops adl_uncore_mmio_ops = {
        .init_box       = adl_uncore_imc_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .disable_box    = adl_uncore_mmio_disable_box,
        .enable_box     = adl_uncore_mmio_enable_box,
        .disable_event  = intel_generic_uncore_mmio_disable_event,
        .enable_event   = intel_generic_uncore_mmio_enable_event,
        .read_counter   = uncore_mmio_read_counter,
};

#define ADL_UNC_CTL_CHMASK_MASK                 0x00000f00
#define ADL_UNC_IMC_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 ADL_UNC_CTL_CHMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET)
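/*
 * Unlike the SNB-era IMC config layout, the ADL IMC replaces the umask
 * with a channel mask in bits 8-11 (ADL_UNC_CTL_CHMASK_MASK), which
 * presumably selects the memory channels that contribute to the count;
 * only event, chmask and edge are valid config fields here.
 */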

static struct attribute *adl_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_chmask.attr,
        &format_attr_edge.attr,
        NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
        .name           = "format",
        .attrs          = adl_uncore_imc_formats_attr,
};

static struct intel_uncore_type adl_uncore_imc = {
        .name           = "imc",
        .num_counters   = 5,
        .num_boxes      = 2,
        .perf_ctr_bits  = 64,
        .perf_ctr       = ADL_UNCORE_IMC_CTR,
        .event_ctl      = ADL_UNCORE_IMC_CTRL,
        .event_mask     = ADL_UNC_IMC_EVENT_MASK,
        .box_ctl        = ADL_UNCORE_IMC_BOX_CTL,
        .mmio_offset    = 0,
        .mmio_map_size  = ADL_UNCORE_IMC_MAP_SIZE,
        .ops            = &adl_uncore_mmio_ops,
        .format_group   = &adl_uncore_imc_format_group,
};
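/*
 * With .name = "imc" and two boxes, the PMUs register as uncore_imc_0
 * and uncore_imc_1. Illustrative (not validated) raw-event syntax:
 *
 *   perf stat -a -e uncore_imc_0/event=<sel>,chmask=0xf/ sleep 1
 */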

enum perf_adl_uncore_imc_freerunning_types {
        ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
        ADL_MMIO_UNCORE_IMC_DATA_READ,
        ADL_MMIO_UNCORE_IMC_DATA_WRITE,
        ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters adl_uncore_imc_freerunning[] = {
        [ADL_MMIO_UNCORE_IMC_DATA_TOTAL]        = { 0x40, 0x0, 0x0, 1, 64 },
        [ADL_MMIO_UNCORE_IMC_DATA_READ]         = { 0x58, 0x0, 0x0, 1, 64 },
        [ADL_MMIO_UNCORE_IMC_DATA_WRITE]        = { 0xA0, 0x0, 0x0, 1, 64 },
};

static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
        __uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
        .init_box       = adl_uncore_imc_freerunning_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .read_counter   = uncore_mmio_read_counter,
        .hw_config      = uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
        .name                   = "imc_free_running",
        .num_counters           = 3,
        .num_boxes              = 2,
        .num_freerunning_types  = ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
        .mmio_map_size          = ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
        .freerunning            = adl_uncore_imc_freerunning,
        .ops                    = &adl_uncore_imc_freerunning_ops,
        .event_descs            = tgl_uncore_imc_events,
        .format_group           = &tgl_uncore_imc_format_group,
};
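/*
 * The free-running counters reuse the TGL event descriptions and
 * format group: the semantics (64 bytes per increment, reported in
 * MiB) are unchanged, only the MCHBAR offsets above differ.
 */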

static struct intel_uncore_type *adl_mmio_uncores[] = {
        &adl_uncore_imc,
        &adl_uncore_imc_free_running,
        NULL
};

void adl_uncore_mmio_init(void)
{
        uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */