b10a5ec79e48aac1af8d9edb1396e22449dd8d14
[platform/kernel/linux-rpi.git] / arch / x86 / events / intel / uncore_snbep.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
/* Init value: reset control + counters, enable freeze support. */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
/* NOTE: "TRESH" (sic) is the spelling used consistently in this driver. */
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
/* All user-settable bits of a generic SNB-EP PMON control register. */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control: threshold field is only 5 bits wide */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

/* Cbo boxes additionally support thread-id filtering via bit 19 */
#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)
44
/* SNB-EP PCU event control: adds occupancy select/invert/edge fields */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* QPI boxes use an extended (9-bit) event select: bit 21 is event bit 8 */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
62
/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

/* Cbo filter field layout: tid[4:0], nid[17:10], state[22:18], opc[31:23] */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

/* Declare an extra_reg entry that programs the Cbo filter MSR. */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
116
/* IVBEP event control: no FRZ_EN on init, and no invert in the raw mask */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox: global freeze/unfreeze control MSR */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbo filter is 64-bit: fields below give each sub-field's position */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract the i-th n-bit wide field from x, preserving x's type. */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
167
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

/* Ubox filter: tid at bit 0, cid (core id) at bits 5:1 */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


/* HSW-EP widens tid to 6 bits and state to 7 bits vs. IVB-EP */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
213
/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU: event select is 7 bits, threshold is 6 bits */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/* SKX CHA filter fields (64-bit filter register) */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

/* IIO control: threshold top bits spill into config's extension word */
#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258
326
/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e4

/* SNR IMC: counters live in MMIO space rather than MSR/PCI config */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF
389
/*
 * sysfs "format" attributes: each maps a user-visible field name to the
 * config/config1/config2 bit range it occupies.  Numbered variants (e.g.
 * thresh5/thresh8, filter_tid..filter_tid5) are per-uarch layouts of the
 * same logical field.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
467
468 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
469 {
470         struct pci_dev *pdev = box->pci_dev;
471         int box_ctl = uncore_pci_box_ctl(box);
472         u32 config = 0;
473
474         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
475                 config |= SNBEP_PMON_BOX_CTL_FRZ;
476                 pci_write_config_dword(pdev, box_ctl, config);
477         }
478 }
479
480 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
481 {
482         struct pci_dev *pdev = box->pci_dev;
483         int box_ctl = uncore_pci_box_ctl(box);
484         u32 config = 0;
485
486         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
487                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
488                 pci_write_config_dword(pdev, box_ctl, config);
489         }
490 }
491
492 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
493 {
494         struct pci_dev *pdev = box->pci_dev;
495         struct hw_perf_event *hwc = &event->hw;
496
497         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
498 }
499
500 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
501 {
502         struct pci_dev *pdev = box->pci_dev;
503         struct hw_perf_event *hwc = &event->hw;
504
505         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
506 }
507
508 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
509 {
510         struct pci_dev *pdev = box->pci_dev;
511         struct hw_perf_event *hwc = &event->hw;
512         u64 count = 0;
513
514         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
515         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
516
517         return count;
518 }
519
520 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
521 {
522         struct pci_dev *pdev = box->pci_dev;
523         int box_ctl = uncore_pci_box_ctl(box);
524
525         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
526 }
527
528 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
529 {
530         u64 config;
531         unsigned msr;
532
533         msr = uncore_msr_box_ctl(box);
534         if (msr) {
535                 rdmsrl(msr, config);
536                 config |= SNBEP_PMON_BOX_CTL_FRZ;
537                 wrmsrl(msr, config);
538         }
539 }
540
541 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
542 {
543         u64 config;
544         unsigned msr;
545
546         msr = uncore_msr_box_ctl(box);
547         if (msr) {
548                 rdmsrl(msr, config);
549                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
550                 wrmsrl(msr, config);
551         }
552 }
553
554 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
555 {
556         struct hw_perf_event *hwc = &event->hw;
557         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
558
559         if (reg1->idx != EXTRA_REG_NONE)
560                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
561
562         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
563 }
564
565 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
566                                         struct perf_event *event)
567 {
568         struct hw_perf_event *hwc = &event->hw;
569
570         wrmsrl(hwc->config_base, hwc->config);
571 }
572
573 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
574 {
575         unsigned msr = uncore_msr_box_ctl(box);
576
577         if (msr)
578                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
579 }
580
/* Generic SNB-EP PMON box format attributes (8-bit threshold). */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
589
/* Ubox format attributes: same as generic but with a 5-bit threshold. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
598
/* Cbox format attributes: adds tid_en plus the filter (config1) fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
612
/* PCU format attributes: occupancy-based fields and the four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
627
/* QPI format attributes: extended event select plus packet match/mask fields. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
654
/*
 * IMC event aliases.  The CAS-count scale 6.103515625e-5 equals
 * 64 / 2^20, i.e. one 64-byte transfer expressed in MiB.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
665
/*
 * QPI event aliases.  Events above 0xff (e.g. 0x102, 0x103) rely on the
 * 9-bit extended event select (see format_attr_event_ext: config:0-7,21).
 */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
673
/*
 * Per-box-type sysfs attribute groups, all published under the
 * "format" directory of the corresponding PMU device.
 */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
698
/*
 * Common callbacks for all MSR-based SNB-EP PMON boxes.  The two-level
 * macro split lets other box types reuse everything except init_box.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

/* Default MSR ops: the common set with no box-specific overrides. */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/*
 * Common callbacks for PCI-based SNB-EP PMON boxes; enable_event is
 * left out so box types (e.g. QPI) can supply their own.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
720
721 static struct intel_uncore_ops snbep_uncore_pci_ops = {
722         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
723         .enable_event   = snbep_uncore_pci_enable_event,        \
724 };
725
/*
 * Scheduling constraints: for each event code, the bitmask of PMON
 * counters it may be programmed on (e.g. 0x1 = counter 0 only,
 * 0x3 = counters 0-1, 0xc = counters 2-3).
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
801
/* UBOX (system configuration controller): 2 GP counters + fixed UCLK counter. */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

/*
 * Map C-Box event/umask encodings to the filter-register fields they
 * need.  The third argument is a bitmask of filter fields (bit 0 =
 * TID, 1 = NID, 2 = STATE, 3 = OPC) consumed by snbep_cbox_hw_config()
 * and the shared-reg constraint code below.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
845
846 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
847 {
848         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
849         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
850         int i;
851
852         if (uncore_box_is_fake(box))
853                 return;
854
855         for (i = 0; i < 5; i++) {
856                 if (reg1->alloc & (0x1 << i))
857                         atomic_sub(1 << (i * 6), &er->ref);
858         }
859         reg1->alloc = 0;
860 }
861
/*
 * Try to reserve the shared C-Box filter-register fields @event needs.
 *
 * er->ref packs a 6-bit reference count per filter field.  A field may
 * be shared only by events that program identical filter bits (the
 * (config ^ er->config) & mask test).  Returns NULL when all fields
 * were reserved (no extra scheduling constraint); on conflict, rolls
 * back any partial reservations and returns the empty constraint so
 * the event cannot be scheduled on this box.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* Event uses no filter fields: nothing to arbitrate. */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* A real box may already own this field from an earlier call. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Free, or already programmed with identical filter bits? */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* Record ownership only on real boxes; fake boxes are probe-only. */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Roll back the references taken in this call. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
908
909 static u64 snbep_cbox_filter_mask(int fields)
910 {
911         u64 mask = 0;
912
913         if (fields & 0x1)
914                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
915         if (fields & 0x2)
916                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
917         if (fields & 0x4)
918                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
919         if (fields & 0x8)
920                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
921
922         return mask;
923 }
924
/* SNB-EP C-Box constraint hook: shared-filter arbitration with the
 * SNB-EP field-to-mask translation. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
930
931 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
932 {
933         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
934         struct extra_reg *er;
935         int idx = 0;
936
937         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
938                 if (er->event != (event->hw.config & er->config_mask))
939                         continue;
940                 idx |= er->idx;
941         }
942
943         if (idx) {
944                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
945                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
946                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
947                 reg1->idx = idx;
948         }
949         return 0;
950 }
951
/* C-Box ops: common MSR callbacks plus filter setup and shared-reg
 * constraint arbitration. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* C-Box (LLC coherence engine): one box per core, shared filter reg. */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
974
975 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
976 {
977         struct hw_perf_event *hwc = &event->hw;
978         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
979         u64 config = reg1->config;
980
981         if (new_idx > reg1->idx)
982                 config <<= 8 * (new_idx - reg1->idx);
983         else
984                 config >>= 8 * (reg1->idx - new_idx);
985
986         if (modify) {
987                 hwc->config += new_idx - reg1->idx;
988                 reg1->config = config;
989                 reg1->idx = new_idx;
990         }
991         return config;
992 }
993
/*
 * Reserve a byte lane of the shared PCU filter register for @event.
 *
 * er->ref packs an 8-bit reference count per lane.  A lane can be
 * shared by events programming identical filter bits.  If the
 * requested lane conflicts, the filter value is relocated to the next
 * lane (the band events are interchangeable across lanes) and retried;
 * only after all four lanes conflict is the empty constraint returned.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or this real box already holds its lane. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Lane free, or already programmed with identical filter bits? */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Try the next lane; give up once we wrap back around. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the lane switch and record ownership. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1035
1036 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1037 {
1038         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1039         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1040
1041         if (uncore_box_is_fake(box) || !reg1->alloc)
1042                 return;
1043
1044         atomic_sub(1 << (reg1->idx * 8), &er->ref);
1045         reg1->alloc = 0;
1046 }
1047
1048 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1049 {
1050         struct hw_perf_event *hwc = &event->hw;
1051         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1052         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1053
1054         if (ev_sel >= 0xb && ev_sel <= 0xe) {
1055                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1056                 reg1->idx = ev_sel - 0xb;
1057                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1058         }
1059         return 0;
1060 }
1061
/* PCU ops: common MSR callbacks plus band-filter setup and shared
 * filter-lane arbitration. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* PCU (power control unit): one box, shared band-filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1082
/* All MSR-accessed SNB-EP uncore box types, NULL-terminated. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1089
1090 void snbep_uncore_cpu_init(void)
1091 {
1092         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1093                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1094         uncore_msr_uncores = snbep_msr_uncores;
1095 }
1096
/*
 * Slot indices into uncore_extra_pci_dev[].dev[] for auxiliary PCI
 * devices (QPI port filters and, on later parts, the extra PCU)
 * that are not PMON boxes themselves.
 */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
	HSWEP_PCI_PCU_3,
};
1103
1104 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1105 {
1106         struct hw_perf_event *hwc = &event->hw;
1107         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1108         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1109
1110         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1111                 reg1->idx = 0;
1112                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1113                 reg1->config = event->attr.config1;
1114                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1115                 reg2->config = event->attr.config2;
1116         }
1117         return 0;
1118 }
1119
/*
 * Enable a QPI event.  If the event uses packet match/mask filtering
 * (reg1 set up by snbep_qpi_hw_config()), first program the 64-bit
 * match and mask values into the separate per-port QPI filter PCI
 * device, then enable the counter itself.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx selects port 0 or port 1's filter device. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* The filter device may not have been discovered. */
		if (filter_pdev) {
			/* 64-bit match/mask values go out as two dwords each. */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1146
/* QPI ops: common PCI callbacks with filter-aware enable_event and
 * generic shared-reg constraint handling for the match/mask regs. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Common field layout shared by the plain PCI-based box types below. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1162
/* HA (home agent). */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IMC (integrated memory controller): one box per channel, with a
 * fixed DCLK counter and predefined bandwidth events. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI link layer: one box per port, with its own ops for the packet
 * match/mask filter and a wider raw event mask. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe (ring-to-PCIe bridge). */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI (ring-to-QPI bridge): one box per link. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1216
/* Indices tying the PCI device-ID table entries to the box types. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* All PCI-accessed SNB-EP uncore box types, NULL-terminated. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1233
/*
 * PCI IDs of the SNB-EP uncore PMON devices.  driver_data packs the
 * box type index and the box instance (or, for the bare-ID filter
 * devices, UNCORE_EXTRA_PCI_DEV and the extra-device slot).
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

/* Matched by the generic uncore PCI probe code via uncore_pci_driver. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1292
/* Low 3 bits of the CPUNODEID register hold this socket's node ID. */
#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 *
 * Walks every UBOX device with PCI ID @devid, reads its local node ID
 * (at config offset @nodeid_loc) and the global node-ID mapping
 * register (at @idmap_loc), and records which physical socket each PCI
 * bus belongs to.  Buses without a UBOX then inherit the mapping of a
 * neighbouring bus, scanning downward when @reverse is set and upward
 * otherwise.  Returns 0 or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* pci_get_device() took a reference on the last device returned. */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1375
1376 int snbep_uncore_pci_init(void)
1377 {
1378         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1379         if (ret)
1380                 return ret;
1381         uncore_pci_uncores = snbep_pci_uncores;
1382         uncore_pci_driver = &snbep_uncore_pci_driver;
1383         return 0;
1384 }
1385 /* end of Sandy Bridge-EP uncore support */
1386
1387 /* IvyTown uncore support */
1388 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1389 {
1390         unsigned msr = uncore_msr_box_ctl(box);
1391         if (msr)
1392                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1393 }
1394
1395 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1396 {
1397         struct pci_dev *pdev = box->pci_dev;
1398
1399         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1400 }
1401
1402 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
1403         .init_box       = ivbep_uncore_msr_init_box,            \
1404         .disable_box    = snbep_uncore_msr_disable_box,         \
1405         .enable_box     = snbep_uncore_msr_enable_box,          \
1406         .disable_event  = snbep_uncore_msr_disable_event,       \
1407         .enable_event   = snbep_uncore_msr_enable_event,        \
1408         .read_counter   = uncore_msr_read_counter
1409
1410 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1411         IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1412 };
1413
/* Default PCI ops for IVB-EP boxes: IVB-specific init, SNB-EP for the rest. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1422
/* Common field layout shared by most IVB-EP PCI-based uncore types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1430
/* Generic IVB-EP event format: event/umask/edge/inv + 8-bit threshold. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
1439
/* Ubox format: like the generic one but only a 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1448
/* C-box format: adds tid_en plus the box filter fields (config1). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1465
/* PCU format: occupancy-based event selection plus frequency band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
1479
/* QPI format: extended event field plus packet match/mask filter fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1505
/* sysfs "format" attribute groups, one per box type's event encoding. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1530
/* IVB-EP Ubox: 2 general 44-bit counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1545
/*
 * C-box events that need the box filter register.  Each entry maps an
 * event+umask encoding (matched under config_mask) to the set of filter
 * field bits (idx) it requires; ivbep_cbox_hw_config() ORs the idx values
 * of all matching entries together.  Values are hardware-defined — do not
 * change without consulting the IVT uncore PMU documentation.
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1586
1587 static u64 ivbep_cbox_filter_mask(int fields)
1588 {
1589         u64 mask = 0;
1590
1591         if (fields & 0x1)
1592                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1593         if (fields & 0x2)
1594                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1595         if (fields & 0x4)
1596                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1597         if (fields & 0x8)
1598                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1599         if (fields & 0x10) {
1600                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1601                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1602                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1603                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1604         }
1605
1606         return mask;
1607 }
1608
/* C-box constraint lookup: shared SNB-EP logic with the IVB-EP filter mask. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1614
1615 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1616 {
1617         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1618         struct extra_reg *er;
1619         int idx = 0;
1620
1621         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1622                 if (er->event != (event->hw.config & er->config_mask))
1623                         continue;
1624                 idx |= er->idx;
1625         }
1626
1627         if (idx) {
1628                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1629                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1630                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1631                 reg1->idx = idx;
1632         }
1633         return 0;
1634 }
1635
/*
 * Enable a C-box event.  If the event uses the box filter, program the
 * filter value first (split across two 32-bit MSR writes), then set the
 * enable bit in the event control register.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/* high half goes to the second filter MSR, which sits 6
		 * addresses above the first — presumably per the IVT uncore
		 * MSR layout; verify against the PMU guide before changing. */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1649
/* C-box ops: IVB-EP-specific enable/hw_config/constraints for the filter reg. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1661
/*
 * IVB-EP C-box: up to 15 instances (clamped to core count at init time),
 * one shared register slot for the box filter MSR.
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1677
/* PCU ops: common MSR ops plus SNB-EP filter-band constraint handling. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1684
/* IVB-EP power control unit: single box, shared reg for band filters. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1698
/* NULL-terminated list of IVB-EP MSR-based uncore types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1705
1706 void ivbep_uncore_cpu_init(void)
1707 {
1708         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1709                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1710         uncore_msr_uncores = ivbep_msr_uncores;
1711 }
1712
/* IVB-EP home agent: two instances, standard PCI PMON layout. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1720
/* IVB-EP memory controller channels: 8 boxes plus a fixed DCLK counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1732
/* registers in IRP boxes are not properly aligned */
/* per-counter control register config-space offsets, indexed by hwc->idx */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
/* per-counter counter register config-space offsets, indexed by hwc->idx */
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1736
1737 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1738 {
1739         struct pci_dev *pdev = box->pci_dev;
1740         struct hw_perf_event *hwc = &event->hw;
1741
1742         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1743                                hwc->config | SNBEP_PMON_CTL_EN);
1744 }
1745
1746 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1747 {
1748         struct pci_dev *pdev = box->pci_dev;
1749         struct hw_perf_event *hwc = &event->hw;
1750
1751         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1752 }
1753
/*
 * Read a 64-bit IRP counter as two 32-bit config-space reads.
 * The low dword lands in the low half of 'count' and the high dword in the
 * high half via pointer arithmetic — relies on x86 little-endian layout.
 */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1765
/* IRP ops: custom event enable/disable/read for the unaligned registers. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
1774
/* IVB-EP IRP (IIO ring port): no perf_ctr/event_ctl here, ops use tables. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1785
/* QPI ops: SNB-EP match/mask filter handling on top of the common PCI ops. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1797
/* IVB-EP QPI link layer: three ports, shared reg for the match/mask filter. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1811
/* IVB-EP ring-to-PCIe interface: constrained counters, common PCI layout. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1820
/* IVB-EP ring-to-QPI interface: two boxes, constrained counters. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1829
/* Indices into ivbep_pci_uncores[], carried in pci_device_id driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1838
/* PCI-based uncore types, indexed by the IVBEP_PCI_UNCORE_* enum. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1848
/*
 * PCI device IDs for the IVB-EP uncore boxes.  driver_data encodes the
 * uncore type index and box instance (or, for the QPI port filter devices,
 * the extra-device slot used by snbep_qpi_hw_config()).
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1934
/* PCI driver skeleton; probe/remove are filled in by the uncore core. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1939
1940 int ivbep_uncore_pci_init(void)
1941 {
1942         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1943         if (ret)
1944                 return ret;
1945         uncore_pci_uncores = ivbep_pci_uncores;
1946         uncore_pci_driver = &ivbep_uncore_pci_driver;
1947         return 0;
1948 }
1949 /* end of IvyTown uncore support */
1950
1951 /* KNL uncore support */
/* KNL Ubox format: adds tid_en; threshold is 5 bits wide. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1961
/* sysfs "format" group for the KNL Ubox. */
static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
1966
/* KNL Ubox: reuses the HSW-EP register layout with a KNL event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1981
/* KNL CHA format: event encoding plus the CHA box filter fields (config1). */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2001
/* sysfs "format" group for the KNL CHA. */
static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
2006
/* KNL CHA events restricted to counter 0. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
2013
/* KNL CHA events that need the box filter; idx selects the filter fields. */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2022
2023 static u64 knl_cha_filter_mask(int fields)
2024 {
2025         u64 mask = 0;
2026
2027         if (fields & 0x1)
2028                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2029         if (fields & 0x2)
2030                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2031         if (fields & 0x4)
2032                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2033         return mask;
2034 }
2035
/* CHA constraint lookup: shared SNB-EP C-box logic with the KNL filter mask. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2041
/*
 * Set up the extra (filter) register for a KNL CHA event: collect the
 * filter fields required by all matching extra_regs entries, then point
 * extra_reg at this box's filter MSR.  Always succeeds.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		/* Unconditionally count both local and remote node traffic;
		 * without these bits the filter would silently drop events. */
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2067
/* Forward declaration: defined in the Haswell-EP section below. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

/* KNL CHA ops: HSW-EP enable path plus KNL-specific filter handling. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2082
/* KNL caching/home agent: 38 instances, shared reg slot for the filter. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2098
/* KNL PCU format: wide event field, occupancy selection, 6-bit threshold. */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};
2111
/* sysfs "format" group for the KNL PCU. */
static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2116
/* KNL power control unit: HSW-EP register layout, KNL event mask. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2129
/* NULL-terminated list of KNL MSR-based uncore types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2136
/* Register the KNL MSR-based uncore types. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2141
/*
 * Enable a KNL IMC box by writing 0 to its box control register —
 * presumably this clears the freeze bit(s) and enables counting; confirm
 * against the KNL uncore PMU documentation.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2149
2150 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2151                                         struct perf_event *event)
2152 {
2153         struct pci_dev *pdev = box->pci_dev;
2154         struct hw_perf_event *hwc = &event->hw;
2155
2156         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2157                                                         == UNCORE_FIXED_EVENT)
2158                 pci_write_config_dword(pdev, hwc->config_base,
2159                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2160         else
2161                 pci_write_config_dword(pdev, hwc->config_base,
2162                                        hwc->config | SNBEP_PMON_CTL_EN);
2163 }
2164
/* KNL IMC ops: custom box/event enable, the rest shared with SNB-EP. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2173
/* KNL memory controller UClk PMON: 2 boxes, 4 generic + 1 fixed counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL memory controller DClk channel PMON: 6 boxes (2 MCs x 3 channels). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) UClk PMON: 8 boxes, same register layout as imc_uclk. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) EClk PMON: 8 boxes with their own register block. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2237
/* Event 0x23 is restricted to counters 0-1 on the KNL M2PCIe box. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe PMON: standard SNB-EP PCI layout plus the constraint above. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2251
/* sysfs format attributes exposed for KNL IRP events (includes "qor"). */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

/* KNL IRP PMON: 2 counters in PCI config space, generic SNB-EP PCI ops. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2279
/* Indices into knl_pci_uncores[], referenced by the PCI ID table below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

/* All PCI-based KNL uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2298
2299 /*
2300  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
2303  *
2304  *      PCI Device ID   Uncore PMU Devices
2305  *      ----------------------------------
2306  *      0x7841          MC0 UClk, MC1 UClk
2307  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2308  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2309  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2310  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2311  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2312  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2313  *      0x7817          M2PCIe
2314  *      0x7814          IRP
2315 */
2316
/*
 * Since device IDs are shared, each entry disambiguates by the fixed
 * (devfn, PMU type, box index) tuple encoded in driver_data.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2424
/* PCI driver shell; the uncore core handles probe/remove, so only the
 * name and ID table are filled in. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2429
2430 int knl_uncore_pci_init(void)
2431 {
2432         int ret;
2433
2434         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2435         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2436         if (ret)
2437                 return ret;
2438         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2439         if (ret)
2440                 return ret;
2441         uncore_pci_uncores = knl_pci_uncores;
2442         uncore_pci_driver = &knl_uncore_pci_driver;
2443         return 0;
2444 }
2445
2446 /* end of KNL uncore support */
2447
2448 /* Haswell-EP uncore support */
/* sysfs format attributes for HSW-EP UBox events (5-bit threshold,
 * plus TID/CID filter fields). */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2464
2465 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2466 {
2467         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2468         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2469         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2470         reg1->idx = 0;
2471         return 0;
2472 }
2473
/* UBox ops: common MSR ops plus filter setup and shared-reg arbitration. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* HSW-EP UBox: 2 generic 44-bit counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2496
/* sysfs format attributes for HSW-EP CBox events, including the full
 * set of filter fields (tid/link/state/nid/opc/nc/c6/isoc). */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

/* Counter restrictions for specific CBox event codes (mask = allowed ctrs). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2529
/*
 * Map (event|umask) codes to the CBox filter fields they require; the
 * third argument is the filter-field bitmask consumed by
 * hswep_cbox_filter_mask() / hswep_cbox_hw_config().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2571
2572 static u64 hswep_cbox_filter_mask(int fields)
2573 {
2574         u64 mask = 0;
2575         if (fields & 0x1)
2576                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2577         if (fields & 0x2)
2578                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2579         if (fields & 0x4)
2580                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2581         if (fields & 0x8)
2582                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2583         if (fields & 0x10) {
2584                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2585                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2586                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2587                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2588         }
2589         return mask;
2590 }
2591
/* Delegate constraint resolution to the shared SNB-EP CBox helper,
 * parameterized with the HSW-EP filter-field layout. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2597
2598 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2599 {
2600         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2601         struct extra_reg *er;
2602         int idx = 0;
2603
2604         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2605                 if (er->event != (event->hw.config & er->config_mask))
2606                         continue;
2607                 idx |= er->idx;
2608         }
2609
2610         if (idx) {
2611                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2612                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2613                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2614                 reg1->idx = idx;
2615         }
2616         return 0;
2617 }
2618
2619 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2620                                   struct perf_event *event)
2621 {
2622         struct hw_perf_event *hwc = &event->hw;
2623         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2624
2625         if (reg1->idx != EXTRA_REG_NONE) {
2626                 u64 filter = uncore_shared_reg_config(box, 0);
2627                 wrmsrl(reg1->reg, filter & 0xffffffff);
2628                 wrmsrl(reg1->reg + 1, filter >> 32);
2629         }
2630
2631         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2632 }
2633
/* CBox ops: standard SNB-EP MSR handling, with filter-aware enable,
 * hw_config and constraint arbitration. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* HSW-EP CBox: up to 18 boxes (clamped to the core count at init). */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2661
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/*
		 * Accumulate the init bits one at a time, rewriting the MSR
		 * after each: writing the full value in one shot can raise
		 * a spurious #GP on this box.
		 */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2680
/* SBox ops: common MSR ops, but with the #GP-safe init_box above. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

/* sysfs format attributes for HSW-EP SBox events. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

/* HSW-EP SBox: 4 boxes by default, reduced to 2 on 6-8 core parts
 * (see hswep_uncore_cpu_init). */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2714
/*
 * Set up the PCU band filter for the frequency/occupancy band events:
 * event selects 0xb-0xe map to filter bands 0-3.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): if each band filter is an 8-bit field, the
		 * shift would need to be (reg1->idx * 8) to select whole
		 * bands; this shifts by only reg1->idx bits. Confirm the
		 * intended layout against the uncore PMU reference manual
		 * before changing.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
2728
/* PCU ops: common MSR ops plus band-filter setup and the SNB-EP PCU
 * constraint arbitration. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* HSW-EP PCU: single box, 4 counters, one shared filter register. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* All MSR-based HSW-EP uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2757
2758 void hswep_uncore_cpu_init(void)
2759 {
2760         int pkg = boot_cpu_data.logical_proc_id;
2761
2762         if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2763                 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2764
2765         /* Detect 6-8 core systems with only two SBOXes */
2766         if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
2767                 u32 capid4;
2768
2769                 pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
2770                                       0x94, &capid4);
2771                 if (((capid4 >> 6) & 0x3) == 0)
2772                         hswep_uncore_sbox.num_boxes = 2;
2773         }
2774
2775         uncore_msr_uncores = hswep_msr_uncores;
2776 }
2777
/* HSW-EP Home Agent PMON: two boxes with the standard SNB-EP PCI layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2785
/*
 * Predefined IMC events; the CAS-count scale converts 64-byte cache
 * lines to MiB (64 / 2^20 = 6.103515625e-5).
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2796
/* HSW-EP IMC PMON: 8 channels, each with 4 generic + 1 fixed counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2808
2809 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2810
2811 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2812 {
2813         struct pci_dev *pdev = box->pci_dev;
2814         struct hw_perf_event *hwc = &event->hw;
2815         u64 count = 0;
2816
2817         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2818         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2819
2820         return count;
2821 }
2822
/* IRP ops: IVB-EP style event enable/disable with the HSW-EP split
 * 32-bit counter read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2831
/* HSW-EP IRP PMON: no perf_ctr/event_ctl here — counter access goes
 * through the custom ops above. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2842
/* HSW-EP QPI PMON: 3 link boxes, reusing the SNB-EP QPI ops and formats. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2856
/* Counter restrictions for HSW-EP R2PCIe events (mask = allowed ctrs). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

/* HSW-EP R2PCIe PMON: standard SNB-EP PCI layout plus the constraints. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2887
/*
 * HSW-EP R3QPI counter constraints: (event code, mask of counters the
 * event may be scheduled on).
 */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2924
/* HSW-EP R3QPI PMON (PCI): 3 boxes, 3 x 44-bit counters, common SNB-EP regs. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2933
/* Index of each HSW-EP PCI uncore type within hswep_pci_uncores[]. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2942
/* HSW-EP PCI uncore types, indexed by HSWEP_PCI_UNCORE_*; NULL-terminated. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2952
/*
 * PCI device IDs of the HSW-EP uncore PMON units. driver_data packs the
 * hswep_pci_uncores[] type index (or UNCORE_EXTRA_PCI_DEV for helper
 * devices such as the QPI filters and PCU.3) together with the box index.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
3043
/* HSW-EP uncore PCI driver: only name and id table are provided here. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3048
3049 int hswep_uncore_pci_init(void)
3050 {
3051         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3052         if (ret)
3053                 return ret;
3054         uncore_pci_uncores = hswep_pci_uncores;
3055         uncore_pci_driver = &hswep_uncore_pci_driver;
3056         return 0;
3057 }
3058 /* end of Haswell-EP uncore support */
3059
3060 /* BDX uncore support */
3061
/*
 * BDX Ubox PMON (MSR): one box, 2 x 48-bit counters plus a 48-bit
 * fixed UCLK counter; reuses the IVB-EP MSR ops and Ubox format group.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3077
/* BDX CBox counter constraints: (event code, allowed-counter mask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3085
/*
 * BDX CBox PMON (MSR). num_boxes = 24 is the maximum; it is clamped to
 * the actual core count in bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3101
/*
 * BDX SBox PMON (MSR): 4 boxes. Its slot in bdx_msr_uncores[] is
 * cleared at runtime by bdx_uncore_cpu_init() on parts without SBOXes.
 */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3115
/* Index of the SBox entry below; must stay in sync with the array order. */
#define BDX_MSR_UNCORE_SBOX	3

/* MSR-based BDX uncore types; the SBox slot may be NULLed at runtime. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3125
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3131
/*
 * CPU-side setup for BDX uncore MSR PMUs: clamp the CBox count to the
 * real number of cores, install the BDX type table, and drop the SBox
 * entry on parts that have no SBOXes (BDX-DE, model 86, or systems
 * whose PCU.3 capability register reports none).
 */
void bdx_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* BDX-DE doesn't have SBOX */
	if (boot_cpu_data.x86_model == 86) {
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	/* Detect systems with no SBOXes */
	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		struct pci_dev *pdev;
		u32 capid4;

		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
		/* Capability reg at 0x94: bits 7:6 == 0 means no SBOXes. */
		pci_read_config_dword(pdev, 0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			/*
			 * NOTE(review): writes through bdx_msr_uncores while
			 * the model==86 branch goes through uncore_msr_uncores;
			 * both alias the same array at this point.
			 */
			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	}
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3155
/* BDX Home Agent PMON (PCI): 2 boxes, 4 x 48-bit counters, common SNB-EP regs. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3163
/*
 * BDX IMC PMON (PCI): 8 channels, each with 4 x 48-bit counters plus a
 * 48-bit fixed (DCLK) counter; shares the HSW-EP IMC event descriptions.
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3175
/*
 * BDX IRP PMON (PCI): one box; only box_ctl/event_mask are set here
 * because the HSW-EP IRP ops use their own counter/control offsets.
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3186
/*
 * BDX QPI link PMON (PCI): 3 boxes, 4 x 48-bit counters; one shared-reg
 * slot, SNB-EP QPI ops and format group (QPI-specific raw event mask).
 */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3200
/* BDX R2PCIe counter constraints: (event code, allowed-counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3213
/* BDX R2PCIe PMON (PCI): one box, 4 x 48-bit counters, common SNB-EP regs. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3222
/* BDX R3QPI counter constraints: (event code, allowed-counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3256
/* BDX R3QPI PMON (PCI): 3 boxes, 3 x 48-bit counters, common SNB-EP regs. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3265
/* Index of each BDX PCI uncore type within bdx_pci_uncores[]. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3274
/* BDX PCI uncore types, indexed by BDX_PCI_UNCORE_*; NULL-terminated. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3284
/*
 * PCI device IDs of the BDX uncore PMON units. driver_data packs the
 * bdx_pci_uncores[] type index (or UNCORE_EXTRA_PCI_DEV for helper
 * devices: QPI port filters and the PCU.3 capability device) together
 * with the box index.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
3380
/* BDX uncore PCI driver: only name and id table are provided here. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3385
3386 int bdx_uncore_pci_init(void)
3387 {
3388         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3389
3390         if (ret)
3391                 return ret;
3392         uncore_pci_uncores = bdx_pci_uncores;
3393         uncore_pci_driver = &bdx_uncore_pci_driver;
3394         return 0;
3395 }
3396
3397 /* end of BDX uncore support */
3398
3399 /* SKX uncore support */
3400
/*
 * SKX Ubox PMON (MSR): one box, 2 x 48-bit counters plus a 48-bit fixed
 * UCLK counter; reuses the IVB-EP MSR ops and Ubox format group.
 */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3415
/*
 * sysfs "format" attributes for the SKX CHA PMU: event/umask/edge/
 * tid_en/inv/thresh plus the CHA filter fields; NULL-terminated.
 */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
3436
/* Exposes the CHA format attributes under the PMU's "format" directory. */
static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3441
/* SKX CHA counter constraints: events 0x11 and 0x36 only on counter 0. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3447
/*
 * CHA events that need a filter register: (event+umask to match,
 * config mask, filter-field selector bits). The selector bits are
 * OR-accumulated by skx_cha_hw_config() and expanded into FILTER MSR
 * bits by skx_cha_filter_mask().
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3460
3461 static u64 skx_cha_filter_mask(int fields)
3462 {
3463         u64 mask = 0;
3464
3465         if (fields & 0x1)
3466                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3467         if (fields & 0x2)
3468                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3469         if (fields & 0x4)
3470                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3471         if (fields & 0x8) {
3472                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3473                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3474                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3475                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3476                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3477                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3478                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3479                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3480                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3481         }
3482         return mask;
3483 }
3484
/* Delegate CHA constraint lookup to the shared CBox helper with SKX's filter mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3490
/*
 * Configure the CHA filter register for an event that needs one: OR
 * together the filter-field selector bits (er->idx) of every extra-reg
 * entry matching the event encoding, then point the event's extra_reg
 * at this box's FILTER0 MSR with config1 masked to the legal fields.
 * Always returns 0.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are per-box, HSWEP_CBO_MSR_OFFSET apart. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3511
/*
 * SKX CHA PMU callbacks: IVB-EP style init (see note below), SNB-EP
 * box/event enable/disable, HSW-EP event enable, plus the SKX-specific
 * filter configuration and constraint hooks above.
 */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3524
/*
 * SKX CHA PMON (MSR): 4 x 48-bit counters per box, one shared-reg slot
 * for the filter register. num_boxes is not set here — presumably
 * filled in at init from the detected CHA count; TODO confirm against
 * the SKX cpu-init code.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3539
/* sysfs "format" attributes for the SKX IIO PMU; NULL-terminated. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};
3550
/* Exposes the IIO format attributes under the PMU's "format" directory. */
static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
3555
/* SKX IIO counter constraints: (event code, allowed-counter mask). */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3565
3566 static void skx_iio_enable_event(struct intel_uncore_box *box,
3567                                  struct perf_event *event)
3568 {
3569         struct hw_perf_event *hwc = &event->hw;
3570
3571         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3572 }
3573
/*
 * SKX IIO PMU callbacks: IVB-EP style init, SNB-EP box enable/disable
 * and event disable, SKX-specific event enable above.
 */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3582
/*
 * SKX IIO PMON (MSR): 6 stacks, 4 x 48-bit counters each; uses an
 * extended event mask (event_mask_ext) for the wider IIO control fields.
 */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3598
/* Free-running counter groups in the SKX IIO stack: ioclk, bandwidth, util. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
3606
3607
/*
 * SKX IIO free-running counter geometry, indexed by the enum above.
 * Initializer order presumably follows struct freerunning_counters
 * (counter base MSR, counter offset, box offset, counter count, width
 * in bits) — TODO confirm against the struct definition in uncore.h.
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3613
/*
 * Event aliases for the SKX IIO free-running counters.  All use the
 * pseudo event 0xff; the umask selects the counter group and index.
 * Bandwidth counts are scaled to MiB (scale = 4/2^20).
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3653
/*
 * Free-running counters cannot be started/stopped, so only read and
 * config-validation ops are needed.
 */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

/* Only event/umask are meaningful for free-running counters. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
3669
/*
 * SKX IIO free-running counters: 17 per box (1 ioclk + 8 bandwidth +
 * 8 utilization, per the tables above), 6 boxes.
 */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3680
/* Generic SKX event format: event/umask/edge/inv + 8-bit threshold. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3694
/*
 * SKX IRP (IIO ring port) PMON: 2 counters per box, 6 boxes; shares
 * the IIO ops since the control/counter access pattern is the same.
 */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3708
3709 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3710         &format_attr_event.attr,
3711         &format_attr_umask.attr,
3712         &format_attr_edge.attr,
3713         &format_attr_inv.attr,
3714         &format_attr_thresh8.attr,
3715         &format_attr_occ_invert.attr,
3716         &format_attr_occ_edge_det.attr,
3717         &format_attr_filter_band0.attr,
3718         &format_attr_filter_band1.attr,
3719         &format_attr_filter_band2.attr,
3720         &format_attr_filter_band3.attr,
3721         NULL,
3722 };
3723
3724 static struct attribute_group skx_uncore_pcu_format_group = {
3725         .name = "format",
3726         .attrs = skx_uncore_pcu_formats_attr,
3727 };
3728
/*
 * PCU ops: common IVB-EP MSR ops plus the HSW-EP filter setup and the
 * shared-register constraint handling for the PCU filter MSR.
 */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3735
/*
 * SKX PCU PMON: one box, 4 counters; one shared register backs the
 * filter MSR used by the constraint callbacks above.
 */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
3749
/* All MSR-based SKX uncore types, installed by skx_uncore_cpu_init(). */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3759
/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C
 * (PCI device ID 0x2083).
 */
3764 #define SKX_CAPID6              0x9c
3765 #define SKX_CHA_BIT_MASK        GENMASK(27, 0)
3766
3767 static int skx_count_chabox(void)
3768 {
3769         struct pci_dev *dev = NULL;
3770         u32 val = 0;
3771
3772         dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3773         if (!dev)
3774                 goto out;
3775
3776         pci_read_config_dword(dev, SKX_CAPID6, &val);
3777         val &= SKX_CHA_BIT_MASK;
3778 out:
3779         pci_dev_put(dev);
3780         return hweight32(val);
3781 }
3782
/* Register the SKX MSR uncores, sizing the CHA type from CAPID6. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3788
/*
 * SKX IMC PMON (PCI based): 4 GP counters plus a fixed DCLK counter
 * per channel, 6 channels; reuses the HSW-EP IMC event aliases.
 */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3805
/* UPI event format: as the generic SKX format but with an extended umask. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3819
3820 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
3821 {
3822         struct pci_dev *pdev = box->pci_dev;
3823
3824         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3825         pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3826 }
3827
/* PCI ops for the UPI boxes; only init_box differs from the SNB-EP set. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3836
/* SKX UPI link PMON: 4 counters per link, 3 links per socket. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3850
3851 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3852 {
3853         struct pci_dev *pdev = box->pci_dev;
3854
3855         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3856         pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3857 }
3858
/* PCI ops for the M2M boxes; only init_box differs from the SNB-EP set. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3867
/* SKX M2M (mesh-to-memory) PMON: 4 counters per box, 2 boxes. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3880
/* Event 0x23 is restricted to counters 0-1 on M2PCIe. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* SKX M2PCIe PMON: 4 counters per box, 4 boxes, PCI based. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3899
/*
 * M3UPI counter constraints: events 0x1d/0x1e only on counter 0,
 * the 0x40/0x4e-0x52 group on counters 0-2.
 */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* SKX M3UPI PMON: 3 counters per box, one box per UPI link. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3925
/* Indices into skx_pci_uncores[], referenced from the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3942
/*
 * SKX PMON PCI devices.  driver_data encodes (device, function, uncore
 * type index, box index) via UNCORE_PCI_DEV_FULL_DATA.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4018
4019
/* Matched by the uncore core; no probe callback, only device discovery. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4024
4025 int skx_uncore_pci_init(void)
4026 {
4027         /* need to double check pci address */
4028         int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4029
4030         if (ret)
4031                 return ret;
4032
4033         uncore_pci_uncores = skx_pci_uncores;
4034         uncore_pci_driver = &skx_uncore_pci_driver;
4035         return 0;
4036 }
4037
4038 /* end of SKX uncore support */
4039
4040 /* SNR uncore support */
4041
/* SNR UBOX PMON: 2 GP counters plus a fixed UCLK counter, one box. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4056
/*
 * SNR CHA event format: extended umask, tid_en, and the config1 TID
 * filter (filter_tid5, config1 bits 0-9).
 */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4071
4072 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4073 {
4074         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4075
4076         reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4077                     box->pmu->type->msr_offset * box->pmu->pmu_idx;
4078         reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4079         reg1->idx = 0;
4080
4081         return 0;
4082 }
4083
4084 static void snr_cha_enable_event(struct intel_uncore_box *box,
4085                                    struct perf_event *event)
4086 {
4087         struct hw_perf_event *hwc = &event->hw;
4088         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4089
4090         if (reg1->idx != EXTRA_REG_NONE)
4091                 wrmsrl(reg1->reg, reg1->config);
4092
4093         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4094 }
4095
/* CHA ops: SNR-specific enable/hw_config for the TID filter register. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};
4105
/* SNR CHA PMON: 4 counters per box, 6 boxes, MSR based. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4120
/* SNR IIO event format: 9-bit threshold plus channel and FC masks. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4136
/* SNR IIO PMON: 4 counters per box, 5 boxes, MSR based. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4151
/* SNR IRP PMON: 2 counters per box, 5 boxes, MSR based. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4165
/* SNR M2PCIe PMON: 4 counters per box, 5 boxes, MSR based (unlike SKX). */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4179
/*
 * Set up the PCU box filter for occupancy-band events: event selects
 * 0xb-0xe map to filter bands 0-3 programmed via config1.
 */
static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): the filter_band format attrs describe 8-bit
		 * fields at successive config1 byte offsets, which would
		 * suggest (0xff << (reg1->idx * 8)); confirm whether the
		 * single-bit shift here is intentional.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
4193
/* PCU ops: common IVB-EP MSR ops plus SNR filter setup and constraints. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4200
/* SNR PCU PMON: one box, 4 counters; shares the SKX PCU format group. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4214
/* Free-running counter groups of the SNR IIO box; index snr_iio_freerunning[]. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/* { base MSR, counter offset, box offset, #counters, counter width } */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4226
/*
 * Event aliases for the SNR IIO free-running counters; pseudo event
 * 0xff, umask selects group/port.  Bandwidth is scaled to MiB.
 */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4257
/*
 * SNR IIO free-running counters: 9 per box (1 ioclk + 8 bandwidth-in),
 * 5 boxes; reuses the SKX free-running ops and format group.
 */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4268
/* All MSR-based SNR uncore types, installed by snr_uncore_cpu_init(). */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};
4279
/* Register the SNR MSR uncores with the uncore core. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4284
4285 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4286 {
4287         struct pci_dev *pdev = box->pci_dev;
4288         int box_ctl = uncore_pci_box_ctl(box);
4289
4290         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4291         pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4292 }
4293
/* PCI ops for the SNR M2M box; only init_box differs from the SNB-EP set. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4302
/* SNR M2M event format: generic fields with a wider extended umask. */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
4316
/* SNR M2M PMON: one box, 4 counters, PCI based. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4330
/*
 * SNR PCIe3 uncore PMU type: one box with four 48-bit counters; reuses
 * the IvyBridge-EP PCI ops and format group unchanged.
 */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4343
/* Indices into snr_pci_uncores[]; also encoded in pci_device_id driver_data. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};
4348
/* NULL-terminated list of SNR PCI-based uncore PMU types. */
static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};
4354
/*
 * PCI IDs of the SNR uncore PMON devices.  driver_data packs the
 * device/function location and the snr_pci_uncores[] index via
 * UNCORE_PCI_DEV_FULL_DATA.
 */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* PCIe3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};
4366
/* Skeleton pci_driver; probe/remove are filled in by the uncore core. */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
4371
4372 int snr_uncore_pci_init(void)
4373 {
4374         /* SNR UBOX DID */
4375         int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4376                                          SKX_GIDNIDMAP, true);
4377
4378         if (ret)
4379                 return ret;
4380
4381         uncore_pci_uncores = snr_pci_uncores;
4382         uncore_pci_driver = &snr_uncore_pci_driver;
4383         return 0;
4384 }
4385
4386 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4387 {
4388         struct pci_dev *mc_dev = NULL;
4389         int phys_id, pkg;
4390
4391         while (1) {
4392                 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4393                 if (!mc_dev)
4394                         break;
4395                 phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4396                 if (phys_id < 0)
4397                         continue;
4398                 pkg = topology_phys_to_logical_pkg(phys_id);
4399                 if (pkg < 0)
4400                         continue;
4401                 else if (pkg == id)
4402                         break;
4403         }
4404         return mc_dev;
4405 }
4406
4407 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4408 {
4409         struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4410         unsigned int box_ctl = uncore_mmio_box_ctl(box);
4411         resource_size_t addr;
4412         u32 pci_dword;
4413
4414         if (!pdev)
4415                 return;
4416
4417         pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4418         addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4419
4420         pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
4421         addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4422
4423         addr += box_ctl;
4424
4425         box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
4426         if (!box->io_addr)
4427                 return;
4428
4429         writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4430 }
4431
4432 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4433 {
4434         u32 config;
4435
4436         if (!box->io_addr)
4437                 return;
4438
4439         config = readl(box->io_addr);
4440         config |= SNBEP_PMON_BOX_CTL_FRZ;
4441         writel(config, box->io_addr);
4442 }
4443
4444 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4445 {
4446         u32 config;
4447
4448         if (!box->io_addr)
4449                 return;
4450
4451         config = readl(box->io_addr);
4452         config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4453         writel(config, box->io_addr);
4454 }
4455
4456 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4457                                            struct perf_event *event)
4458 {
4459         struct hw_perf_event *hwc = &event->hw;
4460
4461         if (!box->io_addr)
4462                 return;
4463
4464         writel(hwc->config | SNBEP_PMON_CTL_EN,
4465                box->io_addr + hwc->config_base);
4466 }
4467
4468 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4469                                             struct perf_event *event)
4470 {
4471         struct hw_perf_event *hwc = &event->hw;
4472
4473         if (!box->io_addr)
4474                 return;
4475
4476         writel(hwc->config, box->io_addr + hwc->config_base);
4477 }
4478
/* MMIO-programmed PMON callbacks shared by the SNR IMC boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
4488
/*
 * Named IMC events: clockticks plus CAS read/write counts, scaled to
 * MiB (64-byte cache lines: 64 / 2^20 = 6.103515625e-5).
 */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4499
/*
 * SNR IMC uncore PMU type: two MMIO-programmed boxes, each with four
 * 48-bit general counters plus a 48-bit fixed counter.
 */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4517
/* Free-running counter types on the SNR IMC: DCLK ticks and DDR traffic. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
4524
/*
 * Free-running counter geometry per type.
 * NOTE(review): initializer order appears to be { counter base offset,
 * per-counter stride, box offset, number of counters, counter width in
 * bits } — confirm against struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
4529
4530 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4531         INTEL_UNCORE_EVENT_DESC(dclk,           "event=0xff,umask=0x10"),
4532
4533         INTEL_UNCORE_EVENT_DESC(read,           "event=0xff,umask=0x20"),
4534         INTEL_UNCORE_EVENT_DESC(read.scale,     "3.814697266e-6"),
4535         INTEL_UNCORE_EVENT_DESC(read.unit,      "MiB"),
4536         INTEL_UNCORE_EVENT_DESC(write,          "event=0xff,umask=0x21"),
4537         INTEL_UNCORE_EVENT_DESC(write.scale,    "3.814697266e-6"),
4538         INTEL_UNCORE_EVENT_DESC(write.unit,     "MiB"),
4539 };
4540
/*
 * Free-running counters cannot be started/stopped, so only box map/unmap,
 * read, and event validation (hw_config) callbacks are provided.
 */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4547
/* Pseudo PMU type exposing the SNR IMC free-running counters. */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4558
/* NULL-terminated list of SNR MMIO-based uncore PMU types. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
4564
/* Register the SNR MMIO-based uncore PMU types with the core driver. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4569
4570 /* end of SNR uncore support */