// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel IOMMU PerfMon
 * Copyright(c) 2023 Intel Corporation.
 */
#define pr_fmt(fmt)	"DMAR: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/dmar.h>
#include "iommu.h"
#include "perfmon.h"

PMU_FORMAT_ATTR(event,		"config:0-27");		/* ES: Events Select */
PMU_FORMAT_ATTR(event_group,	"config:28-31");	/* EGI: Event Group Index */

static struct attribute *iommu_pmu_format_attrs[] = {
	&format_attr_event_group.attr,
	&format_attr_event.attr,
	NULL
};

static struct attribute_group iommu_pmu_format_attr_group = {
	.name = "format",
	.attrs = iommu_pmu_format_attrs,
};

/* The available events are added in attr_update later */
static struct attribute *attrs_empty[] = {
	NULL
};

static struct attribute_group iommu_pmu_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

static const struct attribute_group *iommu_pmu_attr_groups[] = {
	&iommu_pmu_format_attr_group,
	&iommu_pmu_events_attr_group,
	NULL
};

static inline struct iommu_pmu *dev_to_iommu_pmu(struct device *dev)
{
	/*
	 * The perf_event subsystem creates its own dev for each PMU.
	 * See pmu_dev_alloc().
	 */
	return container_of(dev_get_drvdata(dev), struct iommu_pmu, pmu);
}

#define IOMMU_PMU_ATTR(_name, _format, _filter)				\
	PMU_FORMAT_ATTR(_name, _format);				\
									\
static struct attribute *_name##_attr[] = {				\
	&format_attr_##_name.attr,					\
	NULL								\
};									\
									\
static umode_t								\
_name##_is_visible(struct kobject *kobj, struct attribute *attr, int i) \
{									\
	struct device *dev = kobj_to_dev(kobj);				\
	struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev);		\
									\
	if (!iommu_pmu)							\
		return 0;						\
	return (iommu_pmu->filter & _filter) ? attr->mode : 0;		\
}									\
									\
static struct attribute_group _name = {					\
	.name = "format",						\
	.attrs = _name##_attr,						\
	.is_visible = _name##_is_visible,				\
};

IOMMU_PMU_ATTR(filter_requester_id_en,	"config1:0",		IOMMU_PMU_FILTER_REQUESTER_ID);
IOMMU_PMU_ATTR(filter_domain_en,	"config1:1",		IOMMU_PMU_FILTER_DOMAIN);
IOMMU_PMU_ATTR(filter_pasid_en,		"config1:2",		IOMMU_PMU_FILTER_PASID);
IOMMU_PMU_ATTR(filter_ats_en,		"config1:3",		IOMMU_PMU_FILTER_ATS);
IOMMU_PMU_ATTR(filter_page_table_en,	"config1:4",		IOMMU_PMU_FILTER_PAGE_TABLE);
IOMMU_PMU_ATTR(filter_requester_id,	"config1:16-31",	IOMMU_PMU_FILTER_REQUESTER_ID);
IOMMU_PMU_ATTR(filter_domain,		"config1:32-47",	IOMMU_PMU_FILTER_DOMAIN);
IOMMU_PMU_ATTR(filter_pasid,		"config2:0-21",		IOMMU_PMU_FILTER_PASID);
IOMMU_PMU_ATTR(filter_ats,		"config2:24-28",	IOMMU_PMU_FILTER_ATS);
IOMMU_PMU_ATTR(filter_page_table,	"config2:32-36",	IOMMU_PMU_FILTER_PAGE_TABLE);

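/*
 * Decoding helpers for the filter bits programmed by user space: the
 * *_en macros extract the per-filter enable bits (config1:0-4) and the
 * *_get macros extract the filter values, matching the format
 * attributes defined above.
 */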
#define iommu_pmu_en_requester_id(e)		((e) & 0x1)
#define iommu_pmu_en_domain(e)			(((e) >> 1) & 0x1)
#define iommu_pmu_en_pasid(e)			(((e) >> 2) & 0x1)
#define iommu_pmu_en_ats(e)			(((e) >> 3) & 0x1)
#define iommu_pmu_en_page_table(e)		(((e) >> 4) & 0x1)
#define iommu_pmu_get_requester_id(filter)	(((filter) >> 16) & 0xffff)
#define iommu_pmu_get_domain(filter)		(((filter) >> 32) & 0xffff)
#define iommu_pmu_get_pasid(filter)		((filter) & 0x3fffff)
#define iommu_pmu_get_ats(filter)		(((filter) >> 24) & 0x1f)
#define iommu_pmu_get_page_table(filter)	(((filter) >> 32) & 0x1f)

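/*
 * Each counter owns IOMMU_PMU_CFG_OFFSET bytes of configuration space;
 * the filter registers start IOMMU_PMU_CFG_SIZE bytes into it, one
 * 32-bit register per filter type. The filter's bit position in the
 * enumerated filter mask (ffs(_filter) - 1) selects which filter
 * register to program.
 */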
#define iommu_pmu_set_filter(_name, _config, _filter, _idx, _econfig)		\
{										\
	if ((iommu_pmu->filter & _filter) && iommu_pmu_en_##_name(_econfig)) {	\
		dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET +	\
			    IOMMU_PMU_CFG_SIZE +				\
			    (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET,	\
			    iommu_pmu_get_##_name(_config) | IOMMU_PMU_FILTER_EN);\
	}									\
}

#define iommu_pmu_clear_filter(_filter, _idx)					\
{										\
	if (iommu_pmu->filter & _filter) {					\
		dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET +	\
			    IOMMU_PMU_CFG_SIZE +				\
			    (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET,	\
			    0);							\
	}									\
}

/*
 * Define the event attr related functions
 * Input: _name: event attr name
 *        _string: string of the event in sysfs
 *        _g_idx: event group encoding
 *        _event: event encoding
 */
#define IOMMU_PMU_EVENT_ATTR(_name, _string, _g_idx, _event)			\
	PMU_EVENT_ATTR_STRING(_name, event_attr_##_name, _string)		\
										\
static struct attribute *_name##_attr[] = {					\
	&event_attr_##_name.attr.attr,						\
	NULL									\
};										\
										\
static umode_t									\
_name##_is_visible(struct kobject *kobj, struct attribute *attr, int i)	\
{										\
	struct device *dev = kobj_to_dev(kobj);					\
	struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev);			\
										\
	if (!iommu_pmu)								\
		return 0;							\
	return (iommu_pmu->evcap[_g_idx] & _event) ? attr->mode : 0;		\
}										\
										\
static struct attribute_group _name = {					\
	.name = "events",							\
	.attrs = _name##_attr,							\
	.is_visible = _name##_is_visible,					\
};

IOMMU_PMU_EVENT_ATTR(iommu_clocks,		"event_group=0x0,event=0x001", 0x0, 0x001)
IOMMU_PMU_EVENT_ATTR(iommu_requests,		"event_group=0x0,event=0x002", 0x0, 0x002)
IOMMU_PMU_EVENT_ATTR(pw_occupancy,		"event_group=0x0,event=0x004", 0x0, 0x004)
IOMMU_PMU_EVENT_ATTR(ats_blocked,		"event_group=0x0,event=0x008", 0x0, 0x008)
IOMMU_PMU_EVENT_ATTR(iommu_mrds,		"event_group=0x1,event=0x001", 0x1, 0x001)
IOMMU_PMU_EVENT_ATTR(iommu_mem_blocked,		"event_group=0x1,event=0x020", 0x1, 0x020)
IOMMU_PMU_EVENT_ATTR(pg_req_posted,		"event_group=0x1,event=0x040", 0x1, 0x040)
IOMMU_PMU_EVENT_ATTR(ctxt_cache_lookup,		"event_group=0x2,event=0x001", 0x2, 0x001)
IOMMU_PMU_EVENT_ATTR(ctxt_cache_hit,		"event_group=0x2,event=0x002", 0x2, 0x002)
IOMMU_PMU_EVENT_ATTR(pasid_cache_lookup,	"event_group=0x2,event=0x004", 0x2, 0x004)
IOMMU_PMU_EVENT_ATTR(pasid_cache_hit,		"event_group=0x2,event=0x008", 0x2, 0x008)
IOMMU_PMU_EVENT_ATTR(ss_nonleaf_lookup,		"event_group=0x2,event=0x010", 0x2, 0x010)
IOMMU_PMU_EVENT_ATTR(ss_nonleaf_hit,		"event_group=0x2,event=0x020", 0x2, 0x020)
IOMMU_PMU_EVENT_ATTR(fs_nonleaf_lookup,		"event_group=0x2,event=0x040", 0x2, 0x040)
IOMMU_PMU_EVENT_ATTR(fs_nonleaf_hit,		"event_group=0x2,event=0x080", 0x2, 0x080)
IOMMU_PMU_EVENT_ATTR(hpt_nonleaf_lookup,	"event_group=0x2,event=0x100", 0x2, 0x100)
IOMMU_PMU_EVENT_ATTR(hpt_nonleaf_hit,		"event_group=0x2,event=0x200", 0x2, 0x200)
IOMMU_PMU_EVENT_ATTR(iotlb_lookup,		"event_group=0x3,event=0x001", 0x3, 0x001)
IOMMU_PMU_EVENT_ATTR(iotlb_hit,			"event_group=0x3,event=0x002", 0x3, 0x002)
IOMMU_PMU_EVENT_ATTR(hpt_leaf_lookup,		"event_group=0x3,event=0x004", 0x3, 0x004)
IOMMU_PMU_EVENT_ATTR(hpt_leaf_hit,		"event_group=0x3,event=0x008", 0x3, 0x008)
IOMMU_PMU_EVENT_ATTR(int_cache_lookup,		"event_group=0x4,event=0x001", 0x4, 0x001)
IOMMU_PMU_EVENT_ATTR(int_cache_hit_nonposted,	"event_group=0x4,event=0x002", 0x4, 0x002)
IOMMU_PMU_EVENT_ATTR(int_cache_hit_posted,	"event_group=0x4,event=0x004", 0x4, 0x004)

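/*
 * All optional filter and event attribute groups are registered via
 * attr_update, so their is_visible() callbacks can hide anything the
 * probed IOMMU does not support.
 */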
static const struct attribute_group *iommu_pmu_attr_update[] = {
	&filter_requester_id_en,
	&filter_domain_en,
	&filter_pasid_en,
	&filter_ats_en,
	&filter_page_table_en,
	&filter_requester_id,
	&filter_domain,
	&filter_pasid,
	&filter_ats,
	&filter_page_table,
	&iommu_clocks,
	&iommu_requests,
	&pw_occupancy,
	&ats_blocked,
	&iommu_mrds,
	&iommu_mem_blocked,
	&pg_req_posted,
	&ctxt_cache_lookup,
	&ctxt_cache_hit,
	&pasid_cache_lookup,
	&pasid_cache_hit,
	&ss_nonleaf_lookup,
	&ss_nonleaf_hit,
	&fs_nonleaf_lookup,
	&fs_nonleaf_hit,
	&hpt_nonleaf_lookup,
	&hpt_nonleaf_hit,
	&iotlb_lookup,
	&iotlb_hit,
	&hpt_leaf_lookup,
	&hpt_leaf_hit,
	&int_cache_lookup,
	&int_cache_hit_nonposted,
	&int_cache_hit_posted,
	NULL
};

static inline void __iomem *
iommu_event_base(struct iommu_pmu *iommu_pmu, int idx)
{
	return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride;
}

static inline void __iomem *
iommu_config_base(struct iommu_pmu *iommu_pmu, int idx)
{
	return iommu_pmu->cfg_reg + idx * IOMMU_PMU_CFG_OFFSET;
}

static inline struct iommu_pmu *iommu_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct iommu_pmu, pmu);
}

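/*
 * Translate the user-visible config layout (ES in config:0-27, EGI in
 * config:28-31) into the hardware counter configuration format, and
 * always enable the interrupt on overflow.
 */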
static inline u64 iommu_event_config(struct perf_event *event)
{
	u64 config = event->attr.config;

	return (iommu_event_select(config) << IOMMU_EVENT_CFG_ES_SHIFT) |
	       (iommu_event_group(config) << IOMMU_EVENT_CFG_EGI_SHIFT) |
	       IOMMU_EVENT_CFG_INT;
}

static inline bool is_iommu_pmu_event(struct iommu_pmu *iommu_pmu,
				      struct perf_event *event)
{
	return event->pmu == &iommu_pmu->pmu;
}

static int iommu_pmu_validate_event(struct perf_event *event)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	u32 event_group = iommu_event_group(event->attr.config);

	if (event_group >= iommu_pmu->num_eg)
		return -EINVAL;

	return 0;
}

static int iommu_pmu_validate_group(struct perf_event *event)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct perf_event *sibling;
	int nr = 0;

	/*
	 * All events in a group must be scheduled simultaneously.
	 * Check whether there are enough counters for all the events.
	 */
	for_each_sibling_event(sibling, event->group_leader) {
		if (!is_iommu_pmu_event(iommu_pmu, sibling) ||
		    sibling->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (++nr > iommu_pmu->num_cntr)
			return -EINVAL;
	}

	return 0;
}

static int iommu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* sampling not supported */
	if (event->attr.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (iommu_pmu_validate_event(event))
		return -EINVAL;

	hwc->config = iommu_event_config(event);

	return iommu_pmu_validate_group(event);
}

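/*
 * Fold the delta between the current hardware count and the last
 * observed value into event->count. The local64_xchg() retry loop
 * keeps prev_count consistent if another context updates it
 * concurrently.
 */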
static void iommu_pmu_event_update(struct perf_event *event)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_count, new_count, delta;
	int shift = 64 - iommu_pmu->cntr_width;

again:
	prev_count = local64_read(&hwc->prev_count);
	new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
	if (local64_xchg(&hwc->prev_count, new_count) != prev_count)
		goto again;

	/*
	 * The counter width is enumerated. Always shift the counter
	 * before using it, so the wraparound of a narrower hardware
	 * counter is handled by the 64-bit arithmetic, e.g. a 48-bit
	 * counter gives shift = 16.
	 */
	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

static void iommu_pmu_start(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct intel_iommu *iommu = iommu_pmu->iommu;
	struct hw_perf_event *hwc = &event->hw;
	u64 count;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->idx < 0 || hwc->idx >= IOMMU_PMU_IDX_MAX))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Always reprogram the period */
	count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
	local64_set((&hwc->prev_count), count);

	/*
	 * Errors from ecmd are ignored:
	 * - The existing perf_event subsystem doesn't handle them.
	 *   Only the IOMMU PMU can return a runtime HW error, and it is
	 *   not worth changing the generic interfaces for this one case.
	 * - It's a HW corner case, very unlikely to happen, and there is
	 *   nothing SW can do about it.
	 * - At worst the user sees <not counted> in the perf output,
	 *   which is itself a hint.
	 */
	ecmd_submit_sync(iommu, DMA_ECMD_ENABLE, hwc->idx, 0);

	perf_event_update_userpage(event);
}

static void iommu_pmu_stop(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct intel_iommu *iommu = iommu_pmu->iommu;
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		ecmd_submit_sync(iommu, DMA_ECMD_DISABLE, hwc->idx, 0);

		iommu_pmu_event_update(event);

		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

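/*
 * Counters may support only a subset of the event groups, as
 * enumerated in the per-counter capability registers; verify the event
 * is countable on this particular counter before scheduling it there.
 */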
static int
iommu_pmu_validate_per_cntr_event(struct iommu_pmu *iommu_pmu,
				  int idx, struct perf_event *event)
{
	u32 event_group = iommu_event_group(event->attr.config);
	u32 select = iommu_event_select(event->attr.config);

	if (!(iommu_pmu->cntr_evcap[idx][event_group] & select))
		return -EINVAL;

	return 0;
}

static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/*
	 * Counters that support only a limited set of events are usually
	 * at the end. Schedule them first to leave the more capable
	 * counters free for other events.
	 */
	for (idx = iommu_pmu->num_cntr - 1; idx >= 0; idx--) {
		if (test_and_set_bit(idx, iommu_pmu->used_mask))
			continue;
		/* Check per-counter event capabilities */
		if (!iommu_pmu_validate_per_cntr_event(iommu_pmu, idx, event))
			break;
		clear_bit(idx, iommu_pmu->used_mask);
	}
	if (idx < 0)
		return -EINVAL;

	iommu_pmu->event_list[idx] = event;
	hwc->idx = idx;

	/* config events */
	dmar_writeq(iommu_config_base(iommu_pmu, idx), hwc->config);

	iommu_pmu_set_filter(requester_id, event->attr.config1,
			     IOMMU_PMU_FILTER_REQUESTER_ID, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(domain, event->attr.config1,
			     IOMMU_PMU_FILTER_DOMAIN, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(pasid, event->attr.config1,
			     IOMMU_PMU_FILTER_PASID, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(ats, event->attr.config2,
			     IOMMU_PMU_FILTER_ATS, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(page_table, event->attr.config2,
			     IOMMU_PMU_FILTER_PAGE_TABLE, idx,
			     event->attr.config1);

	return 0;
}

static int iommu_pmu_add(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	ret = iommu_pmu_assign_event(iommu_pmu, event);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		iommu_pmu_start(event, 0);

	return 0;
}

static void iommu_pmu_del(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	int idx = event->hw.idx;

	iommu_pmu_stop(event, PERF_EF_UPDATE);

	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_REQUESTER_ID, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_DOMAIN, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_PASID, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_ATS, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_PAGE_TABLE, idx);

	iommu_pmu->event_list[idx] = NULL;
	event->hw.idx = -1;
	clear_bit(idx, iommu_pmu->used_mask);

	perf_event_update_userpage(event);
}

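/*
 * pmu_enable()/pmu_disable() map to the enhanced command set's
 * unfreeze/freeze operations, which gate all counters of this IOMMU at
 * once around scheduling changes.
 */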
static void iommu_pmu_enable(struct pmu *pmu)
{
	struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
	struct intel_iommu *iommu = iommu_pmu->iommu;

	ecmd_submit_sync(iommu, DMA_ECMD_UNFREEZE, 0, 0);
}

static void iommu_pmu_disable(struct pmu *pmu)
{
	struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
	struct intel_iommu *iommu = iommu_pmu->iommu;

	ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);
}

static int __iommu_pmu_register(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu = iommu->pmu;

	iommu_pmu->pmu.name = iommu->name;
	iommu_pmu->pmu.task_ctx_nr = perf_invalid_context;
	iommu_pmu->pmu.event_init = iommu_pmu_event_init;
	iommu_pmu->pmu.pmu_enable = iommu_pmu_enable;
	iommu_pmu->pmu.pmu_disable = iommu_pmu_disable;
	iommu_pmu->pmu.add = iommu_pmu_add;
	iommu_pmu->pmu.del = iommu_pmu_del;
	iommu_pmu->pmu.start = iommu_pmu_start;
	iommu_pmu->pmu.stop = iommu_pmu_stop;
	iommu_pmu->pmu.read = iommu_pmu_event_update;
	iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups;
	iommu_pmu->pmu.attr_update = iommu_pmu_attr_update;
	iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	iommu_pmu->pmu.module = THIS_MODULE;

	return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
}

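/*
 * The locations of the configuration, counter, and overflow registers
 * are not fixed; each DMAR_PERF*OFF_REG holds the offset of the
 * corresponding register block from the register base.
 */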
static inline void __iomem *
get_perf_reg_address(struct intel_iommu *iommu, u32 offset)
{
	u32 off = dmar_readl(iommu->reg + offset);

	return iommu->reg + off;
}

int alloc_iommu_pmu(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu;
	int i, j, ret;
	u64 perfcap;
	u32 cap;

	if (!ecap_pms(iommu->ecap))
		return 0;

	/* The IOMMU PMU requires the ECMD support as well */
	if (!cap_ecmds(iommu->cap))
		return -ENODEV;

	perfcap = dmar_readq(iommu->reg + DMAR_PERFCAP_REG);
	/* Performance monitoring is not supported. */
	if (!perfcap)
		return -ENODEV;

	/* Sanity-check the number of counters and event groups */
	if (!pcap_num_cntr(perfcap) || !pcap_num_event_group(perfcap))
		return -ENODEV;

	/* The interrupt on overflow is required */
	if (!pcap_interrupt(perfcap))
		return -ENODEV;

	/* Check required Enhanced Command Capability */
	if (!ecmd_has_pmu_essential(iommu))
		return -ENODEV;

	iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL);
	if (!iommu_pmu)
		return -ENOMEM;

	iommu_pmu->num_cntr = pcap_num_cntr(perfcap);
	if (iommu_pmu->num_cntr > IOMMU_PMU_IDX_MAX) {
		pr_warn_once("The number of IOMMU counters %d > max(%d), clipping!",
			     iommu_pmu->num_cntr, IOMMU_PMU_IDX_MAX);
		iommu_pmu->num_cntr = IOMMU_PMU_IDX_MAX;
	}

	iommu_pmu->cntr_width = pcap_cntr_width(perfcap);
	iommu_pmu->filter = pcap_filters_mask(perfcap);
	iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap);
	iommu_pmu->num_eg = pcap_num_event_group(perfcap);

	iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL);
	if (!iommu_pmu->evcap) {
		ret = -ENOMEM;
		goto free_pmu;
	}

	/* Parse event group capabilities */
	for (i = 0; i < iommu_pmu->num_eg; i++) {
		u64 pcap;

		pcap = dmar_readq(iommu->reg + DMAR_PERFEVNTCAP_REG +
				  i * IOMMU_PMU_CAP_REGS_STEP);
		iommu_pmu->evcap[i] = pecap_es(pcap);
	}

	iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL);
	if (!iommu_pmu->cntr_evcap) {
		ret = -ENOMEM;
		goto free_pmu_evcap;
	}
	for (i = 0; i < iommu_pmu->num_cntr; i++) {
		iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL);
		if (!iommu_pmu->cntr_evcap[i]) {
			ret = -ENOMEM;
			goto free_pmu_cntr_evcap;
		}
		/*
		 * Set to the global capabilities; adjusted according to
		 * per-counter capabilities later.
		 */
		for (j = 0; j < iommu_pmu->num_eg; j++)
			iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j];
	}

	iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG);
	iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG);
	iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG);

	/*
	 * Check per-counter capabilities. All counters should have the
	 * same capabilities on Interrupt on Overflow Support and Counter
	 * Width.
	 */
	for (i = 0; i < iommu_pmu->num_cntr; i++) {
		cap = dmar_readl(iommu_pmu->cfg_reg +
				 i * IOMMU_PMU_CFG_OFFSET +
				 IOMMU_PMU_CFG_CNTRCAP_OFFSET);
		if (!iommu_cntrcap_pcc(cap))
			continue;

		/*
		 * It's possible that some counters have a different
		 * capability because of e.g., a HW bug. Check the corner
		 * case here and simply drop those counters.
		 */
		if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) ||
		    !iommu_cntrcap_ios(cap)) {
			iommu_pmu->num_cntr = i;
			pr_warn("PMU counter capability inconsistent, counter number reduced to %d\n",
				iommu_pmu->num_cntr);
		}

		/* Clear the pre-defined events group */
		for (j = 0; j < iommu_pmu->num_eg; j++)
			iommu_pmu->cntr_evcap[i][j] = 0;

		/* Override with per-counter event capabilities */
		for (j = 0; j < iommu_cntrcap_egcnt(cap); j++) {
			cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET +
					 IOMMU_PMU_CFG_CNTREVCAP_OFFSET +
					 (j * IOMMU_PMU_OFF_REGS_STEP));
			iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap);
			/*
			 * Some events may only be supported by a specific counter.
			 * Track them in the evcap as well.
			 */
			iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap);
		}
	}

	iommu_pmu->iommu = iommu;
	iommu->pmu = iommu_pmu;

	return 0;

free_pmu_cntr_evcap:
	for (i = 0; i < iommu_pmu->num_cntr; i++)
		kfree(iommu_pmu->cntr_evcap[i]);
	kfree(iommu_pmu->cntr_evcap);
free_pmu_evcap:
	kfree(iommu_pmu->evcap);
free_pmu:
	kfree(iommu_pmu);

	return ret;
}

void free_iommu_pmu(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu = iommu->pmu;

	if (!iommu_pmu)
		return;

	if (iommu_pmu->evcap) {
		int i;

		for (i = 0; i < iommu_pmu->num_cntr; i++)
			kfree(iommu_pmu->cntr_evcap[i]);
		kfree(iommu_pmu->cntr_evcap);
	}

	kfree(iommu_pmu->evcap);
	kfree(iommu_pmu);
	iommu->pmu = NULL;
}

void iommu_pmu_register(struct intel_iommu *iommu)
{
	if (!iommu->pmu)
		return;

	if (__iommu_pmu_register(iommu)) {
		pr_err("Failed to register PMU for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		free_iommu_pmu(iommu);
	}
}

void iommu_pmu_unregister(struct intel_iommu *iommu)
{
	if (iommu->pmu)
		perf_pmu_unregister(&iommu->pmu->pmu);
}