2 * Intel Cache Quality-of-Service Monitoring (CQM) support.
4 * Based very, very heavily on work by Peter Zijlstra.
7 #include <linux/perf_event.h>
8 #include <linux/slab.h>
9 #include <asm/cpu_device_id.h>
10 #include "../perf_event.h"
12 #define MSR_IA32_PQR_ASSOC 0x0c8f
13 #define MSR_IA32_QM_CTR 0x0c8e
14 #define MSR_IA32_QM_EVTSEL 0x0c8d
16 #define MBM_CNTR_WIDTH 24
18 * Guaranteed time in ms, as per the SDM, for which MBM counters will not overflow.
20 #define MBM_CTR_OVERFLOW_TIME 1000
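/*
 * Illustrative sketch (this helper does not exist in the driver): a
 * 24-bit counter holds at most (1 << MBM_CNTR_WIDTH) - 1 raw
 * increments, and update_sample() below multiplies raw increments by
 * cqm_l3_scale to get bytes, so this is roughly how much traffic can be
 * accounted for between two reads. Polling every MBM_CTR_OVERFLOW_TIME
 * ms keeps reads inside the SDM's guaranteed no-overflow window.
 */
static inline u64 mbm_max_bytes_between_reads(u32 scale)
{
	return ((1ULL << MBM_CNTR_WIDTH) - 1) * (u64)scale;
}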
22 static u32 cqm_max_rmid = -1;
23 static unsigned int cqm_l3_scale; /* supposedly cacheline size */
24 static bool cqm_enabled, mbm_enabled;
25 unsigned int mbm_socket_max;
28 * struct intel_pqr_state - State cache for the PQR MSR
29 * @rmid: The cached Resource Monitoring ID
30 * @closid: The cached Class Of Service ID
31 * @rmid_usecnt: The usage counter for rmid
33 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
34 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
35 * contains both parts, so we need to cache them.
37 * The cache also helps to avoid pointless updates if the value does
40 struct intel_pqr_state {
47 * The cached intel_pqr_state is strictly per CPU and can never be
48 * updated from a remote CPU. Both functions which modify the state
49 * (intel_cqm_event_start and intel_cqm_event_stop) are called with
50 * interrupts disabled, which is sufficient for the protection.
52 static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
53 static struct hrtimer *mbm_timers;
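/*
 * Sketch only (helper not present in this file): the value that lands
 * in MSR_IA32_PQR_ASSOC is the cached closid in the upper 32 bits and
 * the rmid in the low bits, which is what
 * wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid) writes further down.
 */
static inline u64 pqr_assoc_value(u32 rmid, u32 closid)
{
	return ((u64)closid << 32) | rmid;
}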
55 * struct sample - mbm event's (local or total) data
56 * @total_bytes: #bytes since we began monitoring
57 * @prev_msr: previous value of MSR
65 * samples profiled for total memory bandwidth type events
67 static struct sample *mbm_total;
69 * samples profiled for local memory bandwidth type events
71 static struct sample *mbm_local;
73 #define pkg_id topology_physical_package_id(smp_processor_id())
75 * rmid_2_index returns the index for the rmid in mbm_local/mbm_total array.
76 * mbm_total[] and mbm_local[] are linearly indexed by socket# * max number of
77 * rmids per socket; an example is given below:
78 * RMID1 of Socket0: vrmid = 1
79 * RMID1 of Socket1: vrmid = 1 * (cqm_max_rmid + 1) + 1
80 * RMID1 of Socket2: vrmid = 2 * (cqm_max_rmid + 1) + 1
82 #define rmid_2_index(rmid) ((pkg_id * (cqm_max_rmid + 1)) + rmid)
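/*
 * Worked example of the linear indexing above (standalone sketch; the
 * helper and the numbers are illustrative): with cqm_max_rmid == 3
 * there are 4 RMIDs per socket, so RMID 1 maps to vrmid 1 on socket 0,
 * 5 on socket 1 and 9 on socket 2, matching the table in the comment.
 */
static inline u32 vrmid_of(u32 pkg, u32 rmid, u32 max_rmid)
{
	return pkg * (max_rmid + 1) + rmid;
}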
84 * Protects cache_groups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
85 * Also protects event->hw.cqm_rmid
87 * Hold either for stability, both for modification of ->hw.cqm_rmid.
89 static DEFINE_MUTEX(cache_mutex);
90 static DEFINE_RAW_SPINLOCK(cache_lock);
93 * Groups of events that have the same target(s), one RMID per group.
95 static LIST_HEAD(cache_groups);
98 * Mask of CPUs for reading CQM values. We only need one per socket.
100 static cpumask_t cqm_cpumask;
102 #define RMID_VAL_ERROR (1ULL << 63)
103 #define RMID_VAL_UNAVAIL (1ULL << 62)
106 * Event IDs are used to program IA32_QM_EVTSEL before reading the event
107 * counter from IA32_QM_CTR.
109 #define QOS_L3_OCCUP_EVENT_ID 0x01
110 #define QOS_MBM_TOTAL_EVENT_ID 0x02
111 #define QOS_MBM_LOCAL_EVENT_ID 0x03
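/*
 * Sketch (hypothetical helper): __rmid_read() and update_sample() below
 * program IA32_QM_EVTSEL via wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid),
 * i.e. the event ID ends up in the low dword and the RMID in the upper
 * 32 bits of the MSR, before IA32_QM_CTR is read back.
 */
static inline u64 qm_evtsel_value(u32 eventid, u32 rmid)
{
	return ((u64)rmid << 32) | eventid;
}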
114 * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
116 * This rmid is always free and is guaranteed to have an associated
117 * near-zero occupancy value, i.e. no cachelines are tagged with this
118 * RMID, once __intel_cqm_rmid_rotate() returns.
120 static u32 intel_cqm_rotation_rmid;
122 #define INVALID_RMID (-1)
125 * Is @rmid valid for programming the hardware?
127 * rmid 0 is reserved by the hardware for all non-monitored tasks, which
128 * means that we should never come across an rmid with that value.
129 * Likewise, an rmid value of -1 is used to indicate "no rmid currently
130 * assigned" and is used as part of the rotation code.
132 static inline bool __rmid_valid(u32 rmid)
134 if (!rmid || rmid == INVALID_RMID)
140 static u64 __rmid_read(u32 rmid)
145 * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
146 * it just says that to increase confusion.
148 wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
149 rdmsrl(MSR_IA32_QM_CTR, val);
152 * Aside from the ERROR and UNAVAIL bits, assume this thing returns
153 * the number of cachelines tagged with @rmid.
158 enum rmid_recycle_state {
164 struct cqm_rmid_entry {
166 enum rmid_recycle_state state;
167 struct list_head list;
168 unsigned long queue_time;
172 * cqm_rmid_free_lru - A least recently used list of RMIDs.
174 * Oldest entry at the head, newest (most recently used) entry at the
175 * tail. This list is never traversed; it's only used to keep track of
176 * the lru order. That is, we only pick entries off the head or insert
179 * All entries on the list are 'free', and their RMIDs are not currently
180 * in use. To mark an RMID as in use, remove its entry from the lru
184 * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
186 * This list contains RMIDs that no one is currently using but that
187 * may have a non-zero occupancy value associated with them. The
188 * rotation worker moves RMIDs from the limbo list to the free list once
189 * the occupancy value drops below __intel_cqm_threshold.
191 * Both lists are protected by cache_mutex.
193 static LIST_HEAD(cqm_rmid_free_lru);
194 static LIST_HEAD(cqm_rmid_limbo_lru);
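/*
 * Simplified view of the recycling states driven by the two lists above
 * (illustration only; the real transitions are spread across
 * __put_rmid(), intel_cqm_stable() and intel_cqm_rmid_stabilize()):
 * a freed RMID starts out RMID_YOUNG on the limbo list, is considered
 * RMID_AVAILABLE once it has queued for __rmid_queue_time_ms, and only
 * moves to the free list if its occupancy has dropped to the threshold
 * or below; otherwise it is marked RMID_DIRTY and stays in limbo.
 */
static inline enum rmid_recycle_state
classify_limbo_rmid(bool waited_min_queue_time, bool below_threshold)
{
	if (!waited_min_queue_time)
		return RMID_YOUNG;
	return below_threshold ? RMID_AVAILABLE : RMID_DIRTY;
}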
197 * We use a simple array of pointers so that we can look up a struct
198 * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid()
199 * and __put_rmid() from having to worry about dealing with struct
200 * cqm_rmid_entry - they just deal with rmids, i.e. integers.
202 * Once this array is initialized it is read-only. No locks are required
205 * All entries for all RMIDs can be looked up in this array at all
208 static struct cqm_rmid_entry **cqm_rmid_ptrs;
210 static inline struct cqm_rmid_entry *__rmid_entry(u32 rmid)
212 struct cqm_rmid_entry *entry;
214 entry = cqm_rmid_ptrs[rmid];
215 WARN_ON(entry->rmid != rmid);
221 * Returns INVALID_RMID on failure.
223 * We expect to be called with cache_mutex held.
225 static u32 __get_rmid(void)
227 struct cqm_rmid_entry *entry;
229 lockdep_assert_held(&cache_mutex);
231 if (list_empty(&cqm_rmid_free_lru))
234 entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
235 list_del(&entry->list);
240 static void __put_rmid(u32 rmid)
242 struct cqm_rmid_entry *entry;
244 lockdep_assert_held(&cache_mutex);
246 WARN_ON(!__rmid_valid(rmid));
247 entry = __rmid_entry(rmid);
249 entry->queue_time = jiffies;
250 entry->state = RMID_YOUNG;
252 list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
255 static void cqm_cleanup(void)
262 for (i = 0; i <= cqm_max_rmid; i++)
263 kfree(cqm_rmid_ptrs[i]);
265 kfree(cqm_rmid_ptrs);
266 cqm_rmid_ptrs = NULL;
270 static int intel_cqm_setup_rmid_cache(void)
272 struct cqm_rmid_entry *entry;
273 unsigned int nr_rmids;
276 nr_rmids = cqm_max_rmid + 1;
277 cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
278 nr_rmids, GFP_KERNEL);
282 for (; r <= cqm_max_rmid; r++) {
283 struct cqm_rmid_entry *entry;
285 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
289 INIT_LIST_HEAD(&entry->list);
291 cqm_rmid_ptrs[r] = entry;
293 list_add_tail(&entry->list, &cqm_rmid_free_lru);
297 * RMID 0 is special and is always allocated. It's used for all
298 * tasks that are not monitored.
300 entry = __rmid_entry(0);
301 list_del(&entry->list);
303 mutex_lock(&cache_mutex);
304 intel_cqm_rotation_rmid = __get_rmid();
305 mutex_unlock(&cache_mutex);
315 * Determine if @a and @b measure the same set of tasks.
317 * If @a and @b measure the same set of tasks then we want to share a
320 static bool __match_event(struct perf_event *a, struct perf_event *b)
322 /* Per-cpu and task events don't mix */
323 if ((a->attach_state & PERF_ATTACH_TASK) !=
324 (b->attach_state & PERF_ATTACH_TASK))
327 #ifdef CONFIG_CGROUP_PERF
328 if (a->cgrp != b->cgrp)
332 /* If not task event, we're machine wide */
333 if (!(b->attach_state & PERF_ATTACH_TASK))
337 * Events that target the same task are placed into the same cache group.
338 * Mark it as a multi event group, so that we update ->count
339 * for every event rather than just the group leader later.
341 if (a->hw.target == b->hw.target) {
342 b->hw.is_group_event = true;
347 * Are we an inherited event?
355 #ifdef CONFIG_CGROUP_PERF
356 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
358 if (event->attach_state & PERF_ATTACH_TASK)
359 return perf_cgroup_from_task(event->hw.target, event->ctx);
366 * Determine if @a's tasks intersect with @b's tasks
368 * There are combinations of events that we explicitly prohibit,
371 * system-wide -> cgroup and task
372 * cgroup -> system-wide
374 * task -> system-wide
377 * Call this function before allocating an RMID.
379 static bool __conflict_event(struct perf_event *a, struct perf_event *b)
381 #ifdef CONFIG_CGROUP_PERF
383 * We can have any number of cgroups but only one system-wide
386 if (a->cgrp && b->cgrp) {
387 struct perf_cgroup *ac = a->cgrp;
388 struct perf_cgroup *bc = b->cgrp;
391 * This condition should have been caught in
392 * __match_event() and we should be sharing an RMID.
394 WARN_ON_ONCE(ac == bc);
396 if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
397 cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
403 if (a->cgrp || b->cgrp) {
404 struct perf_cgroup *ac, *bc;
407 * cgroup and system-wide events are mutually exclusive
409 if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
410 (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
414 * Ensure neither event is part of the other's cgroup
416 ac = event_to_cgroup(a);
417 bc = event_to_cgroup(b);
422 * Must have cgroup and non-intersecting task events.
428 * We have cgroup and task events, and the task belongs
429 * to a cgroup. Check for overlap.
431 if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
432 cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
439 * If one of them is not a task, same story as above with cgroups.
441 if (!(a->attach_state & PERF_ATTACH_TASK) ||
442 !(b->attach_state & PERF_ATTACH_TASK))
446 * Must be non-overlapping.
457 static void __intel_cqm_event_count(void *info);
458 static void init_mbm_sample(u32 rmid, u32 evt_type);
459 static void __intel_mbm_event_count(void *info);
461 static bool is_cqm_event(int e)
463 return (e == QOS_L3_OCCUP_EVENT_ID);
466 static bool is_mbm_event(int e)
468 return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
471 static void cqm_mask_call(struct rmid_read *rr)
473 if (is_mbm_event(rr->evt_type))
474 on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, rr, 1);
476 on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
480 * Exchange the RMID of a group of events.
482 static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
484 struct perf_event *event;
485 struct list_head *head = &group->hw.cqm_group_entry;
486 u32 old_rmid = group->hw.cqm_rmid;
488 lockdep_assert_held(&cache_mutex);
491 * If our RMID is being deallocated, perform a read now.
493 if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
494 struct rmid_read rr = {
496 .evt_type = group->attr.config,
497 .value = ATOMIC64_INIT(0),
501 local64_set(&group->count, atomic64_read(&rr.value));
504 raw_spin_lock_irq(&cache_lock);
506 group->hw.cqm_rmid = rmid;
507 list_for_each_entry(event, head, hw.cqm_group_entry)
508 event->hw.cqm_rmid = rmid;
510 raw_spin_unlock_irq(&cache_lock);
513 * If the allocation is for mbm, init the mbm stats.
514 * We need to check whether each event in the group is an mbm event,
515 * because there could be multiple types of events in the same group.
517 if (__rmid_valid(rmid)) {
519 if (is_mbm_event(event->attr.config))
520 init_mbm_sample(rmid, event->attr.config);
522 list_for_each_entry(event, head, hw.cqm_group_entry) {
523 if (is_mbm_event(event->attr.config))
524 init_mbm_sample(rmid, event->attr.config);
532 * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
533 * cachelines are still tagged with RMIDs in limbo, we progressively
534 * increment the threshold until we find an RMID in limbo with <=
535 * __intel_cqm_threshold lines tagged. This is designed to mitigate the
536 * problem where cachelines tagged with an RMID are not steadily being
539 * On successful rotations we decrease the threshold back towards zero.
541 * __intel_cqm_max_threshold provides an upper bound on the threshold,
542 * and is measured in bytes because it's exposed to userland.
544 static unsigned int __intel_cqm_threshold;
545 static unsigned int __intel_cqm_max_threshold;
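/*
 * Sketch of the unit conversion implied above (the helper is
 * hypothetical): userspace sets the maximum threshold in bytes via
 * sysfs, while the occupancy comparisons work in cachelines, so both
 * max_recycle_threshold_store() and __intel_cqm_rmid_rotate() divide
 * by cqm_l3_scale. E.g. assuming a 64-byte scale, a 1310720-byte
 * maximum corresponds to 20480 lines.
 */
static inline unsigned int threshold_bytes_to_lines(unsigned int bytes,
						    unsigned int l3_scale)
{
	return bytes / l3_scale;
}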
548 * Test whether an RMID's occupancy is at or below __intel_cqm_threshold on this cpu.
550 static void intel_cqm_stable(void *arg)
552 struct cqm_rmid_entry *entry;
554 list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
555 if (entry->state != RMID_AVAILABLE)
558 if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
559 entry->state = RMID_DIRTY;
564 * If we have group events waiting for an RMID that don't conflict with
565 * events already running, assign @rmid.
567 static bool intel_cqm_sched_in_event(u32 rmid)
569 struct perf_event *leader, *event;
571 lockdep_assert_held(&cache_mutex);
573 leader = list_first_entry(&cache_groups, struct perf_event,
574 hw.cqm_groups_entry);
577 list_for_each_entry_continue(event, &cache_groups,
578 hw.cqm_groups_entry) {
579 if (__rmid_valid(event->hw.cqm_rmid))
582 if (__conflict_event(event, leader))
585 intel_cqm_xchg_rmid(event, rmid);
593 * Initially use this constant for both the limbo queue time and the
594 * rotation timer interval, pmu::hrtimer_interval_ms.
596 * They don't need to be the same, but the two are related since if you
597 * rotate faster than you recycle RMIDs, you may run out of available
600 #define RMID_DEFAULT_QUEUE_TIME 250 /* ms */
602 static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
605 * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
606 * @available: number of freeable RMIDs on the limbo list
608 * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
609 * cachelines are tagged with those RMIDs. After this we can reuse them
610 * and know that the current set of active RMIDs is stable.
612 * Return %true or %false depending on whether stabilization needs to be
615 * If we return %true then @available is updated to indicate the
616 * number of RMIDs on the limbo list that have been queued for the
617 * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
618 * are above __intel_cqm_threshold.
620 static bool intel_cqm_rmid_stabilize(unsigned int *available)
622 struct cqm_rmid_entry *entry, *tmp;
624 lockdep_assert_held(&cache_mutex);
627 list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
628 unsigned long min_queue_time;
629 unsigned long now = jiffies;
632 * We hold RMIDs placed into limbo for a minimum queue
633 * time. Before the minimum queue time has elapsed we do
636 * The reasoning is that until a sufficient time has
637 * passed since we stopped using an RMID, any RMID
638 * placed onto the limbo list will likely still have
639 * data tagged in the cache, which means we'll probably
640 * fail to recycle it anyway.
642 * We can save ourselves an expensive IPI by skipping
643 * any RMIDs that have not been queued for the minimum
646 min_queue_time = entry->queue_time +
647 msecs_to_jiffies(__rmid_queue_time_ms);
649 if (time_after(min_queue_time, now))
652 entry->state = RMID_AVAILABLE;
657 * Fast return if none of the RMIDs on the limbo list have been
658 * sitting on the queue for the minimum queue time.
664 * Test whether an RMID is free for each package.
666 on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);
668 list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
670 * Exhausted all RMIDs that have waited min queue time.
672 if (entry->state == RMID_YOUNG)
675 if (entry->state == RMID_DIRTY)
678 list_del(&entry->list); /* remove from limbo */
681 * The rotation RMID gets priority if it's
682 * currently invalid, in which case we skip adding
683 * the RMID to the free lru.
685 if (!__rmid_valid(intel_cqm_rotation_rmid)) {
686 intel_cqm_rotation_rmid = entry->rmid;
691 * If we have groups waiting for RMIDs, hand
692 * them one now provided they don't conflict.
694 if (intel_cqm_sched_in_event(entry->rmid))
698 * Otherwise place it onto the free list.
700 list_add_tail(&entry->list, &cqm_rmid_free_lru);
704 return __rmid_valid(intel_cqm_rotation_rmid);
708 * Pick a victim group and move it to the tail of the group list.
709 * @next: The first group without an RMID
711 static void __intel_cqm_pick_and_rotate(struct perf_event *next)
713 struct perf_event *rotor;
716 lockdep_assert_held(&cache_mutex);
718 rotor = list_first_entry(&cache_groups, struct perf_event,
719 hw.cqm_groups_entry);
722 * The group at the front of the list should always have a valid
723 * RMID. If it doesn't then no groups have RMIDs assigned and we
724 * don't need to rotate the list.
729 rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
732 list_rotate_left(&cache_groups);
736 * Deallocate the RMIDs from any events that conflict with @event, and
737 * place them on the back of the group list.
739 static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
741 struct perf_event *group, *g;
744 lockdep_assert_held(&cache_mutex);
746 list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
750 rmid = group->hw.cqm_rmid;
753 * Skip events that don't have a valid RMID.
755 if (!__rmid_valid(rmid))
759 * No conflict? No problem! Leave the event alone.
761 if (!__conflict_event(group, event))
764 intel_cqm_xchg_rmid(group, INVALID_RMID);
770 * Attempt to rotate the groups and assign new RMIDs.
772 * We rotate for two reasons:
773 * 1. To handle the scheduling of conflicting events
774 * 2. To recycle RMIDs
776 * Rotating RMIDs is complicated because the hardware doesn't give us
779 * There are problems with the hardware interface; when you change the
780 * task:RMID map cachelines retain their 'old' tags, giving a skewed
781 * picture. In order to work around this, we must always keep one free
782 * RMID - intel_cqm_rotation_rmid.
784 * Rotation works by taking away an RMID from a group (the old RMID),
785 * and assigning the free RMID to another group (the new RMID). We must
786 * then wait for the old RMID to not be used (no cachelines tagged).
787 * This ensures that all cachelines are tagged with 'active' RMIDs. At
788 * this point we can start reading values for the new RMID and treat the
789 * old RMID as the free RMID for the next rotation.
791 * Return %true or %false depending on whether we did any rotating.
793 static bool __intel_cqm_rmid_rotate(void)
795 struct perf_event *group, *start = NULL;
796 unsigned int threshold_limit;
797 unsigned int nr_needed = 0;
798 unsigned int nr_available;
799 bool rotated = false;
801 mutex_lock(&cache_mutex);
805 * Fast path through this function if there are no groups and no
806 * RMIDs that need cleaning.
808 if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
811 list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
812 if (!__rmid_valid(group->hw.cqm_rmid)) {
820 * We have some event groups, but they all have RMIDs assigned
821 * and no RMIDs need cleaning.
823 if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
830 * We have more event groups without RMIDs than available RMIDs,
831 * or we have event groups that conflict with the ones currently
834 * We force deallocate the rmid of the group at the head of
835 * cache_groups. The first event group without an RMID then gets
836 * assigned intel_cqm_rotation_rmid. This ensures we always make
839 * Rotate the cache_groups list so the previous head is now the
842 __intel_cqm_pick_and_rotate(start);
845 * If the rotation is going to succeed, reduce the threshold so
846 * that we don't needlessly reuse dirty RMIDs.
848 if (__rmid_valid(intel_cqm_rotation_rmid)) {
849 intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
850 intel_cqm_rotation_rmid = __get_rmid();
852 intel_cqm_sched_out_conflicting_events(start);
854 if (__intel_cqm_threshold)
855 __intel_cqm_threshold--;
862 * We now need to stabilize the RMID we freed above (if any) to
863 * ensure that the next time we rotate we have an RMID with zero
866 * Alternatively, if we didn't need to perform any rotation,
867 * we'll have a bunch of RMIDs in limbo that need stabilizing.
869 threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;
871 while (intel_cqm_rmid_stabilize(&nr_available) &&
872 __intel_cqm_threshold < threshold_limit) {
873 unsigned int steal_limit;
876 * Don't spin if nobody is actively waiting for an RMID;
877 * the rotation worker will be kicked as soon as an
878 * event needs an RMID anyway.
883 /* Allow max 25% of RMIDs to be in limbo. */
884 steal_limit = (cqm_max_rmid + 1) / 4;
887 * We failed to stabilize any RMIDs so our rotation
888 * logic is now stuck. In order to make forward progress
889 * we have a few options:
891 * 1. rotate ("steal") another RMID
892 * 2. increase the threshold
895 * We do both of 1. and 2. until we hit the steal limit.
897 * The steal limit prevents all RMIDs ending up on the
898 * limbo list. This can happen if every RMID has a
899 * non-zero occupancy above threshold_limit, and the
900 * occupancy values aren't dropping fast enough.
902 * Note that there is prioritisation at work here - we'd
903 * rather increase the number of RMIDs on the limbo list
904 * than increase the threshold, because increasing the
905 * threshold skews the event data (because we reuse
906 * dirty RMIDs) - threshold bumps are a last resort.
908 if (nr_available < steal_limit)
911 __intel_cqm_threshold++;
915 mutex_unlock(&cache_mutex);
919 static void intel_cqm_rmid_rotate(struct work_struct *work);
921 static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);
923 static struct pmu intel_cqm_pmu;
925 static void intel_cqm_rmid_rotate(struct work_struct *work)
929 __intel_cqm_rmid_rotate();
931 delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
932 schedule_delayed_work(&intel_cqm_rmid_work, delay);
935 static u64 update_sample(unsigned int rmid, u32 evt_type, int first)
937 struct sample *mbm_current;
938 u32 vrmid = rmid_2_index(rmid);
939 u64 val, bytes, shift;
942 if (evt_type == QOS_MBM_LOCAL_EVENT_ID) {
943 mbm_current = &mbm_local[vrmid];
944 eventid = QOS_MBM_LOCAL_EVENT_ID;
946 mbm_current = &mbm_total[vrmid];
947 eventid = QOS_MBM_TOTAL_EVENT_ID;
950 wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
951 rdmsrl(MSR_IA32_QM_CTR, val);
952 if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
953 return mbm_current->total_bytes;
956 mbm_current->prev_msr = val;
957 mbm_current->total_bytes = 0;
958 return mbm_current->total_bytes;
962 * The h/w guarantees that counters will not overflow
963 * so long as we poll them at least once per second.
965 shift = 64 - MBM_CNTR_WIDTH;
966 bytes = (val << shift) - (mbm_current->prev_msr << shift);
969 bytes *= cqm_l3_scale;
971 mbm_current->total_bytes += bytes;
972 mbm_current->prev_msr = val;
974 return mbm_current->total_bytes;
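/*
 * Standalone sketch of the wraparound-safe delta computed above (the
 * helper and the example values are illustrative; the driver is assumed
 * to shift the difference back down before scaling by cqm_l3_scale):
 * shifting both 24-bit readings into the top of a 64-bit word lets the
 * subtraction wrap naturally, so a counter that rolled over from
 * 0xfffff0 to 0x000010 still yields a delta of 0x20 increments.
 */
static inline u64 mbm_cntr_delta(u64 cur, u64 prev)
{
	u64 shift = 64 - MBM_CNTR_WIDTH;

	return ((cur << shift) - (prev << shift)) >> shift;
}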
977 static u64 rmid_read_mbm(unsigned int rmid, u32 evt_type)
979 return update_sample(rmid, evt_type, 0);
982 static void __intel_mbm_event_init(void *info)
984 struct rmid_read *rr = info;
986 update_sample(rr->rmid, rr->evt_type, 1);
989 static void init_mbm_sample(u32 rmid, u32 evt_type)
991 struct rmid_read rr = {
993 .evt_type = evt_type,
994 .value = ATOMIC64_INIT(0),
997 /* on each socket, init sample */
998 on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_init, &rr, 1);
1002 * Find a group and set up the RMID.
1004 * If we're part of a group, we use the group's RMID.
1006 static void intel_cqm_setup_event(struct perf_event *event,
1007 struct perf_event **group)
1009 struct perf_event *iter;
1010 bool conflict = false;
1013 event->hw.is_group_event = false;
1014 list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
1015 rmid = iter->hw.cqm_rmid;
1017 if (__match_event(iter, event)) {
1018 /* All tasks in a group share an RMID */
1019 event->hw.cqm_rmid = rmid;
1021 if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
1022 init_mbm_sample(rmid, event->attr.config);
1027 * We only care about conflicts for events that are
1028 * actually scheduled in (and hence have a valid RMID).
1030 if (__conflict_event(iter, event) && __rmid_valid(rmid))
1035 rmid = INVALID_RMID;
1037 rmid = __get_rmid();
1039 if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
1040 init_mbm_sample(rmid, event->attr.config);
1042 event->hw.cqm_rmid = rmid;
1045 static void intel_cqm_event_read(struct perf_event *event)
1047 unsigned long flags;
1052 * Task events are handled by intel_cqm_event_count().
1054 if (event->cpu == -1)
1057 raw_spin_lock_irqsave(&cache_lock, flags);
1058 rmid = event->hw.cqm_rmid;
1060 if (!__rmid_valid(rmid))
1063 if (is_mbm_event(event->attr.config))
1064 val = rmid_read_mbm(rmid, event->attr.config);
1066 val = __rmid_read(rmid);
1069 * Ignore this reading on error states and do not update the value.
1071 if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
1074 local64_set(&event->count, val);
1076 raw_spin_unlock_irqrestore(&cache_lock, flags);
1079 static void __intel_cqm_event_count(void *info)
1081 struct rmid_read *rr = info;
1084 val = __rmid_read(rr->rmid);
1086 if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
1089 atomic64_add(val, &rr->value);
1092 static inline bool cqm_group_leader(struct perf_event *event)
1094 return !list_empty(&event->hw.cqm_groups_entry);
1097 static void __intel_mbm_event_count(void *info)
1099 struct rmid_read *rr = info;
1102 val = rmid_read_mbm(rr->rmid, rr->evt_type);
1103 if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
1105 atomic64_add(val, &rr->value);
1108 static enum hrtimer_restart mbm_hrtimer_handle(struct hrtimer *hrtimer)
1110 struct perf_event *iter, *iter1;
1111 int ret = HRTIMER_RESTART;
1112 struct list_head *head;
1113 unsigned long flags;
1117 * Need to hold cache_lock as the timer's Event Select MSR reads
1118 * can race with the mbm/cqm count() and mbm_init() reads.
1120 raw_spin_lock_irqsave(&cache_lock, flags);
1122 if (list_empty(&cache_groups)) {
1123 ret = HRTIMER_NORESTART;
1127 list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
1128 grp_rmid = iter->hw.cqm_rmid;
1129 if (!__rmid_valid(grp_rmid))
1131 if (is_mbm_event(iter->attr.config))
1132 update_sample(grp_rmid, iter->attr.config, 0);
1134 head = &iter->hw.cqm_group_entry;
1135 if (list_empty(head))
1137 list_for_each_entry(iter1, head, hw.cqm_group_entry) {
1138 if (!iter1->hw.is_group_event)
1140 if (is_mbm_event(iter1->attr.config))
1141 update_sample(iter1->hw.cqm_rmid,
1142 iter1->attr.config, 0);
1146 hrtimer_forward_now(hrtimer, ms_to_ktime(MBM_CTR_OVERFLOW_TIME));
1148 raw_spin_unlock_irqrestore(&cache_lock, flags);
1153 static void __mbm_start_timer(void *info)
1155 hrtimer_start(&mbm_timers[pkg_id], ms_to_ktime(MBM_CTR_OVERFLOW_TIME),
1156 HRTIMER_MODE_REL_PINNED);
1159 static void __mbm_stop_timer(void *info)
1161 hrtimer_cancel(&mbm_timers[pkg_id]);
1164 static void mbm_start_timers(void)
1166 on_each_cpu_mask(&cqm_cpumask, __mbm_start_timer, NULL, 1);
1169 static void mbm_stop_timers(void)
1171 on_each_cpu_mask(&cqm_cpumask, __mbm_stop_timer, NULL, 1);
1174 static void mbm_hrtimer_init(void)
1179 for (i = 0; i < mbm_socket_max; i++) {
1180 hr = &mbm_timers[i];
1181 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1182 hr->function = mbm_hrtimer_handle;
1186 static u64 intel_cqm_event_count(struct perf_event *event)
1188 unsigned long flags;
1189 struct rmid_read rr = {
1190 .evt_type = event->attr.config,
1191 .value = ATOMIC64_INIT(0),
1195 * We only need to worry about task events. System-wide events
1196 * are handled as usual, i.e. entirely with
1197 * intel_cqm_event_read().
1199 if (event->cpu != -1)
1200 return __perf_event_count(event);
1203 * Only the group leader gets to report values, except in the case of
1204 * multiple events in the same group, where we still need to read the
1205 * other events. This stops us
1206 * reporting duplicate values to userspace, and gives us a clear
1207 * rule for which task gets to report the values.
1209 * Note that it is impossible to attribute these values to
1210 * specific packages - we forfeit that ability when we create
1213 if (!cqm_group_leader(event) && !event->hw.is_group_event)
1217 * Getting up-to-date values requires an SMP IPI which is not
1218 * possible if we're being called in interrupt context. Return
1219 * the cached values instead.
1221 if (unlikely(in_interrupt()))
1225 * Notice that we don't perform the reading of an RMID
1226 * atomically, because we can't hold a spin lock across the
1229 * Speculatively perform the read, since @event might be
1230 * assigned a different (possibly invalid) RMID while we're
1231 * busy performing the IPI calls. It's therefore necessary to
1232 * check @event's RMID afterwards, and if it has changed,
1233 * discard the result of the read.
1235 rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);
1237 if (!__rmid_valid(rr.rmid))
1242 raw_spin_lock_irqsave(&cache_lock, flags);
1243 if (event->hw.cqm_rmid == rr.rmid)
1244 local64_set(&event->count, atomic64_read(&rr.value));
1245 raw_spin_unlock_irqrestore(&cache_lock, flags);
1247 return __perf_event_count(event);
1250 static void intel_cqm_event_start(struct perf_event *event, int mode)
1252 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
1253 u32 rmid = event->hw.cqm_rmid;
1255 if (!(event->hw.cqm_state & PERF_HES_STOPPED))
1258 event->hw.cqm_state &= ~PERF_HES_STOPPED;
1260 if (state->rmid_usecnt++) {
1261 if (!WARN_ON_ONCE(state->rmid != rmid))
1264 WARN_ON_ONCE(state->rmid);
1268 wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
1271 static void intel_cqm_event_stop(struct perf_event *event, int mode)
1273 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
1275 if (event->hw.cqm_state & PERF_HES_STOPPED)
1278 event->hw.cqm_state |= PERF_HES_STOPPED;
1280 intel_cqm_event_read(event);
1282 if (!--state->rmid_usecnt) {
1284 wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
1286 WARN_ON_ONCE(!state->rmid);
1290 static int intel_cqm_event_add(struct perf_event *event, int mode)
1292 unsigned long flags;
1295 raw_spin_lock_irqsave(&cache_lock, flags);
1297 event->hw.cqm_state = PERF_HES_STOPPED;
1298 rmid = event->hw.cqm_rmid;
1300 if (__rmid_valid(rmid) && (mode & PERF_EF_START))
1301 intel_cqm_event_start(event, mode);
1303 raw_spin_unlock_irqrestore(&cache_lock, flags);
1308 static void intel_cqm_event_destroy(struct perf_event *event)
1310 struct perf_event *group_other = NULL;
1311 unsigned long flags;
1313 mutex_lock(&cache_mutex);
1315 * Hold the cache_lock as mbm timer handlers could be
1316 * scanning the list of events.
1318 raw_spin_lock_irqsave(&cache_lock, flags);
1321 * If there's another event in this group...
1323 if (!list_empty(&event->hw.cqm_group_entry)) {
1324 group_other = list_first_entry(&event->hw.cqm_group_entry,
1326 hw.cqm_group_entry);
1327 list_del(&event->hw.cqm_group_entry);
1331 * And we're the group leader..
1333 if (cqm_group_leader(event)) {
1335 * If there was a group_other, make that leader, otherwise
1336 * destroy the group and return the RMID.
1339 list_replace(&event->hw.cqm_groups_entry,
1340 &group_other->hw.cqm_groups_entry);
1342 u32 rmid = event->hw.cqm_rmid;
1344 if (__rmid_valid(rmid))
1346 list_del(&event->hw.cqm_groups_entry);
1350 raw_spin_unlock_irqrestore(&cache_lock, flags);
1353 * Stop the mbm overflow timers when the last event is destroyed.
1355 if (mbm_enabled && list_empty(&cache_groups))
1358 mutex_unlock(&cache_mutex);
1361 static int intel_cqm_event_init(struct perf_event *event)
1363 struct perf_event *group = NULL;
1364 bool rotate = false;
1365 unsigned long flags;
1367 if (event->attr.type != intel_cqm_pmu.type)
1370 if ((event->attr.config < QOS_L3_OCCUP_EVENT_ID) ||
1371 (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
1374 if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
1375 (is_mbm_event(event->attr.config) && !mbm_enabled))
1378 /* unsupported modes and filters */
1379 if (event->attr.exclude_user ||
1380 event->attr.exclude_kernel ||
1381 event->attr.exclude_hv ||
1382 event->attr.exclude_idle ||
1383 event->attr.exclude_host ||
1384 event->attr.exclude_guest ||
1385 event->attr.sample_period) /* no sampling */
1388 INIT_LIST_HEAD(&event->hw.cqm_group_entry);
1389 INIT_LIST_HEAD(&event->hw.cqm_groups_entry);
1391 event->destroy = intel_cqm_event_destroy;
1393 mutex_lock(&cache_mutex);
1396 * Start the mbm overflow timers when the first event is created.
1398 if (mbm_enabled && list_empty(&cache_groups))
1401 /* Will also set rmid */
1402 intel_cqm_setup_event(event, &group);
1405 * Hold the cache_lock as mbm timer handlers could be
1406 * scanning the list of events.
1408 raw_spin_lock_irqsave(&cache_lock, flags);
1411 list_add_tail(&event->hw.cqm_group_entry,
1412 &group->hw.cqm_group_entry);
1414 list_add_tail(&event->hw.cqm_groups_entry,
1418 * All RMIDs are either in use or have recently been
1419 * used. Kick the rotation worker to clean/free some.
1421 * We only do this for the group leader, rather than for
1422 * every event in a group to save on needless work.
1424 if (!__rmid_valid(event->hw.cqm_rmid))
1428 raw_spin_unlock_irqrestore(&cache_lock, flags);
1429 mutex_unlock(&cache_mutex);
1432 schedule_delayed_work(&intel_cqm_rmid_work, 0);
1437 EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
1438 EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
1439 EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
1440 EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
1441 EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");
1443 EVENT_ATTR_STR(total_bytes, intel_cqm_total_bytes, "event=0x02");
1444 EVENT_ATTR_STR(total_bytes.per-pkg, intel_cqm_total_bytes_pkg, "1");
1445 EVENT_ATTR_STR(total_bytes.unit, intel_cqm_total_bytes_unit, "MB");
1446 EVENT_ATTR_STR(total_bytes.scale, intel_cqm_total_bytes_scale, "1e-6");
1448 EVENT_ATTR_STR(local_bytes, intel_cqm_local_bytes, "event=0x03");
1449 EVENT_ATTR_STR(local_bytes.per-pkg, intel_cqm_local_bytes_pkg, "1");
1450 EVENT_ATTR_STR(local_bytes.unit, intel_cqm_local_bytes_unit, "MB");
1451 EVENT_ATTR_STR(local_bytes.scale, intel_cqm_local_bytes_scale, "1e-6");
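/*
 * How the attribute strings above are consumed (sketch; the helper is
 * not part of this file): perf userspace multiplies the raw count by
 * .scale and reports it in .unit, so llc_occupancy counts are scaled by
 * cqm_l3_scale (filled in at init time) into Bytes, while the MBM byte
 * counts are scaled by 1e-6 into MB.
 */
static inline u64 llc_occupancy_bytes(u64 raw_count, u32 l3_scale)
{
	return raw_count * l3_scale;
}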
1453 static struct attribute *intel_cqm_events_attr[] = {
1454 EVENT_PTR(intel_cqm_llc),
1455 EVENT_PTR(intel_cqm_llc_pkg),
1456 EVENT_PTR(intel_cqm_llc_unit),
1457 EVENT_PTR(intel_cqm_llc_scale),
1458 EVENT_PTR(intel_cqm_llc_snapshot),
1462 static struct attribute *intel_mbm_events_attr[] = {
1463 EVENT_PTR(intel_cqm_total_bytes),
1464 EVENT_PTR(intel_cqm_local_bytes),
1465 EVENT_PTR(intel_cqm_total_bytes_pkg),
1466 EVENT_PTR(intel_cqm_local_bytes_pkg),
1467 EVENT_PTR(intel_cqm_total_bytes_unit),
1468 EVENT_PTR(intel_cqm_local_bytes_unit),
1469 EVENT_PTR(intel_cqm_total_bytes_scale),
1470 EVENT_PTR(intel_cqm_local_bytes_scale),
1474 static struct attribute *intel_cmt_mbm_events_attr[] = {
1475 EVENT_PTR(intel_cqm_llc),
1476 EVENT_PTR(intel_cqm_total_bytes),
1477 EVENT_PTR(intel_cqm_local_bytes),
1478 EVENT_PTR(intel_cqm_llc_pkg),
1479 EVENT_PTR(intel_cqm_total_bytes_pkg),
1480 EVENT_PTR(intel_cqm_local_bytes_pkg),
1481 EVENT_PTR(intel_cqm_llc_unit),
1482 EVENT_PTR(intel_cqm_total_bytes_unit),
1483 EVENT_PTR(intel_cqm_local_bytes_unit),
1484 EVENT_PTR(intel_cqm_llc_scale),
1485 EVENT_PTR(intel_cqm_total_bytes_scale),
1486 EVENT_PTR(intel_cqm_local_bytes_scale),
1487 EVENT_PTR(intel_cqm_llc_snapshot),
1491 static struct attribute_group intel_cqm_events_group = {
1496 PMU_FORMAT_ATTR(event, "config:0-7");
1497 static struct attribute *intel_cqm_formats_attr[] = {
1498 &format_attr_event.attr,
1502 static struct attribute_group intel_cqm_format_group = {
1504 .attrs = intel_cqm_formats_attr,
1508 max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
1513 mutex_lock(&cache_mutex);
1514 rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold);
1515 mutex_unlock(&cache_mutex);
1521 max_recycle_threshold_store(struct device *dev,
1522 struct device_attribute *attr,
1523 const char *buf, size_t count)
1525 unsigned int bytes, cachelines;
1528 ret = kstrtouint(buf, 0, &bytes);
1532 mutex_lock(&cache_mutex);
1534 __intel_cqm_max_threshold = bytes;
1535 cachelines = bytes / cqm_l3_scale;
1538 * The new maximum takes effect immediately.
1540 if (__intel_cqm_threshold > cachelines)
1541 __intel_cqm_threshold = cachelines;
1543 mutex_unlock(&cache_mutex);
1548 static DEVICE_ATTR_RW(max_recycle_threshold);
1550 static struct attribute *intel_cqm_attrs[] = {
1551 &dev_attr_max_recycle_threshold.attr,
1555 static const struct attribute_group intel_cqm_group = {
1556 .attrs = intel_cqm_attrs,
1559 static const struct attribute_group *intel_cqm_attr_groups[] = {
1560 &intel_cqm_events_group,
1561 &intel_cqm_format_group,
1566 static struct pmu intel_cqm_pmu = {
1567 .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
1568 .attr_groups = intel_cqm_attr_groups,
1569 .task_ctx_nr = perf_sw_context,
1570 .event_init = intel_cqm_event_init,
1571 .add = intel_cqm_event_add,
1572 .del = intel_cqm_event_stop,
1573 .start = intel_cqm_event_start,
1574 .stop = intel_cqm_event_stop,
1575 .read = intel_cqm_event_read,
1576 .count = intel_cqm_event_count,
1579 static inline void cqm_pick_event_reader(int cpu)
1583 /* First online cpu in package becomes the reader */
1584 reader = cpumask_any_and(&cqm_cpumask, topology_core_cpumask(cpu));
1585 if (reader >= nr_cpu_ids)
1586 cpumask_set_cpu(cpu, &cqm_cpumask);
1589 static int intel_cqm_cpu_starting(unsigned int cpu)
1591 struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
1592 struct cpuinfo_x86 *c = &cpu_data(cpu);
1596 state->rmid_usecnt = 0;
1598 WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
1599 WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
1601 cqm_pick_event_reader(cpu);
1605 static int intel_cqm_cpu_exit(unsigned int cpu)
1609 /* Is @cpu the current cqm reader for this package? */
1610 if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
1613 /* Find another online reader in this package */
1614 target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
1616 if (target < nr_cpu_ids)
1617 cpumask_set_cpu(target, &cqm_cpumask);
1622 static const struct x86_cpu_id intel_cqm_match[] = {
1623 { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
1627 static void mbm_cleanup(void)
1634 mbm_enabled = false;
1637 static const struct x86_cpu_id intel_mbm_local_match[] = {
1638 { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_LOCAL },
1642 static const struct x86_cpu_id intel_mbm_total_match[] = {
1643 { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_TOTAL },
1647 static int intel_mbm_init(void)
1649 int ret = 0, array_size, maxid = cqm_max_rmid + 1;
1651 mbm_socket_max = topology_max_packages();
1652 array_size = sizeof(struct sample) * maxid * mbm_socket_max;
1653 mbm_local = kmalloc(array_size, GFP_KERNEL);
1657 mbm_total = kmalloc(array_size, GFP_KERNEL);
1663 array_size = sizeof(struct hrtimer) * mbm_socket_max;
1664 mbm_timers = kmalloc(array_size, GFP_KERNEL);
1678 static int __init intel_cqm_init(void)
1680 char *str = NULL, scale[20];
1683 if (x86_match_cpu(intel_cqm_match))
1686 if (x86_match_cpu(intel_mbm_local_match) &&
1687 x86_match_cpu(intel_mbm_total_match))
1690 if (!cqm_enabled && !mbm_enabled)
1693 cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;
1696 * It's possible that not all resources support the same number
1697 * of RMIDs. Instead of making scheduling much more complicated
1698 * (where we have to match a task's RMID to a cpu that supports
1699 * that many RMIDs) just find the minimum RMIDs supported across
1702 * Also, check that the scales match on all cpus.
1705 for_each_online_cpu(cpu) {
1706 struct cpuinfo_x86 *c = &cpu_data(cpu);
1708 if (c->x86_cache_max_rmid < cqm_max_rmid)
1709 cqm_max_rmid = c->x86_cache_max_rmid;
1711 if (c->x86_cache_occ_scale != cqm_l3_scale) {
1712 pr_err("Multiple LLC scale values, disabling\n");
1719 * A reasonable upper limit on the max threshold is the number
1720 * of lines tagged per RMID if all RMIDs have the same number of
1721 * lines tagged in the LLC.
1723 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
1725 __intel_cqm_max_threshold =
1726 boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);
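/*
 * Worked example of the limit above, using the numbers from the
 * comment: x86_cache_size is in KB, so a 35MB LLC with 56 RMIDs gives
 * 35840 * 1024 / 56 = 655360 bytes (640KB) per RMID, i.e. 1/56th or
 * roughly 1.8% of the LLC.
 */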
1728 snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
1729 str = kstrdup(scale, GFP_KERNEL);
1735 event_attr_intel_cqm_llc_scale.event_str = str;
1737 ret = intel_cqm_setup_rmid_cache();
1742 ret = intel_mbm_init();
1743 if (ret && !cqm_enabled)
1746 if (cqm_enabled && mbm_enabled)
1747 intel_cqm_events_group.attrs = intel_cmt_mbm_events_attr;
1748 else if (!cqm_enabled && mbm_enabled)
1749 intel_cqm_events_group.attrs = intel_mbm_events_attr;
1750 else if (cqm_enabled && !mbm_enabled)
1751 intel_cqm_events_group.attrs = intel_cqm_events_attr;
1753 ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
1755 pr_err("Intel CQM perf registration failed: %d\n", ret);
1760 pr_info("Intel CQM monitoring enabled\n");
1762 pr_info("Intel MBM enabled\n");
1765 * Set up the hot cpu notifier once we are sure cqm
1766 * is enabled to avoid a notifier leak.
1768 cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
1769 "AP_PERF_X86_CQM_STARTING",
1770 intel_cqm_cpu_starting, NULL);
1771 cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
1772 NULL, intel_cqm_cpu_exit);
1785 device_initcall(intel_cqm_init);