/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include <asm/apic.h>

#include "../perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>
#include <asm/amd-ibs.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
/*
 * IBS states:
 *
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (i.e. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our BIT and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), and consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If this event will have the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */
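/*
 * Summary of the transitions implemented below (a descriptive aid, not
 * part of the original comment):
 *
 *   pmu::add()   -> set ENABLED
 *   pmu::start() -> set STARTED, clear STOPPING, set EN
 *   pmu::stop()  -> set STOPPING, set STOPPED, clear EN, then clear STARTED
 *   pmu::del()   -> clear ENABLED
 *   NMI          -> consumes STOPPED to swallow late, !VALID samples
 */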
enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,
	IBS_STOPPED	= 3,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	unsigned int			fetch_count_reset_broken : 1;
	unsigned int			fetch_ignore_if_zero_rip : 1;
	struct cpu_perf_ibs __percpu	*pcpu;

	u64				(*get_count)(u64 config);
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler too often. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
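/*
 * Illustrative sketch (not part of the original file): the effective
 * period IBS can actually program. The MaxCnt fields ignore the low
 * 4 bits, so a requested period is rounded down to a multiple of 16
 * and clamped to the PMU limits, mirroring the min/max handling above.
 */
static inline u64 ibs_effective_period_sketch(u64 sample_period, u64 max_period)
{
	u64 period = sample_period & ~0xFULL;	/* low 4 bits are not programmable */

	if (period < 0x10)
		period = 0x10;			/* smallest hw period */
	if (period > max_period)
		period = max_period;

	return period;
}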
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
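/*
 * Illustrative sketch (not part of the original file): the shift pair
 * above sign-extends counters narrower than 64 bit before taking the
 * difference, so a counter that wrapped yields a small positive delta
 * rather than a huge unsigned one. E.g. with width = 8:
 * prev = 0xF0, now = 0x10 -> delta = 0x20, not 0xFFFFFFFFFFFFFF20.
 */
static inline s64 ibs_count_delta_sketch(u64 prev, u64 now, int width)
{
	int shift = 64 - width;

	return ((s64)(now << shift) - (s64)(prev << shift)) >> shift;
}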
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}
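/*
 * Illustrative userspace sketch (not part of the original file):
 * opening a precise cycles event that the mapping above redirects to
 * IBS op sampling. The sample period is an arbitrary example value.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_precise_cycles_sketch(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 100000,
		.precise_ip	= 2,	/* request skid-0 samples -> IBS */
	};

	/* pid == 0, cpu == -1: measure this task on any cpu */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}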
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits cannot be set in the IBS max cnt,
			 * but allow it in case we adjust the sample period
			 * to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
static u64 get_ibs_fetch_count(u64 config)
{
	union ibs_fetch_ctl fetch_ctl = (union ibs_fetch_ctl)config;

	return fetch_ctl.fetch_cnt << 4;
}
static u64 get_ibs_op_count(u64 config)
{
	union ibs_op_ctl op_ctl = (union ibs_op_ctl)config;
	u64 count = 0;

	/*
	 * If the internal 27-bit counter rolled over, the count is MaxCnt
	 * and the lower 7 bits of CurCnt are randomized.
	 * Otherwise CurCnt has the full 27-bit current counter value.
	 */
	if (op_ctl.op_val) {
		count = op_ctl.opmaxcnt << 4;
		if (ibs_caps & IBS_CAPS_OPCNTEXT)
			count += op_ctl.opmaxcnt_ext << 20;
	} else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
		count = op_ctl.opcurcnt;
	}

	return count;
}
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	u64 tmp = hwc->config | config;

	if (perf_ibs->fetch_count_reset_broken)
		wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);

	wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
}
/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	if (boot_cpu_data.x86 == 0x10)
		wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the IBS pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do the update.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period, config = 0;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
		config |= period & IBS_OP_MAX_CNT_EXT_MASK;
		period &= ~IBS_OP_MAX_CNT_EXT_MASK;
	}
	config |= period >> 4;

	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it.
	 */
	set_bit(IBS_STARTED,    pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, config);

	perf_event_update_userpage(event);
}
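/*
 * Illustrative sketch (not part of the original file): how the period
 * is folded into IbsOpCtl when IBS_CAPS_OPCNTEXT is present. Period
 * bits [26:20] map straight into the OpMaxCntExt field, the remainder
 * into OpMaxCnt after dropping the 4 uncounted low bits, exactly as
 * perf_ibs_start() does above.
 */
static inline u64 ibs_op_config_from_period_sketch(u64 period)
{
	u64 config;

	config  = period & IBS_OP_MAX_CNT_EXT_MASK;		/* bits [26:20] */
	config |= (period & ~IBS_OP_MAX_CNT_EXT_MASK) >> 4;	/* OpMaxCnt */

	return config;
}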
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
		return;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared before, an NMI hitting after the clear but before
		 * clearing the EN bit might think it was a spurious NMI and
		 * not handle it.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear valid bit to not count rollovers on update, rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}
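/*
 * Note on the pairing above (a descriptive aid, not part of the
 * original file): add()/del() only manage the ENABLED bit - one IBS
 * event per PMU per cpu - while start()/stop() drive the
 * STARTED/STOPPING/STOPPED protocol described at the top of this file.
 */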
static void perf_ibs_read(struct perf_event *event) { }

/*
 * We need to initialize with an empty group if all attributes in the
 * group are dynamic.
 */
static struct attribute *attrs_empty[] = {
	NULL,
};

static struct attribute_group empty_format_group = {
	.name = "format",
	.attrs = attrs_empty,
};

static struct attribute_group empty_caps_group = {
	.name = "caps",
	.attrs = attrs_empty,
};

static const struct attribute_group *empty_attr_groups[] = {
	&empty_format_group,
	&empty_caps_group,
	NULL,
};
PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");
PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
PMU_EVENT_ATTR_STRING(zen4_ibs_extensions, zen4_ibs_extensions, "1");

static umode_t
zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return ibs_caps & IBS_CAPS_ZEN4 ? attr->mode : 0;
}
static struct attribute *rand_en_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *fetch_l3missonly_attrs[] = {
	&fetch_l3missonly.attr.attr,
	NULL,
};

static struct attribute *zen4_ibs_extensions_attrs[] = {
	&zen4_ibs_extensions.attr.attr,
	NULL,
};

static struct attribute_group group_rand_en = {
	.name = "format",
	.attrs = rand_en_attrs,
};

static struct attribute_group group_fetch_l3missonly = {
	.name = "format",
	.attrs = fetch_l3missonly_attrs,
	.is_visible = zen4_ibs_extensions_is_visible,
};

static struct attribute_group group_zen4_ibs_extensions = {
	.name = "caps",
	.attrs = zen4_ibs_extensions_attrs,
	.is_visible = zen4_ibs_extensions_is_visible,
};

static const struct attribute_group *fetch_attr_groups[] = {
	&group_rand_en,
	&empty_caps_group,
	NULL,
};

static const struct attribute_group *fetch_attr_update[] = {
	&group_fetch_l3missonly,
	&group_zen4_ibs_extensions,
	NULL,
};

static umode_t
cnt_ctl_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return ibs_caps & IBS_CAPS_OPCNT ? attr->mode : 0;
}

static struct attribute *cnt_ctl_attrs[] = {
	&format_attr_cnt_ctl.attr,
	NULL,
};

static struct attribute *op_l3missonly_attrs[] = {
	&op_l3missonly.attr.attr,
	NULL,
};

static struct attribute_group group_cnt_ctl = {
	.name = "format",
	.attrs = cnt_ctl_attrs,
	.is_visible = cnt_ctl_is_visible,
};

static struct attribute_group group_op_l3missonly = {
	.name = "format",
	.attrs = op_l3missonly_attrs,
	.is_visible = zen4_ibs_extensions_is_visible,
};

static const struct attribute_group *op_attr_update[] = {
	&group_cnt_ctl,
	&group_op_l3missonly,
	&group_zen4_ibs_extensions,
	NULL,
};
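/*
 * Illustrative usage (not part of the original file): the groups above
 * surface as sysfs format/caps files, e.g.
 *
 *   /sys/bus/event_source/devices/ibs_fetch/format/rand_en
 *   /sys/bus/event_source/devices/ibs_op/format/cnt_ctl
 *
 * which perf consumes as event parameters:
 *
 *   perf record -a -e ibs_op/cnt_ctl=1/ ...	# count micro-ops, not cycles
 *   perf record -a -e ibs_op/l3missonly=1/ ...	# Zen4 extensions only
 */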
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
				  IBS_OP_CUR_CNT_RAND,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};
static void perf_ibs_get_mem_op(union ibs_op_data3 *op_data3,
				struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;

	data_src->mem_op = PERF_MEM_OP_NA;

	if (op_data3->ld_op)
		data_src->mem_op = PERF_MEM_OP_LOAD;
	else if (op_data3->st_op)
		data_src->mem_op = PERF_MEM_OP_STORE;
}
/*
 * Processors having CPUID_Fn8000001B_EAX[11] aka IBS_CAPS_ZEN4 have
 * finer-grained DataSrc encodings; others have coarse ones.
 */
static u8 perf_ibs_data_src(union ibs_op_data2 *op_data2)
{
	if (ibs_caps & IBS_CAPS_ZEN4)
		return (op_data2->data_src_hi << 3) | op_data2->data_src_lo;

	return op_data2->data_src_lo;
}
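/*
 * Decode priority in perf_ibs_get_mem_lvl() below (a descriptive aid,
 * not part of the original file): uncached access first, then L1/L2
 * hits from OP_DATA3, then the DataSrc-based levels (L3, peer CCX
 * caches, DRAM, PMEM, CXL, IO) for loads only, and finally the MAB-hit
 * fallback when no DataSrc was recorded.
 */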
static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
				 union ibs_op_data3 *op_data3,
				 struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;
	u8 ibs_data_src = perf_ibs_data_src(op_data2);

	data_src->mem_lvl = 0;

	/*
	 * DcMiss, L2Miss, DataSrc, DcMissLat etc. are all invalid for Uncached
	 * memory accesses. So, check DcUcMemAcc bit early.
	 */
	if (op_data3->dc_uc_mem_acc && ibs_data_src != IBS_DATA_SRC_EXT_IO) {
		data_src->mem_lvl = PERF_MEM_LVL_UNC | PERF_MEM_LVL_HIT;
		return;
	}

	/* L1 Hit */
	if (op_data3->dc_miss == 0) {
		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		return;
	}

	/* L2 Hit */
	if (op_data3->l2_miss == 0) {
		/* Erratum #1293 */
		if (boot_cpu_data.x86 != 0x19 || boot_cpu_data.x86_model > 0xF ||
		    !(op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc)) {
			data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
			return;
		}
	}

	/*
	 * OP_DATA2 is valid only for load ops. Skip all checks which
	 * use OP_DATA2[DataSrc].
	 */
	if (data_src->mem_op != PERF_MEM_OP_LOAD)
		goto check_mab;

	/* L3 Hit */
	if (ibs_caps & IBS_CAPS_ZEN4) {
		if (ibs_data_src == IBS_DATA_SRC_EXT_LOC_CACHE) {
			data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
			return;
		}
	} else {
		if (ibs_data_src == IBS_DATA_SRC_LOC_CACHE) {
			data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_REM_CCE1 |
					    PERF_MEM_LVL_HIT;
			return;
		}
	}

	/* A peer cache in a near CCX */
	if (ibs_caps & IBS_CAPS_ZEN4 &&
	    ibs_data_src == IBS_DATA_SRC_EXT_NEAR_CCX_CACHE) {
		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
		return;
	}

	/* A peer cache in a far CCX */
	if (ibs_caps & IBS_CAPS_ZEN4) {
		if (ibs_data_src == IBS_DATA_SRC_EXT_FAR_CCX_CACHE) {
			data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
			return;
		}
	} else {
		if (ibs_data_src == IBS_DATA_SRC_REM_CACHE) {
			data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
			return;
		}
	}

	/* DRAM */
	if (ibs_data_src == IBS_DATA_SRC_EXT_DRAM) {
		if (op_data2->rmt_node == 0)
			data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
		else
			data_src->mem_lvl = PERF_MEM_LVL_REM_RAM1 | PERF_MEM_LVL_HIT;
		return;
	}

	/* PMEM */
	if (ibs_caps & IBS_CAPS_ZEN4 && ibs_data_src == IBS_DATA_SRC_EXT_PMEM) {
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_PMEM;
		if (op_data2->rmt_node) {
			data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
			/* IBS doesn't provide Remote socket detail */
			data_src->mem_hops = PERF_MEM_HOPS_1;
		}
		return;
	}

	/* Extension Memory */
	if (ibs_caps & IBS_CAPS_ZEN4 &&
	    ibs_data_src == IBS_DATA_SRC_EXT_EXT_MEM) {
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_CXL;
		if (op_data2->rmt_node) {
			data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
			/* IBS doesn't provide Remote socket detail */
			data_src->mem_hops = PERF_MEM_HOPS_1;
		}
		return;
	}

	/* IO */
	if (ibs_data_src == IBS_DATA_SRC_EXT_IO) {
		data_src->mem_lvl = PERF_MEM_LVL_IO;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_IO;
		if (op_data2->rmt_node) {
			data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
			/* IBS doesn't provide Remote socket detail */
			data_src->mem_hops = PERF_MEM_HOPS_1;
		}
		return;
	}

check_mab:
	/*
	 * MAB (Miss Address Buffer) Hit. MAB keeps track of outstanding
	 * DC misses. However, such data may come from any level in mem
	 * hierarchy. IBS provides detail about both MAB as well as actual
	 * DataSrc simultaneously. Prioritize DataSrc over MAB, i.e. set
	 * MAB only when IBS fails to provide DataSrc.
	 */
	if (op_data3->dc_miss_no_mab_alloc) {
		data_src->mem_lvl = PERF_MEM_LVL_LFB | PERF_MEM_LVL_HIT;
		return;
	}

	data_src->mem_lvl = PERF_MEM_LVL_NA;
}
static bool perf_ibs_cache_hit_st_valid(void)
{
	/* 0: Uninitialized, 1: Valid, -1: Invalid */
	static int cache_hit_st_valid;

	if (unlikely(!cache_hit_st_valid)) {
		if (boot_cpu_data.x86 == 0x19 &&
		    (boot_cpu_data.x86_model <= 0xF ||
		    (boot_cpu_data.x86_model >= 0x20 &&
		     boot_cpu_data.x86_model <= 0x5F))) {
			cache_hit_st_valid = -1;
		} else {
			cache_hit_st_valid = 1;
		}
	}

	return cache_hit_st_valid == 1;
}
static void perf_ibs_get_mem_snoop(union ibs_op_data2 *op_data2,
				   struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;
	u8 ibs_data_src;

	data_src->mem_snoop = PERF_MEM_SNOOP_NA;

	if (!perf_ibs_cache_hit_st_valid() ||
	    data_src->mem_op != PERF_MEM_OP_LOAD ||
	    data_src->mem_lvl & PERF_MEM_LVL_L1 ||
	    data_src->mem_lvl & PERF_MEM_LVL_L2 ||
	    op_data2->cache_hit_st)
		return;

	ibs_data_src = perf_ibs_data_src(op_data2);

	if (ibs_caps & IBS_CAPS_ZEN4) {
		if (ibs_data_src == IBS_DATA_SRC_EXT_LOC_CACHE ||
		    ibs_data_src == IBS_DATA_SRC_EXT_NEAR_CCX_CACHE ||
		    ibs_data_src == IBS_DATA_SRC_EXT_FAR_CCX_CACHE)
			data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
	} else if (ibs_data_src == IBS_DATA_SRC_LOC_CACHE) {
		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
	}
}
static void perf_ibs_get_tlb_lvl(union ibs_op_data3 *op_data3,
				 struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;

	data_src->mem_dtlb = PERF_MEM_TLB_NA;

	if (!op_data3->dc_lin_addr_valid)
		return;

	if (!op_data3->dc_l1tlb_miss) {
		data_src->mem_dtlb = PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT;
		return;
	}

	if (!op_data3->dc_l2tlb_miss) {
		data_src->mem_dtlb = PERF_MEM_TLB_L2 | PERF_MEM_TLB_HIT;
		return;
	}

	data_src->mem_dtlb = PERF_MEM_TLB_L2 | PERF_MEM_TLB_MISS;
}
static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
				  struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;

	data_src->mem_lock = PERF_MEM_LOCK_NA;

	if (op_data3->dc_locked_op)
		data_src->mem_lock = PERF_MEM_LOCK_LOCKED;
}

#define ibs_op_msr_idx(msr)	(msr - MSR_AMD64_IBSOPCTL)
static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
				  struct perf_sample_data *data,
				  union ibs_op_data2 *op_data2,
				  union ibs_op_data3 *op_data3)
{
	perf_ibs_get_mem_lvl(op_data2, op_data3, data);
	perf_ibs_get_mem_snoop(op_data2, data);
	perf_ibs_get_tlb_lvl(op_data3, data);
	perf_ibs_get_mem_lock(op_data3, data);
}

static __u64 perf_ibs_get_op_data2(struct perf_ibs_data *ibs_data,
				   union ibs_op_data3 *op_data3)
{
	__u64 val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA2)];

	/* Erratum #1293 */
	if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model <= 0xF &&
	    (op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc)) {
		/*
		 * OP_DATA2 has only two fields on Zen3: DataSrc and RmtNode.
		 * DataSrc=0 is 'No valid status' and RmtNode is invalid when
		 * DataSrc=0.
		 */
		val = 0;
	}

	return val;
}
static void perf_ibs_parse_ld_st_data(__u64 sample_type,
				      struct perf_ibs_data *ibs_data,
				      struct perf_sample_data *data)
{
	union ibs_op_data3 op_data3;
	union ibs_op_data2 op_data2;
	union ibs_op_data op_data;

	data->data_src.val = PERF_MEM_NA;
	op_data3.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];

	perf_ibs_get_mem_op(&op_data3, data);
	if (data->data_src.mem_op != PERF_MEM_OP_LOAD &&
	    data->data_src.mem_op != PERF_MEM_OP_STORE)
		return;

	op_data2.val = perf_ibs_get_op_data2(ibs_data, &op_data3);

	if (sample_type & PERF_SAMPLE_DATA_SRC) {
		perf_ibs_get_data_src(ibs_data, data, &op_data2, &op_data3);
		data->sample_flags |= PERF_SAMPLE_DATA_SRC;
	}

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE && op_data3.dc_miss &&
	    data->data_src.mem_op == PERF_MEM_OP_LOAD) {
		op_data.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA)];

		if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
			data->weight.var1_dw = op_data3.dc_miss_lat;
			data->weight.var2_w = op_data.tag_to_ret_ctr;
		} else if (sample_type & PERF_SAMPLE_WEIGHT) {
			data->weight.full = op_data3.dc_miss_lat;
		}
		data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
	}

	if (sample_type & PERF_SAMPLE_ADDR && op_data3.dc_lin_addr_valid) {
		data->addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCLINAD)];
		data->sample_flags |= PERF_SAMPLE_ADDR;
	}

	if (sample_type & PERF_SAMPLE_PHYS_ADDR && op_data3.dc_phy_addr_valid) {
		data->phys_addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)];
		data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
	}
}
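/*
 * Illustrative usage (not part of the original file): the decoded
 * load/store details feed the perf mem/c2c workflows, e.g.
 *
 *   perf mem record -a -- sleep 5
 *   perf mem report
 *
 * which consume PERF_SAMPLE_DATA_SRC, PERF_SAMPLE_WEIGHT_TYPE and
 * PERF_SAMPLE_{ADDR,PHYS_ADDR} from IBS op samples.
 */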
static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
				   int check_rip)
{
	if (sample_type & PERF_SAMPLE_RAW ||
	    (perf_ibs == &perf_ibs_op &&
	     (sample_type & PERF_SAMPLE_DATA_SRC ||
	      sample_type & PERF_SAMPLE_WEIGHT_TYPE ||
	      sample_type & PERF_SAMPLE_ADDR ||
	      sample_type & PERF_SAMPLE_PHYS_ADDR)))
		return perf_ibs->offset_max;
	else if (check_rip)
		return 3;
	return 1;
}
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period, new_config = 0;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could be still incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
			return 1;

		return 0;
	}

	if (WARN_ON_ONCE(!event))
		goto fail;

	hwc = &event->hw;
	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));

	offset_max = perf_ibs_get_offset_max(perf_ibs, event->attr.sample_type, check_rip);

	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	/*
	 * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
	 * depending on their availability.
	 * Can't add to offset_max as they are staggered.
	 */
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		if (perf_ibs == &perf_ibs_op) {
			if (ibs_caps & IBS_CAPS_BRNTRGT) {
				rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
				size++;
			}
			if (ibs_caps & IBS_CAPS_OPDATA4) {
				rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
				size++;
			}
		}
		if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
			rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		/* Workaround for erratum #1197 */
		if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
			goto out;

		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
			.frag = {
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,
			},
		};
		data.raw = &raw;
		data.sample_flags |= PERF_SAMPLE_RAW;
	}

	if (perf_ibs == &perf_ibs_op)
		perf_ibs_parse_ld_st_data(event->attr.sample_type, &ibs_data, &data);

	/*
	 * The rip recorded by IbsOpRip will not be consistent with the rsp and
	 * rbp recorded as part of the interrupt regs. Thus we need to use the
	 * rip from the interrupt regs while unwinding the call stack.
	 */
	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
		data.callchain = perf_callchain(event, iregs);
		data.sample_flags |= PERF_SAMPLE_CALLCHAIN;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle) {
		perf_ibs_stop(event, 0);
	} else {
		if (perf_ibs == &perf_ibs_op) {
			if (ibs_caps & IBS_CAPS_OPCNTEXT) {
				new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
				period &= ~IBS_OP_MAX_CNT_EXT_MASK;
			}
			if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL))
				new_config |= *config & IBS_OP_CUR_CNT_RAND;
		}
		new_config |= period >> 4;

		perf_ibs_enable_event(perf_ibs, hwc, new_config);
	}

	perf_event_update_userpage(event);

	return 1;
}
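/*
 * Illustrative sketch (not part of the original file): a plain-C
 * equivalent of the staggered-MSR walk in perf_ibs_handle_irq().
 * offset_mask has one bit set per implemented register offset and
 * find_next_bit() skips the holes; here with a single long mask:
 */
static inline int ibs_next_offset_sketch(unsigned long mask, int prev)
{
	int nbits = 8 * (int)sizeof(mask);

	if (prev + 1 >= nbits)
		return nbits;			/* no more offsets */
	mask &= ~0UL << (prev + 1);		/* keep bits strictly above prev */

	return mask ? __builtin_ctzl(mask) : nbits;
}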
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init int perf_ibs_fetch_init(void)
{
	/*
	 * Some chips fail to reset the fetch count when it is written; instead
	 * they need a 0-1 transition of IbsFetchEn.
	 */
	if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
		perf_ibs_fetch.fetch_count_reset_broken = 1;

	if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
		perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;

	if (ibs_caps & IBS_CAPS_ZEN4)
		perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;

	perf_ibs_fetch.pmu.attr_groups = fetch_attr_groups;
	perf_ibs_fetch.pmu.attr_update = fetch_attr_update;

	return perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
}
static __init int perf_ibs_op_init(void)
{
	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;

	if (ibs_caps & IBS_CAPS_OPCNTEXT) {
		perf_ibs_op.max_period  |= IBS_OP_MAX_CNT_EXT_MASK;
		perf_ibs_op.config_mask	|= IBS_OP_MAX_CNT_EXT_MASK;
		perf_ibs_op.cnt_mask    |= IBS_OP_MAX_CNT_EXT_MASK;
	}

	if (ibs_caps & IBS_CAPS_ZEN4)
		perf_ibs_op.config_mask |= IBS_OP_L3MISSONLY;

	perf_ibs_op.pmu.attr_groups = empty_attr_groups;
	perf_ibs_op.pmu.attr_update = op_attr_update;

	return perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
}
static __init int perf_event_ibs_init(void)
{
	int ret;

	ret = perf_ibs_fetch_init();
	if (ret)
		return ret;

	ret = perf_ibs_op_init();
	if (ret)
		goto err_op;

	ret = register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	if (ret)
		goto err_nmi;

	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
	return 0;

err_nmi:
	perf_pmu_unregister(&perf_ibs_op.pmu);
	free_percpu(perf_ibs_op.pcpu);
	perf_ibs_op.pcpu = NULL;
err_op:
	perf_pmu_unregister(&perf_ibs_fetch.pmu);
	free_percpu(perf_ibs_fetch.pcpu);
	perf_ibs_fetch.pcpu = NULL;

	return ret;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void)
{
	return 0;
}

#endif

/* IBS - apic initialization, for perf and oprofile */
static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
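/*
 * Illustrative userspace sketch (not part of the original file):
 * reading the same CPUID leaf (0x8000001b, IBS_CPUID_FEATURES) with
 * the compiler's cpuid helper; EAX bit 0 is IBS_CAPS_AVAIL.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x8000001b, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf not supported */

	printf("IBS caps: 0x%08x (available: %u)\n", eax, eax & 1);
	return 0;
}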
u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
				 value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		pr_debug("No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
 * is using the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("LVT offset %d assigned\n", offset);

	return;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return;
}
static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set this up.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;

failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
	setup_APIC_ibs();
	return 0;
}

#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
	clear_APIC_ibs();
	return 0;
}

static void perf_ibs_resume(void)
{
	ibs_eilvt_setup();
	setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
	clear_APIC_ibs();
	return 0;
}

static __init int amd_ibs_init(void)
{
	u32 caps;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		return -EINVAL;

	perf_ibs_pm_init();

	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	/*
	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
	 * all online cpus.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
			  "perf/x86/amd/ibs:starting",
			  x86_pmu_amd_ibs_starting_cpu,
			  x86_pmu_amd_ibs_dying_cpu);

	return perf_event_ibs_init();
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);