/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

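/*
 * With the 200 Hz FREQUENCY above, NSEC_PER_SEC / FREQUENCY is 5,000,000 ns,
 * so PERIOD evaluates to max(10,000 ns, 5 ms) = 5 ms per sampling timer tick.
 */
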
#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;

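/*
 * Engine event configs encode the engine class, instance and sample type in
 * event->attr.config, roughly as laid out by the uapi __I915_PMU_ENGINE()
 * macro:
 *
 *   config = (class << I915_PMU_CLASS_SHIFT) |
 *            (instance << I915_PMU_SAMPLE_BITS) |
 *            sample;
 *
 * The helpers below pick those fields back out of a config value.
 */
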
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(const u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_gt_id(const u64 config)
{
	return config >> __I915_PMU_GT_SHIFT;
}

static u64 config_counter(const u64 config)
{
	return config & ~(~0ULL << __I915_PMU_GT_SHIFT);
}

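/*
 * Non-engine ("other") event configs carry the GT id in the bits at and above
 * __I915_PMU_GT_SHIFT and the counter id below it, so for example a config
 * naming RC6 residency on GT1 splits back into config_gt_id() == 1 and
 * config_counter() == I915_PMU_RC6_RESIDENCY.
 */
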
static unsigned int other_bit(const u64 config)
{
	unsigned int val;

	switch (config_counter(config)) {
	case I915_PMU_ACTUAL_FREQUENCY:
		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
		break;
	case I915_PMU_REQUESTED_FREQUENCY:
		val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
		break;
	case I915_PMU_RC6_RESIDENCY:
		val = __I915_PMU_RC6_RESIDENCY_ENABLED;
		break;
	default:
		/*
		 * Events that do not require sampling, or tracking state
		 * transitions between enabled and disabled, can be ignored.
		 */
		return -1;
	}

	return I915_ENGINE_SAMPLE_COUNT +
	       config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT +
	       val;
}

static unsigned int config_bit(const u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return other_bit(config);
}

static u32 config_mask(const u64 config)
{
	unsigned int bit = config_bit(config);

	if (__builtin_constant_p(config))
		BUILD_BUG_ON(bit >
			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
							  enable)) - 1);
	else
		WARN_ON_ONCE(bit >
			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
							  enable)) - 1);

	return BIT(config_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_bit(struct perf_event *event)
{
	return config_bit(event->attr.config);
}

static u32 frequency_enabled_mask(void)
{
	unsigned int i;
	u32 mask = 0;

	for (i = 0; i < I915_PMU_MAX_GT; i++)
		mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) |
			config_mask(__I915_PMU_REQUESTED_FREQUENCY(i));

	return mask;
}

static bool pmu_needs_timer(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u32 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;

	/*
	 * Also, when software busyness tracking is available we do not need
	 * the timer for the I915_SAMPLE_BUSY counter.
	 */
	if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6pp);

	return val;
}

static inline s64 ktime_since_raw(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}

static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
{
	return pmu->sample[gt_id][sample].cur;
}

static void
store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
{
	pmu->sample[gt_id][sample].cur = val;
}

static void
add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
{
	pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
}

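/*
 * For the frequency counters, add_sample_mult() is called with val in MHz and
 * mul set to the sampling period in microseconds, so the raw sample
 * accumulates MHz*us. __i915_pmu_event_read() later divides by USEC_PER_SEC,
 * which means the counter exposed to perf advances by the average frequency
 * (in MHz) for every second of sampled time.
 */
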
static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int gt_id = gt->info.id;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since_raw(pmu->sleep_last[gt_id]);
		val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
	}

	if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
		val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
	else
		store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}

static void init_rc6(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
			u64 val = __get_rc6(gt);

			store_sample(pmu, i, __I915_SAMPLE_RC6, val);
			store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
				     val);
			pmu->sleep_last[i] = ktime_get_raw();
		}
	}
}

static void park_rc6(struct intel_gt *gt)
{
	struct i915_pmu *pmu = &gt->i915->pmu;

	store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
	pmu->sleep_last[gt->info.id] = ktime_get_raw();
}

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_parked(struct intel_gt *gt)
{
	struct i915_pmu *pmu = &gt->i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(gt);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * the GPU went idle.
	 */
	pmu->unparked &= ~BIT(gt->info.id);
	if (pmu->unparked == 0)
		pmu->timer_enabled = false;

	spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct intel_gt *gt)
{
	struct i915_pmu *pmu = &gt->i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	if (pmu->unparked == 0)
		__i915_pmu_maybe_start_timer(pmu);

	pmu->unparked |= BIT(gt->info.id);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return GRAPHICS_VER(i915) == 7;
}

static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}

static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!engine->pmu.enable)
			continue;

		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}

static bool
frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
{
	return pmu->enable &
	       (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) |
		config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt)));
}

static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int gt_id = gt->info.id;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu, gt_id))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_rps_read_actual_frequency_fw(rps);
		if (!val)
			val = intel_gpu_freq(rps, rps->cur_freq);

		add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
				val, period_ns / 1000);
	}

	if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
		add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
				intel_rps_get_requested_frequency(rps),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned int period_ns;
	struct intel_gt *gt;
	unsigned int i;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed-in period may not be 100% accurate for
	 * all internal calculations, since some amount of time can be spent on
	 * grabbing the forcewake. However, the potential error from timer
	 * callback delay greatly dominates this, so we keep it simple.
	 */
	for_each_gt(gt, i915, i) {
		if (!(pmu->unparked & BIT(i)))
			continue;

		engines_sample(gt, period_ns);
		frequency_sample(gt, period_ns);
	}

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);

	drm_dev_put(&i915->drm);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (GRAPHICS_VER(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	struct intel_gt *gt = to_gt(i915);

	unsigned int gt_id = config_gt_id(config);
	unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0;

	if (gt_id > max_gt_id)
		return -ENOENT;

	switch (config_counter(config)) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (GRAPHICS_VER(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!gt->rc6.supported)
			return -ENODEV;
		break;
	case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (pmu->closed)
		return -ENODEV;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent) {
		drm_dev_get(&i915->drm);
		event->destroy = i915_pmu_event_destroy;
	}

	return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		const unsigned int gt_id = config_gt_id(event->attr.config);
		const u64 config = config_counter(event->attr.config);

		switch (config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(read_sample(pmu, gt_id,
					       __I915_SAMPLE_FREQ_ACT),
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(read_sample(pmu, gt_id,
					       __I915_SAMPLE_FREQ_REQ),
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = READ_ONCE(pmu->irq_count);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(i915->gt[gt_id]);
			break;
		case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
			val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
			break;
		}
	}

	return val;
}

static void i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct hw_perf_event *hwc = &event->hw;
	struct i915_pmu *pmu = &i915->pmu;
	u64 prev, new;

	if (pmu->closed) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	const unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		goto update;

	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	pmu->enable |= BIT(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

update:
	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners, even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	const unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		return;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return;

	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		goto out;

	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);

out:
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return -ENODEV;

	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

#define __event(__counter, __name, __unit) \
{ \
	.counter = (__counter), \
	.name = (__name), \
	.unit = (__unit), \
	.global = false, \
}

#define __global_event(__counter, __name, __unit) \
{ \
	.counter = (__counter), \
	.name = (__name), \
	.unit = (__unit), \
	.global = true, \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

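/*
 * The attributes built below end up under
 * /sys/bus/event_source/devices/i915/events/, e.g. "rcs0-busy" or
 * "actual-frequency" (with a "-gtN" suffix on multi-GT parts), each with a
 * matching ".unit" file where a unit is defined.
 */
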
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		unsigned int counter;
		const char *name;
		const char *unit;
		bool global;
	} events[] = {
		__event(0, "actual-frequency", "M"),
		__event(1, "requested-frequency", "M"),
		__global_event(2, "interrupts", NULL),
		__event(3, "rc6-residency", "ns"),
		__event(4, "software-gt-awake-time", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	struct intel_gt *gt;
	unsigned int i, j;

	/* Count how many counters we will be exposing. */
	for_each_gt(gt, i915, j) {
		for (i = 0; i < ARRAY_SIZE(events); i++) {
			u64 config = ___I915_PMU_OTHER(j, events[i].counter);

			if (!config_status(i915, config))
				count++;
		}
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for_each_gt(gt, i915, j) {
		for (i = 0; i < ARRAY_SIZE(events); i++) {
			u64 config = ___I915_PMU_OTHER(j, events[i].counter);
			char *str;

			if (config_status(i915, config))
				continue;

			if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
				str = kstrdup(events[i].name, GFP_KERNEL);
			else
				str = kasprintf(GFP_KERNEL, "%s-gt%u",
						events[i].name, j);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter = add_i915_attr(i915_iter, str, config);

			if (events[i].unit) {
				if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
					str = kasprintf(GFP_KERNEL, "%s.unit",
							events[i].name);
				else
					str = kasprintf(GFP_KERNEL, "%s-gt%u.unit",
							events[i].name, j);
				if (!str)
					goto err;

				*attr_iter++ = &pmu_iter->attr.attr;
				pmu_iter = add_pmu_attr(pmu_iter, str,
							events[i].unit);
			}
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (cpumask_empty(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target = i915_pmu_target_cpu;

	GEM_BUG_ON(!pmu->base.event_init);

	/*
	 * Unregistering an instance generates a CPU offline event which we must
	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
	 */
	if (pmu->closed)
		return 0;

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);

		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			i915_pmu_target_cpu = target;
		}
	}

	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

int i915_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
			  ret);
	else
		cpuhp_slot = ret;

	return 0;
}

void i915_pmu_exit(void)
{
	if (cpuhp_slot != CPUHP_INVALID)
		cpuhp_remove_multi_state(cpuhp_slot);
}

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	if (cpuhp_slot == CPUHP_INVALID)
		return -EINVAL;

	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}

void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (GRAPHICS_VER(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.cpu = -1;
	init_rc6(pmu);

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module	= THIS_MODULE;
	pmu->base.task_ctx_nr	= perf_invalid_context;
	pmu->base.event_init	= i915_pmu_event_init;
	pmu->base.add		= i915_pmu_event_add;
	pmu->base.del		= i915_pmu_event_del;
	pmu->base.start		= i915_pmu_event_start;
	pmu->base.stop		= i915_pmu_event_stop;
	pmu->base.read		= i915_pmu_event_read;
	pmu->base.event_idx	= i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	/*
	 * "Disconnect" the PMU callbacks - since all are atomic, synchronize_rcu()
	 * ensures all currently executing ones will have exited before we
	 * proceed with unregistration.
	 */
	pmu->closed = true;
	synchronize_rcu();

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}
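
/*
 * Example usage from userspace (assuming an integrated GPU, so the PMU is
 * registered under the name "i915"):
 *
 *   perf stat -e i915/rc6-residency/ -e i915/rcs0-busy/ -a sleep 1
 *
 * reads the RC6 residency and render engine busyness accumulated over one
 * second via the counters implemented above.
 */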