// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

#define BUSY_MAX_EI	20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return &gt->uc.guc.slpc;
}

static bool rps_uses_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return intel_uc_uses_guc_slpc(&gt->uc);
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

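/*
 * Periodic soft-RPS evaluation (a descriptive note on rps_timer() below):
 * each tick samples every engine's accumulated busy time, keeps the
 * heaviest contributions, and converts the aggregate into synthetic
 * up/down threshold events for the RPS worker. The sampling interval
 * doubles each quiet tick, up to BUSY_MAX_EI.
 */
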
static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = from_timer(rps, t, timer);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;
	s64 max_busy[3] = {};

	timestamp = 0;
	for_each_engine(engine, rps_to_gt(rps), id) {
		s64 busy;
		int i;

		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		busy = ktime_to_ns(ktime_sub(dt, last));
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);
		}
	}
	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		s64 busy;
		int i;

		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multi-engines being active does imply a single
		 * continuous workload across all engines, we hedge our
		 * bets by only contributing a factor of the distributed
		 * load into our busyness calculation.
		 */
		busy = max_busy[0];
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			if (!max_busy[i])
				break;

			busy += div_u64(max_busy[i], 1 << i);
		}
		GT_TRACE(rps_to_gt(rps),
			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],
			 rps->pm_interval);

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else {
			rps->last_adj = 0;
		}

		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}

static void rps_start_timer(struct intel_rps *rps)
{
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
	del_timer_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}

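/*
 * Compute the PMINTRMSK value for a given frequency: leave only those
 * interrupts unmasked that can still move us (up events while below the
 * max softlimit, down events while above the min softlimit), then drop
 * anything outside rps->pm_events and the must-be-zero bits.
 */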
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;

	return rps_pm_sanitize_mask(rps, ~mask);
}

static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GEM_BUG_ON(rps_uses_slpc(rps));

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(&gt->irq_lock);
	if (GRAPHICS_VER(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}

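/*
 * Coefficients for the Ironlake IPS chipset power estimate: the table is
 * keyed by FSB class (i) and memory frequency (t), selecting the slope
 * (m) and intercept (c) that __ips_chipset_val() applies to the energy
 * counter delta below.
 */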
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}

static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}

static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}

static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}

static unsigned int gen5_invert_freq(struct intel_rps *rps,
				     unsigned int val)
{
	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	return val;
}

static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return -EBUSY; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = gen5_invert_freq(rps, val);

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return 0;
}

static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	spin_lock_irq(&mchdev_lock);
	err = __gen5_rps_set(rps, val);
	spin_unlock_irq(&mchdev_lock);

	return err;
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}

static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}

static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	__gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_lock(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	spin_lock(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore, MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

	/* Go back to the starting frequency */
	__gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}

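/*
 * Program the hardware RPS evaluation intervals and busyness thresholds
 * for one of the three power modes (LOW_POWER, BETWEEN, HIGH_POWER);
 * higher modes use shorter up-evaluation intervals so that we upclock
 * more eagerly under sustained load.
 */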
static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	threshold_up = 95;
	threshold_down = 85;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		ei_up = 16000;
		ei_down = 32000;
		break;

	case BETWEEN:
		ei_up = 13000;
		ei_down = 32000;
		break;

	case HIGH_POWER:
		ei_up = 10000;
		ei_down = 32000;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power, threshold_up, ei_up, threshold_down, ei_down);

	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));

	set(uncore, GEN6_RP_CONTROL,
	    (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}

static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	GEM_BUG_ON(rps_uses_slpc(rps));

	if (GRAPHICS_VER(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		err = gen6_rps_set(rps, val);
	else
		err = gen5_rps_set(rps, val);
	if (err)
		return err;

	if (update && GRAPHICS_VER(i915) >= 6)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}

void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	rps->pm_iir = 0;
	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
		gen5_rps_update(rps);
}

void intel_rps_park(struct intel_rps *rps)
{
	int adj;

	if (!intel_rps_is_enabled(rps))
		return;

	GEM_BUG_ON(atomic_read(&rps->num_waiters));

	if (!intel_rps_clear_active(rps))
		return;

	if (intel_rps_uses_timer(rps))
		rps_stop_timer(rps);
	if (intel_rps_has_interrupts(rps))
		rps_disable_interrupts(rps);

	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	adj = rps->last_adj;
	if (adj < 0)
		adj *= 2;
	else /* CHV needs even encode values */
		adj = -2;
	rps->last_adj = adj;
	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
	if (rps->cur_freq < rps->efficient_freq) {
		rps->cur_freq = rps->efficient_freq;
		rps->last_adj = 0;
	}

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

void intel_rps_boost(struct i915_request *rq)
{
	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
		return;

	/* Serializes with i915_request_retire() */
	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;

		if (atomic_fetch_inc(&rps->num_waiters))
			return;

		if (!intel_rps_is_active(rps))
			return;

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
	}
}

int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;
	return 0;
}

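/*
 * Read the hardware frequency capabilities: RP0 is the maximum
 * non-overclocked frequency, RP1 the "efficient" frequency and RPn the
 * minimum. Note the RP_STATE_CAP bitfield layout is reversed on the
 * Gen9 LP parts relative to the others.
 */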
static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(i915,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					(ddcc_status >> 8) & 0xff,
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}

static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;

	/* Program defaults and thresholds for RPS */
	if (GRAPHICS_VER(gt->i915) == 9)
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}

static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_gt *gt = rps_to_gt(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (gt->info.sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS*/
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned int t, state1, state2;
	u32 pxvid, ext_v;
	u64 corr, corr2;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = div_u64(corr * 150142 * state1, 10000) - 78642;
	corr2 = div_u64(corr, 100000) * ips->corr;

	state2 = div_u64(corr2 * state1, 10000);
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}

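/*
 * The soft timer in rps_timer() can only be used if every engine exposes
 * busyness statistics; otherwise intel_rps_enable() falls back to the
 * hardware-generated PM interrupts for the up/down events.
 */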
static bool has_busy_stats(struct intel_rps *rps)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, rps_to_gt(rps), id) {
		if (!intel_engine_supports_stats(engine))
			return false;
	}

	return true;
}

void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	bool enabled = false;

	if (!HAS_RPS(i915))
		return;

	if (rps_uses_slpc(rps))
		return;

	intel_gt_check_clock_frequency(rps_to_gt(rps));

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (rps->max_freq <= rps->min_freq)
		/* leave disabled, no room for dynamic reclocking */;
	else if (IS_CHERRYVIEW(i915))
		enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		enabled = vlv_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 9)
		enabled = gen9_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 8)
		enabled = gen8_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		enabled = gen5_rps_enable(rps);
	else
		MISSING_CASE(GRAPHICS_VER(i915));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	if (!enabled)
		return;

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq));

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

	if (has_busy_stats(rps))
		intel_rps_set_timer(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		intel_rps_set_interrupts(rps);
	else
		/* Ironlake currently uses intel_ips.ko */ {}

	intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	intel_rps_clear_enabled(rps);
	intel_rps_clear_interrupts(rps);
	intel_rps_clear_timer(rps);

	if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}

static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return val * GT_FREQUENCY_MULTIPLIER;
	else
		return val;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
	else
		return val;
}

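/*
 * Worked example of the Gen9 conversion above (assuming the usual
 * GT_FREQUENCY_MULTIPLIER of 50 MHz and GEN9_FREQ_SCALER of 3, i.e.
 * 16.66 MHz hardware units): opcode 36 maps to 36 * 50 / 3 = 600 MHz,
 * and intel_freq_opcode(rps, 600) gives 600 * 3 / 50 = 36 again.
 */
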
static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}

static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}

static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

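/*
 * Deferred RPS worker: collect the queued PM events (and any client
 * boost requests), pick a new frequency and apply it under rps->lock.
 * Repeated up/down events double the step size so that we converge on
 * the right frequency quickly.
 */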
static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);
	if (!intel_rps_is_active(rps)) {
		mutex_unlock(&rps->lock);
		return;
	}

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, yesno(client_boost),
		 adj, new_freq, min, max);

	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	/*
	 * sysfs frequency limits may have snuck in while
	 * servicing the interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		adj = 0;
	}
	rps->last_adj = adj;

	mutex_unlock(&rps->lock);

out:
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);
}

void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(&gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(&gt->irq_lock);
	}

	if (GRAPHICS_VER(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}

void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}

void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);
	timer_setup(&rps->timer, rps_timer, 0);

	atomic_set(&rps->num_waiters, 0);
}

void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (rps_uses_slpc(rps))
		return;

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
				       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (GRAPHICS_VER(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	/* GuC needs ARAT expired interrupt unmasked */
	if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
		rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
}

void intel_rps_sanitize(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return;

	if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
		rps_disable_interrupts(rps);
}

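/*
 * Decode the "current actual GPU frequency" (CAGF) field from an
 * RPSTAT-style register value; the field position varies per platform,
 * and on Ironlake the value is a memory P-state that has to be inverted
 * back into a frequency bin.
 */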
u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = (rpstat >> 8) & 0xff;
	else if (GRAPHICS_VER(i915) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else if (GRAPHICS_VER(i915) >= 6)
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	else
		cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
					MEMSTAT_PSTATE_SHIFT);

	return cagf;
}

static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 freq;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else if (GRAPHICS_VER(i915) >= 6) {
		freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
	} else {
		freq = intel_uncore_read(uncore, MEMSTAT_ILK);
	}

	return intel_rps_get_cagf(rps, freq);
}

u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}

u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	return intel_uncore_read(uncore, GEN6_RPNSWREQ);
}

static u32 intel_rps_get_req(u32 pureq)
{
	u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;

	return req;
}

u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
{
	u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));

	return intel_gpu_freq(rps, freq);
}

u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return intel_rps_read_punit_req_frequency(rps);
	else
		return intel_gpu_freq(rps, rps->cur_freq);
}

u32 intel_rps_get_max_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->max_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->max_freq_softlimit);
}

u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp0_freq;
	else
		return intel_gpu_freq(rps, rps->rp0_freq);
}

u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp1_freq;
	else
		return intel_gpu_freq(rps, rps->rp1_freq);
}

u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq;
	else
		return intel_gpu_freq(rps, rps->min_freq);
}

static int set_max_freq(struct intel_rps *rps, u32 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		drm_dbg(&i915->drm, "User requested overclocking to %d\n",
			intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_max_freq(slpc, val);

	return set_max_freq(rps, val);
}

u32 intel_rps_get_min_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq_softlimit;

	return intel_gpu_freq(rps, rps->min_freq_softlimit);
}

static int set_min_freq(struct intel_rps *rps, u32 val)
{
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_min_freq(slpc, val);

	return set_min_freq(rps, val);
}

/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;

/*
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (GRAPHICS_VER(gt->i915) == 5) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}

static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &i915->gt.rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#include "selftest_slpc.c"
#endif