// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/i915_drm.h>

#include "display/intel_display.h"
#include "display/intel_display_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"

#define BUSY_MAX_EI	20u /* ms */
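
/*
 * BUSY_MAX_EI caps the sampling interval of the software rps_timer below:
 * the interval starts at 1ms when the timer is (re)started and doubles on
 * each expiry (1, 2, 4, 8, 16, then 20ms), so an otherwise quiet GT settles
 * at one busyness evaluation every 20ms.
 */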
/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return &gt->uc.guc.slpc;
}

static bool rps_uses_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return intel_uc_uses_guc_slpc(&gt->uc);
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = from_timer(rps, t, timer);
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		busy = ktime_to_ns(ktime_sub(dt, last));
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);

	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multiple engines being active does not necessarily
		 * imply a single continuous workload across all engines, we
		 * hedge our bets by only contributing a factor of the
		 * distributed load into our busyness calculation.
		 */
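		/*
		 * Worked example of the weighting below (max_busy[] is kept
		 * sorted, largest first, by the swap() pass above):
		 *   busy = max_busy[0] + max_busy[1]/2 + max_busy[2]/4
		 * i.e. the busiest engine counts in full, the others at 1/2^i.
		 */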
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			busy += div_u64(max_busy[i], 1 << i);

			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			queue_work(gt->i915->unordered_wq, &rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			queue_work(gt->i915->unordered_wq, &rps->work);
		}

		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}

static void rps_start_timer(struct intel_rps *rps)
{
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
	del_timer_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}

static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;
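	/*
	 * GEN6_PMINTRMSK is a disable mask (a set bit masks off that event),
	 * hence the inversion below: only the events selected above are left
	 * unmasked.
	 */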
	return rps_pm_sanitize_mask(rps, ~mask);
}

static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GEM_BUG_ON(rps_uses_slpc(rps));

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(gt->irq_lock);
	if (GRAPHICS_VER(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	spin_unlock_irq(gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
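
/*
 * A sketch of how cparams[] is consumed (see __ips_chipset_val() below): the
 * entry whose i/t pair matches the FSB setting and memory frequency supplies
 * the slope (m) and intercept (c) for
 *   chipset_power = (m * delta_counts / dt_ms + c) / 10
 * an empirically fitted linear model rather than documented hardware
 * behaviour.
 */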

static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;

	if (i915->fsb_freq <= 3200)
	else if (i915->fsb_freq <= 4800)

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",

	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;

static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}

static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
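	/*
	 * Linear sensor calibration (an interpretation, not documented here):
	 * TSFS supplies the slope (m) and intercept (b) applied to the raw
	 * TR1 thermal reading (x); the 127 divisor presumably rescales the
	 * 7-bit slope field to a fraction.
	 */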

	return m * x / 127 - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid >= 8 && pxvid < 31)

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}

static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!dt)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
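	/*
	 * Scale sketch (assumption): delta is GFXEC energy counts accumulated
	 * over dt ms, and 1181/10 is the empirical counts-to-power factor,
	 * presumably yielding mW to match state2 in __ips_gfx_val() below.
	 */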
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}

static unsigned int gen5_invert_freq(struct intel_rps *rps,
				     unsigned int val)
{
	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	return val;
}

static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		drm_dbg(&rps_to_i915(rps)->drm,
			"gpu busy, RCS change rejected\n");
		return -EBUSY; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = gen5_invert_freq(rps, val);

		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return 0;
}

static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	spin_lock_irq(&mchdev_lock);
	err = __gen5_rps_set(rps, val);
	spin_unlock_irq(&mchdev_lock);

	return err;
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);
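	/*
	 * Decode of the PXVFREQ divider fields, i.e.
	 *   freq = div * 133333 / (pre << post)
	 * where 133333 is assumed to be the 133.33 MHz reference clock
	 * expressed in kHz.
	 */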

	return div * 133333 / (pre << post);
}

static unsigned int init_emon(struct intel_uncore *uncore)
{
	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

	/* Render standby states get 0 weight */

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}

static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");

	__gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_lock(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	spin_lock_irq(&mchdev_lock);

	spin_lock(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_rmw(uncore, MEMINTREN, MEMINT_EVAL_CHG_EN, 0);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

	/* Go back to the starting frequency */
	__gen5_rps_set(rps, rps->idle_freq);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);

	spin_unlock_irq(&mchdev_lock);
}

static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency; if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
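		/*
		 * (The min/max fields sit at different shifts here presumably
		 * because gen9+ frequency ratios are 9 bits wide, given the
		 * 16.67 MHz units from GEN9_FREQ_SCALER, versus 8 bits on
		 * earlier parts - an inference from the 23/14 vs 24/16
		 * packing above.)
		 */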
	}

	return limits;
}

static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */

	/*
	 * Once byt can survive dynamic sw freq adjustments without hanging
	 * the system, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power,
		 rps->power.up_threshold, ei_up,
		 rps->power.down_threshold, ei_down);

	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt,
				       ei_up * rps->power.up_threshold * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt,
				       ei_down *
				       rps->power.down_threshold * 10));
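
	/*
	 * Unit check for the two threshold writes above: ei_* is in us and
	 * the thresholds are percentages, so ei * threshold * 10 equals
	 * (ei * 1000) * threshold / 100, i.e. the threshold as a fraction of
	 * the evaluation interval in ns, before conversion to PM timestamp
	 * units.
	 */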

	set(uncore, GEN6_RP_CONTROL,
	    (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
}

static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
		if (val > rps->efficient_freq + 1 &&

		if (val <= rps->efficient_freq &&
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			new_power = HIGH_POWER;

		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&

	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n",
		 str_yes_no(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);

	GEM_BUG_ON(rps_uses_slpc(rps));

	if (GRAPHICS_VER(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		err = gen6_rps_set(rps, val);
	else
		err = gen5_rps_set(rps, val);
	if (err)
		return err;

	if (update && GRAPHICS_VER(i915) >= 6)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}

void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
		gen5_rps_update(rps);
}

void intel_rps_park(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	if (!intel_rps_clear_active(rps))
		return;

	if (intel_rps_uses_timer(rps))
		rps_stop_timer(rps);
	if (intel_rps_has_interrupts(rps))
		rps_disable_interrupts(rps);

	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver are now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	else /* CHV needs even encode values */

	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
	if (rps->cur_freq < rps->efficient_freq) {
		rps->cur_freq = rps->efficient_freq;
	}

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return slpc->boost_freq;
	}

	return intel_gpu_freq(rps, rps->boost_freq);
}

static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
{
	bool boost = false;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);

	return 0;
}

int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return intel_guc_slpc_set_boost_freq(slpc, freq);
	}

	return rps_set_boost_freq(rps, freq);
}

void intel_rps_dec_waiters(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		intel_guc_slpc_dec_waiters(slpc);
	} else {
		atomic_dec(&rps->num_waiters);
	}
}

void intel_rps_boost(struct i915_request *rq)
{
	struct intel_guc_slpc *slpc;

	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
		return;

	/* Serializes with i915_request_retire() */
	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;

		if (rps_uses_slpc(rps)) {
			slpc = rps_to_slpc(rps);

			if (slpc->min_freq_softlimit >= slpc->boost_freq)
				return;

			/* Return if old value is non-zero */
			if (!atomic_fetch_inc(&slpc->num_waiters)) {
				GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
					 rq->fence.context, rq->fence.seqno);
				queue_work(rps_to_gt(rps)->i915->unordered_wq,

		if (atomic_fetch_inc(&rps->num_waiters))
			return;

		if (!intel_rps_is_active(rps))
			return;

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
			queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);

		WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
	}
}

int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;
	return 0;
}

static u32 intel_rps_read_state_cap(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	if (IS_PONTEVECCHIO(i915))
		return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
	else if (IS_XEHPSDV(i915))
		return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
	else if (IS_GEN9_LP(i915))
		return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
	else
		return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
}

static void
mtl_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 rp_state_cap = rps_to_gt(rps)->type == GT_MEDIA ?
		intel_uncore_read(uncore, MTL_MEDIAP_STATE_CAP) :
		intel_uncore_read(uncore, MTL_RP_STATE_CAP);
	u32 rpe = rps_to_gt(rps)->type == GT_MEDIA ?
		intel_uncore_read(uncore, MTL_MPE_FREQUENCY) :
		intel_uncore_read(uncore, MTL_GT_RPE_FREQUENCY);

	/* MTL values are in units of 16.67 MHz */
	caps->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, rp_state_cap);
	caps->min_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, rp_state_cap);
	caps->rp1_freq = REG_FIELD_GET(MTL_RPE_MASK, rpe);
}

static void
__gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 rp_state_cap;

	rp_state_cap = intel_rps_read_state_cap(rps);

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		if (GRAPHICS_VER(i915) >= 10)
			caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
						       intel_uncore_read(to_gt(i915)->uncore,
						       GEN10_FREQ_INFO_REC));
		else
			caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		/*
		 * In this case the rp_state_cap register reports frequencies
		 * in units of 50 MHz. Convert these to the actual "hw unit",
		 * i.e. units of 16.67 MHz
		 */
		caps->rp0_freq *= GEN9_FREQ_SCALER;
		caps->rp1_freq *= GEN9_FREQ_SCALER;
		caps->min_freq *= GEN9_FREQ_SCALER;
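		/*
		 * e.g. an RP0 cap of 12 (12 * 50 MHz = 600 MHz) becomes
		 * 12 * GEN9_FREQ_SCALER == 36 in 16.67 MHz units.
		 */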
	}
}

/**
 * gen6_rps_get_freq_caps - Get freq caps exposed by HW
 * @rps: the intel_rps structure
 * @caps: returned freq caps
 *
 * Returned "caps" frequencies should be converted to MHz using
 * intel_gpu_freq().
 */
void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (IS_METEORLAKE(i915))
		return mtl_get_freq_caps(rps, caps);

	return __gen6_rps_get_freq_caps(rps, caps);
}

static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	rps->rp0_freq = caps.rp0_freq;
	rps->rp1_freq = caps.rp1_freq;
	rps->min_freq = caps.min_freq;

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		u32 ddcc_status = 0;

		if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
			mult = GEN9_FREQ_SCALER;
		if (snb_pcode_read(rps_to_gt(rps)->uncore,
				   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
				   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				((ddcc_status >> 8) & 0xff) * mult,

static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;

	/* Program defaults and thresholds for RPS */
	if (GRAPHICS_VER(gt->i915) == 9)
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}

static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_gt *gt = rps_to_gt(rps);

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (gt->info.sseu.eu_total) {
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		/* (2 * 8) config */
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* 1: Program defaults and thresholds for RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n",
		str_yes_no(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;

	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to
	 * 0xc0 to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n",
		str_yes_no(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned int t, state1, state2;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
		corr = t * 2349 + 135940;
		corr = t * 964 + 29317;
		corr = t * 301 + 1004;

	corr = div_u64(corr * 150142 * state1, 10000) - 78642;
	corr2 = div_u64(corr, 100000) * ips->corr;

	state2 = div_u64(corr2 * state1, 10000);
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}

static bool has_busy_stats(struct intel_rps *rps)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, rps_to_gt(rps), id) {
		if (!intel_engine_supports_stats(engine))
			return false;
	}

	return true;
}

void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	bool enabled = false;

	if (rps_uses_slpc(rps))
		return;

	intel_gt_check_clock_frequency(rps_to_gt(rps));

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (rps->max_freq <= rps->min_freq)
		/* leave disabled, no room for dynamic reclocking */;
	else if (IS_CHERRYVIEW(i915))
		enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		enabled = vlv_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 9)
		enabled = gen9_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 8)
		enabled = gen8_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		enabled = gen5_rps_enable(rps);
	else
		MISSING_CASE(GRAPHICS_VER(i915));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq),
		 rps->power.up_threshold,
		 rps->power.down_threshold);

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

	if (has_busy_stats(rps))
		intel_rps_set_timer(rps);
	else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11)
		intel_rps_set_interrupts(rps);
	else
		/* Ironlake currently uses intel_ips.ko */ {}

	intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (!intel_rps_is_enabled(rps))
		return;

	intel_rps_clear_enabled(rps);
	intel_rps_clear_interrupts(rps);
	intel_rps_clear_timer(rps);

	if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}

static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
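
/*
 * Round-trip sketch for the VLV/CHV conversions above: gpll_ref_freq is in
 * kHz, so byt_gpu_freq() divides by 1000 to yield MHz, while
 * chv_freq_opcode() rounds to the nearest even opcode (DIV_ROUND_CLOSEST of
 * twice the ratio, then doubled) to honour CHV's even-bin requirement.
 */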

int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return val * GT_FREQUENCY_MULTIPLIER;
	else
		return val;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
	else
		return val;
}

static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}

static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}

static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
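
		/*
		 * Scale check (assumption, based on the factors used): time
		 * is us scaled by czclk_freq in kHz; the <<8 suggests the C0
		 * counters tick once per 256 czclk cycles, and the 1000 * 100
		 * factors convert to the same us base and to percent so that
		 * c0 compares directly against time * threshold below.
		 */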
1793 if (c0 > time * rps->power.up_threshold)
1794 events = GEN6_PM_RP_UP_THRESHOLD;
1795 else if (c0 < time * rps->power.down_threshold)
1796 events = GEN6_PM_RP_DOWN_THRESHOLD;
1803 static void rps_work(struct work_struct *work)
1805 struct intel_rps *rps = container_of(work, typeof(*rps), work);
1806 struct intel_gt *gt = rps_to_gt(rps);
1807 struct drm_i915_private *i915 = rps_to_i915(rps);
1808 bool client_boost = false;
1809 int new_freq, adj, min, max;
1812 spin_lock_irq(gt->irq_lock);
1813 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
1814 client_boost = atomic_read(&rps->num_waiters);
1815 spin_unlock_irq(gt->irq_lock);
1817 /* Make sure we didn't queue anything we're not going to process. */
1818 if (!pm_iir && !client_boost)
1821 mutex_lock(&rps->lock);
1822 if (!intel_rps_is_active(rps)) {
1823 mutex_unlock(&rps->lock);
1827 pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
1829 adj = rps->last_adj;
1830 new_freq = rps->cur_freq;
1831 min = rps->min_freq_softlimit;
1832 max = rps->max_freq_softlimit;
1834 max = rps->max_freq;
1837 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
1838 pm_iir, str_yes_no(client_boost),
1839 adj, new_freq, min, max);
1841 if (client_boost && new_freq < rps->boost_freq) {
1842 new_freq = rps->boost_freq;
1844 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1847 else /* CHV needs even encode values */
1848 adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
1850 if (new_freq >= rps->max_freq_softlimit)
1852 } else if (client_boost) {
1854 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1855 if (rps->cur_freq > rps->efficient_freq)
1856 new_freq = rps->efficient_freq;
1857 else if (rps->cur_freq > rps->min_freq_softlimit)
1858 new_freq = rps->min_freq_softlimit;
1860 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1863 else /* CHV needs even encode values */
1864 adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
1866 if (new_freq <= rps->min_freq_softlimit)
1868 } else { /* unknown event */
1873 * sysfs frequency limits may have snuck in while
1874 * servicing the interrupt
1877 new_freq = clamp_t(int, new_freq, min, max);
1879 if (intel_rps_set(rps, new_freq)) {
1880 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
1883 rps->last_adj = adj;
1885 mutex_unlock(&rps->lock);
1888 spin_lock_irq(gt->irq_lock);
1889 gen6_gt_pm_unmask_irq(gt, rps->pm_events);
1890 spin_unlock_irq(gt->irq_lock);
1893 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
1895 struct intel_gt *gt = rps_to_gt(rps);
1896 const u32 events = rps->pm_events & pm_iir;
1898 lockdep_assert_held(gt->irq_lock);
1900 if (unlikely(!events))
1903 GT_TRACE(gt, "irq events:%x\n", events);
1905 gen6_gt_pm_mask_irq(gt, events);
1907 rps->pm_iir |= events;
1908 queue_work(gt->i915->unordered_wq, &rps->work);
1911 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
1913 struct intel_gt *gt = rps_to_gt(rps);
1916 events = pm_iir & rps->pm_events;
1918 spin_lock(gt->irq_lock);
1920 GT_TRACE(gt, "irq events:%x\n", events);
1922 gen6_gt_pm_mask_irq(gt, events);
1923 rps->pm_iir |= events;
1925 queue_work(gt->i915->unordered_wq, &rps->work);
1926 spin_unlock(gt->irq_lock);
1929 if (GRAPHICS_VER(gt->i915) >= 8)
1932 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1933 intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
1935 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1936 drm_dbg(&rps_to_i915(rps)->drm,
1937 "Command parser error, pm_iir 0x%08x\n", pm_iir);
1940 void gen5_rps_irq_handler(struct intel_rps *rps)
1942 struct intel_uncore *uncore = rps_to_uncore(rps);
1943 u32 busy_up, busy_down, max_avg, min_avg;
1946 spin_lock(&mchdev_lock);
1948 intel_uncore_write16(uncore,
1950 intel_uncore_read(uncore, MEMINTRSTS));
1952 intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
1953 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
1954 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
1955 max_avg = intel_uncore_read(uncore, RCBMAXAVG);
1956 min_avg = intel_uncore_read(uncore, RCBMINAVG);
1958 /* Handle RCS change request from hw */
1959 new_freq = rps->cur_freq;
1960 if (busy_up > max_avg)
1962 else if (busy_down < min_avg)
1964 new_freq = clamp(new_freq,
1965 rps->min_freq_softlimit,
1966 rps->max_freq_softlimit);
1968 if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
1969 rps->cur_freq = new_freq;
1971 spin_unlock(&mchdev_lock);
1974 void intel_rps_init_early(struct intel_rps *rps)
1976 mutex_init(&rps->lock);
1977 mutex_init(&rps->power.mutex);
1979 INIT_WORK(&rps->work, rps_work);
1980 timer_setup(&rps->timer, rps_timer, 0);
1982 atomic_set(&rps->num_waiters, 0);
1985 void intel_rps_init(struct intel_rps *rps)
1987 struct drm_i915_private *i915 = rps_to_i915(rps);
1989 if (rps_uses_slpc(rps))
1992 if (IS_CHERRYVIEW(i915))
1994 else if (IS_VALLEYVIEW(i915))
1996 else if (GRAPHICS_VER(i915) >= 6)
1998 else if (IS_IRONLAKE_M(i915))
2001 /* Derive initial user preferences/limits from the hardware limits */
2002 rps->max_freq_softlimit = rps->max_freq;
2003 rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
2004 rps->min_freq_softlimit = rps->min_freq;
2005 rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;
2007 /* After setting max-softlimit, find the overclock max freq */
2008 if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
2011 snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, ¶ms, NULL);
2012 if (params & BIT(31)) { /* OC supported */
2014 "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
2015 (rps->max_freq & 0xff) * 50,
2016 (params & 0xff) * 50);
2017 rps->max_freq = params & 0xff;
2021 /* Set default thresholds in % */
2022 rps->power.up_threshold = 95;
2023 rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold;
2024 rps->power.down_threshold = 85;
2025 rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold;
2027 /* Finally allow us to boost to max by default */
2028 rps->boost_freq = rps->max_freq;
2029 rps->idle_freq = rps->min_freq;
2031 /* Start in the middle, from here we will autotune based on workload */
2032 rps->cur_freq = rps->efficient_freq;
2034 rps->pm_intrmsk_mbz = 0;
2037 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
2038 * if GEN6_PM_UP_EI_EXPIRED is masked.
2040 * TODO: verify if this can be reproduced on VLV,CHV.
2042 if (GRAPHICS_VER(i915) <= 7)
2043 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
2045 if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
2046 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
2048 /* GuC needs ARAT expired interrupt unmasked */
2049 if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
2050 rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
2053 void intel_rps_sanitize(struct intel_rps *rps)
2055 if (rps_uses_slpc(rps))
2058 if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
2059 rps_disable_interrupts(rps);
2062 u32 intel_rps_read_rpstat(struct intel_rps *rps)
2064 struct drm_i915_private *i915 = rps_to_i915(rps);
2067 rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;
2069 return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
2072 static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
2074 struct drm_i915_private *i915 = rps_to_i915(rps);
2077 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2078 cagf = REG_FIELD_GET(MTL_CAGF_MASK, rpstat);
2079 else if (GRAPHICS_VER(i915) >= 12)
2080 cagf = REG_FIELD_GET(GEN12_CAGF_MASK, rpstat);
2081 else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2082 cagf = REG_FIELD_GET(RPE_MASK, rpstat);
2083 else if (GRAPHICS_VER(i915) >= 9)
2084 cagf = REG_FIELD_GET(GEN9_CAGF_MASK, rpstat);
2085 else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2086 cagf = REG_FIELD_GET(HSW_CAGF_MASK, rpstat);
2087 else if (GRAPHICS_VER(i915) >= 6)
2088 cagf = REG_FIELD_GET(GEN6_CAGF_MASK, rpstat);
2090 cagf = gen5_invert_freq(rps, REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rpstat));
2095 static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
2097 struct drm_i915_private *i915 = rps_to_i915(rps);
2098 struct intel_uncore *uncore = rps_to_uncore(rps);
2099 i915_reg_t r = INVALID_MMIO_REG;
2103 * For Gen12+ reading freq from HW does not need a forcewake and
2104 * registers will return 0 freq when GT is in RC6
2106 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2107 r = MTL_MIRROR_TARGET_WP1;
2108 } else if (GRAPHICS_VER(i915) >= 12) {
2110 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
2111 vlv_punit_get(i915);
2112 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
2113 vlv_punit_put(i915);
2114 } else if (GRAPHICS_VER(i915) >= 6) {
2120 if (i915_mmio_reg_valid(r))
2121 freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r);
2123 return intel_rps_get_cagf(rps, freq);
2126 static u32 read_cagf(struct intel_rps *rps)
2128 return __read_cagf(rps, true);
2131 u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
2133 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
2134 intel_wakeref_t wakeref;
2137 with_intel_runtime_pm_if_in_use(rpm, wakeref)
2138 freq = intel_gpu_freq(rps, read_cagf(rps));
2143 u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
2145 return intel_gpu_freq(rps, __read_cagf(rps, false));
2148 static u32 intel_rps_read_punit_req(struct intel_rps *rps)
2150 struct intel_uncore *uncore = rps_to_uncore(rps);
2151 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
2152 intel_wakeref_t wakeref;
2155 with_intel_runtime_pm_if_in_use(rpm, wakeref)
2156 freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
2161 static u32 intel_rps_get_req(u32 pureq)
2163 u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;
2168 u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
2170 u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));
2172 return intel_gpu_freq(rps, freq);
2175 u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
2177 if (rps_uses_slpc(rps))
2178 return intel_rps_read_punit_req_frequency(rps);
2180 return intel_gpu_freq(rps, rps->cur_freq);
2183 u32 intel_rps_get_max_frequency(struct intel_rps *rps)
2185 struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2187 if (rps_uses_slpc(rps))
2188 return slpc->max_freq_softlimit;
2190 return intel_gpu_freq(rps, rps->max_freq_softlimit);
2194 * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
2195 * @rps: the intel_rps structure
2197 * Returns the max frequency in a raw format. In newer platforms raw is in
2200 u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
2202 struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2205 if (rps_uses_slpc(rps)) {
2206 return DIV_ROUND_CLOSEST(slpc->rp0_freq,
2207 GT_FREQUENCY_MULTIPLIER);
2209 freq = rps->max_freq;
2210 if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
2211 /* Convert GT frequency to 50 MHz units */
2212 freq /= GEN9_FREQ_SCALER;
2218 u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
2220 struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2222 if (rps_uses_slpc(rps))
2223 return slpc->rp0_freq;
2225 return intel_gpu_freq(rps, rps->rp0_freq);
2228 u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
2230 struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2232 if (rps_uses_slpc(rps))
2233 return slpc->rp1_freq;
2235 return intel_gpu_freq(rps, rps->rp1_freq);
2238 u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
2240 struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2242 if (rps_uses_slpc(rps))
2243 return slpc->min_freq;
2245 return intel_gpu_freq(rps, rps->min_freq);
static void rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps_freq_caps caps;
	u32 rp_state_limits;
	u32 gt_perf_status;
	u32 rpmodectl, rpinclimit, rpdeclimit;
	u32 rpstat, cagf, reqf;
	u32 rpcurupei, rpcurup, rpprevup;
	u32 rpcurdownei, rpcurdown, rpprevdown;
	u32 rpupei, rpupt, rpdownei, rpdownt;
	u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;

	rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
	gen6_rps_get_freq_caps(rps, &caps);
	if (IS_GEN9_LP(i915))
		gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
	else
		gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);

	/* RPSTAT1 is in the GT power well */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
	if (GRAPHICS_VER(i915) >= 9) {
		reqf >>= 23;
	} else {
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			reqf >>= 24;
		else
			reqf >>= 25;
	}
	reqf = intel_gpu_freq(rps, reqf);

	rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
	rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
	rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

	rpstat = intel_rps_read_rpstat(rps);
	rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
	rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
	rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
	rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
	rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
	rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;

	rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
	rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);

	rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
	rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

	cagf = intel_rps_read_actual_frequency(rps);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	if (GRAPHICS_VER(i915) >= 11) {
		pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
		/*
		 * The equivalent to the PM ISR & IIR cannot be read
		 * without affecting the current state of the system
		 */
		pm_isr = 0;
		pm_iir = 0;
	} else if (GRAPHICS_VER(i915) >= 8) {
		pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
		pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
		pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
		pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
	} else {
		pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
		pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
		pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
		pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
	}
	pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

	drm_printf(p, "Video Turbo Mode: %s\n",
		   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
	drm_printf(p, "HW control enabled: %s\n",
		   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
	drm_printf(p, "SW control enabled: %s\n",
		   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));

	drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
		   pm_ier, pm_imr, pm_mask);
	if (GRAPHICS_VER(i915) <= 10)
		drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
			   pm_isr, pm_iir);
	drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
		   rps->pm_intrmsk_mbz);
	drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
	drm_printf(p, "Render p-state ratio: %d\n",
		   (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
	drm_printf(p, "Render p-state VID: %d\n",
		   gt_perf_status & 0xff);
	drm_printf(p, "Render p-state limit: %d\n",
		   rp_state_limits & 0xff);
	drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
	drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
	drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
	drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
	drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
	drm_printf(p, "CAGF: %dMHz\n", cagf);
	drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
		   rpcurupei,
		   intel_gt_pm_interval_to_ns(gt, rpcurupei));
	drm_printf(p, "RP CUR UP: %d (%lldns)\n",
		   rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
	drm_printf(p, "RP PREV UP: %d (%lldns)\n",
		   rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
	drm_printf(p, "Up threshold: %d%%\n",
		   rps->power.up_threshold);
	drm_printf(p, "RP UP EI: %d (%lldns)\n",
		   rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
	drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
		   rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));

	drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
		   rpcurdownei,
		   intel_gt_pm_interval_to_ns(gt, rpcurdownei));
	drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
		   rpcurdown,
		   intel_gt_pm_interval_to_ns(gt, rpcurdown));
	drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
		   rpprevdown,
		   intel_gt_pm_interval_to_ns(gt, rpprevdown));
	drm_printf(p, "Down threshold: %d%%\n",
		   rps->power.down_threshold);
	drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
		   rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
	drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
		   rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));

	drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.min_freq));
	drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
	drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp0_freq));
	drm_printf(p, "Max overclocked frequency: %dMHz\n",
		   intel_gpu_freq(rps, rps->max_freq));

	drm_printf(p, "Current freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->cur_freq));
	drm_printf(p, "Actual freq: %d MHz\n", cagf);
	drm_printf(p, "Idle freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->idle_freq));
	drm_printf(p, "Min freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->min_freq));
	drm_printf(p, "Boost freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->boost_freq));
	drm_printf(p, "Max freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->max_freq));
	drm_printf(p,
		   "efficient (RPe) frequency: %d MHz\n",
		   intel_gpu_freq(rps, rps->efficient_freq));
}
static void slpc_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps_freq_caps caps;
	u32 pm_mask;

	gen6_rps_get_freq_caps(rps, &caps);
	pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

	drm_printf(p, "PM MASK=0x%08x\n", pm_mask);
	drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
		   rps->pm_intrmsk_mbz);
	drm_printf(p, "RPSTAT1: 0x%08x\n", intel_rps_read_rpstat(rps));
	drm_printf(p, "RPNSWREQ: %dMHz\n", intel_rps_get_requested_frequency(rps));
	drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.min_freq));
	drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
	drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp0_freq));
	drm_printf(p, "Current freq: %d MHz\n",
		   intel_rps_get_requested_frequency(rps));
	drm_printf(p, "Actual freq: %d MHz\n",
		   intel_rps_read_actual_frequency(rps));
	drm_printf(p, "Min freq: %d MHz\n",
		   intel_rps_get_min_frequency(rps));
	drm_printf(p, "Boost freq: %d MHz\n",
		   intel_rps_get_boost_frequency(rps));
	drm_printf(p, "Max freq: %d MHz\n",
		   intel_rps_get_max_frequency(rps));
	drm_printf(p,
		   "efficient (RPe) frequency: %d MHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
}
void gen6_rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	if (rps_uses_slpc(rps))
		return slpc_frequency_dump(rps, p);

	return rps_frequency_dump(rps, p);
}
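/*
 * Both dump flavours are reached through this single entry point, so a
 * consumer (typically a debugfs "frequency info" style dump; the exact
 * caller lives outside this file) does not need to know whether GuC SLPC
 * or host RPS currently owns frequency control.
 */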
static int set_max_freq(struct intel_rps *rps, u32 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		drm_dbg(&i915->drm, "User requested overclocking to %d\n",
			intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}
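/*
 * Illustrative numbers (not taken from the source): with cur_freq at the
 * opcode for 700 MHz and a new software max of 500 MHz, the clamp_t()
 * above pulls the request down to 500 MHz before intel_rps_set() runs,
 * so lowering the limit takes effect immediately rather than waiting
 * for the next up/down threshold interrupt.
 */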
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_max_freq(slpc, val);

	return set_max_freq(rps, val);
}
u32 intel_rps_get_min_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
/**
 * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
 * @rps: the intel_rps structure
 *
 * Returns the min frequency in a raw format. In newer platforms raw is in
 * units of 50 MHz.
 */
u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
	u32 freq;

	if (rps_uses_slpc(rps)) {
		return DIV_ROUND_CLOSEST(slpc->min_freq,
					 GT_FREQUENCY_MULTIPLIER);
	} else {
		freq = rps->min_freq;
		if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
			/* Convert GT frequency to 50 MHz units */
			freq /= GEN9_FREQ_SCALER;
		}
		return freq;
	}
}
static int set_min_freq(struct intel_rps *rps, u32 val)
{
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_min_freq(slpc, val);

	return set_min_freq(rps, val);
}
u8 intel_rps_get_up_threshold(struct intel_rps *rps)
{
	return rps->power.up_threshold;
}
static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val)
{
	int ret;

	/* The thresholds are expressed as a percentage of the interval */
	if (val > 100)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rps->lock);
	if (ret)
		return ret;

	if (*threshold == val)
		goto out_unlock;

	*threshold = val;

	/* Invalidate the cached RPS state so the new threshold is programmed */
	rps->last_freq = -1;
	mutex_lock(&rps->power.mutex);
	rps->power.mode = -1;
	mutex_unlock(&rps->power.mutex);

	intel_rps_set(rps, clamp(rps->cur_freq,
				 rps->min_freq_softlimit,
				 rps->max_freq_softlimit));

out_unlock:
	mutex_unlock(&rps->lock);

	return ret;
}
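/*
 * Setting last_freq and power.mode to -1 defeats the "no change, skip"
 * fast paths, so the intel_rps_set() call above rewrites
 * GEN6_RP_UP_THRESHOLD / GEN6_RP_DOWN_THRESHOLD with interval values
 * derived from the new percentage, even when the requested frequency
 * itself is unchanged.
 */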
int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold)
{
	return rps_set_threshold(rps, &rps->power.up_threshold, threshold);
}
u8 intel_rps_get_down_threshold(struct intel_rps *rps)
{
	return rps->power.down_threshold;
}
int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold)
{
	return rps_set_threshold(rps, &rps->power.down_threshold, threshold);
}
static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;

	/* Allow punit to process software requests */
	intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
}
void intel_rps_raise_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.rp0_freq <<
				   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				   GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->rp0_freq);
	}

	mutex_unlock(&rps->lock);
}
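/*
 * Sketch of the register value composed above, again assuming the ratio
 * shift is 23 (matching the decode in rps_frequency_dump()): for an RP0
 * ratio of 0x16 the write becomes
 *
 *	(0x16 << 23) | GEN9_IGNORE_SLICE_RATIO = 0x0b000000 | ...
 *
 * i.e. a fixed software request at RP0 with the slice ratio ignored,
 * bracketed by intel_rps_set_manual() so the punit honours it.
 */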
void intel_rps_lower_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.min_freq <<
				   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				   GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->min_freq);
	}

	mutex_unlock(&rps->lock);
}
static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
{
	struct intel_gt *gt = rps_to_gt(rps);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		val = intel_uncore_read(gt->uncore, reg32);

	return val;
}
bool rps_read_mask_mmio(struct intel_rps *rps,
			i915_reg_t reg32, u32 mask)
{
	return rps_read_mmio(rps, reg32) & mask;
}
/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;
/*
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
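/*
 * symbol_get() resolves ips_link_to_i915_driver only if intel_ips is
 * already loaded, and takes a reference on that module while we call it;
 * symbol_put() drops the reference once the one-shot notification has
 * run. If intel_ips loads later instead, it reaches i915 through
 * i915_read_mch_val() and friends below, so neither module needs a hard
 * link-time dependency on the other.
 */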
void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (GRAPHICS_VER(gt->i915) == 5) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}
void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}
static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}
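/*
 * kref_get_unless_zero() is what makes the RCU lookup safe against a
 * concurrent driver unload: if the last reference to the drm device is
 * already gone we return NULL instead of resurrecting it. Every
 * successful mchdev_get() below is therefore paired with drm_dev_put().
 */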
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &to_gt(i915)->rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);

	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
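/*
 * Both sampling helpers depend on IPS state guarded by mchdev_lock,
 * hence the spin_lock_irq() around them; the runtime-pm block keeps the
 * hardware awake so the activity counters they sample are live rather
 * than frozen at their last powered-on values.
 */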
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);

	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);

	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = to_gt(i915)->awake;

	drm_dev_put(&i915->drm);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#include "selftest_slpc.c"
#endif