2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * Kevin Tian <kevin.tian@intel.com>
25 * Eddie Dong <eddie.dong@intel.com>
26 * Zhiyuan Lv <zhiyuan.lv@intel.com>
29 * Min He <min.he@intel.com>
30 * Tina Zhang <tina.zhang@intel.com>
31 * Pei Zhang <pei.zhang@intel.com>
32 * Niu Bing <bing.niu@intel.com>
33 * Ping Gao <ping.a.gao@intel.com>
34 * Zhi Wang <zhi.a.wang@intel.com>
42 #include "i915_pvinfo.h"
43 #include "intel_mchbar_regs.h"
44 #include "display/intel_display_types.h"
45 #include "display/intel_dmc_regs.h"
46 #include "display/intel_dp_aux_regs.h"
47 #include "display/intel_dpio_phy.h"
48 #include "display/intel_fbc.h"
49 #include "display/intel_fdi_regs.h"
50 #include "display/intel_pps_regs.h"
51 #include "display/skl_watermark_regs.h"
52 #include "display/vlv_dsi_pll_regs.h"
53 #include "gt/intel_gt_regs.h"
55 /* XXX FIXME i915 has changed PP_XXX definition */
56 #define PCH_PP_STATUS _MMIO(0xc7200)
57 #define PCH_PP_CONTROL _MMIO(0xc7204)
58 #define PCH_PP_ON_DELAYS _MMIO(0xc7208)
59 #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
60 #define PCH_PP_DIVISOR _MMIO(0xc7210)
62 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
64 struct drm_i915_private *i915 = gvt->gt->i915;
66 if (IS_BROADWELL(i915))
68 else if (IS_SKYLAKE(i915))
70 else if (IS_KABYLAKE(i915))
72 else if (IS_BROXTON(i915))
74 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
80 static bool intel_gvt_match_device(struct intel_gvt *gvt,
83 return intel_gvt_get_device_type(gvt) & device;
86 static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
87 void *p_data, unsigned int bytes)
89 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
92 static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
93 void *p_data, unsigned int bytes)
95 memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
98 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
101 struct intel_gvt_mmio_info *e;
103 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
104 if (e->offset == offset)
110 static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
111 u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
112 gvt_mmio_func read, gvt_mmio_func write)
114 struct intel_gvt_mmio_info *p;
117 if (!intel_gvt_match_device(gvt, device))
120 if (WARN_ON(!IS_ALIGNED(offset, 4)))
126 for (i = start; i < end; i += 4) {
127 p = intel_gvt_find_mmio_info(gvt, i);
129 WARN(1, "assign a handler to a non-tracked mmio %x\n",
133 p->ro_mask = ro_mask;
134 gvt->mmio.mmio_attribute[i / 4] = flags;
144 * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
146 * @offset: register offset
149 * The engine containing the offset within its mmio page.
151 const struct intel_engine_cs *
152 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
154 struct intel_engine_cs *engine;
155 enum intel_engine_id id;
157 offset &= ~GENMASK(11, 0);
158 for_each_engine(engine, gvt->gt, id)
159 if (engine->mmio_base == offset)
165 #define offset_to_fence_num(offset) \
166 ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
168 #define fence_num_to_offset(num) \
169 (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
172 void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
175 case GVT_FAILSAFE_UNSUPPORTED_GUEST:
176 pr_err("Detected your guest driver doesn't support GVT-g.\n");
178 case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
179 pr_err("Graphics resource is not enough for the guest\n");
181 case GVT_FAILSAFE_GUEST_ERR:
182 pr_err("GVT Internal error for the guest\n");
187 pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
188 vgpu->failsafe = true;
191 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
192 unsigned int fence_num, void *p_data, unsigned int bytes)
194 unsigned int max_fence = vgpu_fence_sz(vgpu);
196 if (fence_num >= max_fence) {
197 gvt_vgpu_err("access oob fence reg %d/%d\n",
198 fence_num, max_fence);
200 /* When guest access oob fence regs without access
201 * pv_info first, we treat guest not supporting GVT,
202 * and we will let vgpu enter failsafe mode.
204 if (!vgpu->pv_notified)
205 enter_failsafe_mode(vgpu,
206 GVT_FAILSAFE_UNSUPPORTED_GUEST);
208 memset(p_data, 0, bytes);
214 static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
215 unsigned int offset, void *p_data, unsigned int bytes)
217 u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
219 if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
220 if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
221 gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
223 gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
225 /* All engines must be enabled together for vGPU,
226 * since we don't know which engine the ppgtt will
227 * bind to when shadowing.
229 gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
235 write_vreg(vgpu, offset, p_data, bytes);
239 static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
240 void *p_data, unsigned int bytes)
244 ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
248 read_vreg(vgpu, off, p_data, bytes);
252 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
253 void *p_data, unsigned int bytes)
255 struct intel_gvt *gvt = vgpu->gvt;
256 unsigned int fence_num = offset_to_fence_num(off);
259 ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
262 write_vreg(vgpu, off, p_data, bytes);
264 mmio_hw_access_pre(gvt->gt);
265 intel_vgpu_write_fence(vgpu, fence_num,
266 vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
267 mmio_hw_access_post(gvt->gt);
271 #define CALC_MODE_MASK_REG(old, new) \
272 (((new) & GENMASK(31, 16)) \
273 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
274 | ((new) & ((new) >> 16))))
276 static int mul_force_wake_write(struct intel_vgpu *vgpu,
277 unsigned int offset, void *p_data, unsigned int bytes)
282 old = vgpu_vreg(vgpu, offset);
283 new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
285 if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
287 case FORCEWAKE_RENDER_GEN9_REG:
288 ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
290 case FORCEWAKE_GT_GEN9_REG:
291 ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
293 case FORCEWAKE_MEDIA_GEN9_REG:
294 ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
297 /*should not hit here*/
298 gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
302 ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
305 vgpu_vreg(vgpu, offset) = new;
306 vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
310 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
311 void *p_data, unsigned int bytes)
313 intel_engine_mask_t engine_mask = 0;
316 write_vreg(vgpu, offset, p_data, bytes);
317 data = vgpu_vreg(vgpu, offset);
319 if (data & GEN6_GRDOM_FULL) {
320 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
321 engine_mask = ALL_ENGINES;
323 if (data & GEN6_GRDOM_RENDER) {
324 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
325 engine_mask |= BIT(RCS0);
327 if (data & GEN6_GRDOM_MEDIA) {
328 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
329 engine_mask |= BIT(VCS0);
331 if (data & GEN6_GRDOM_BLT) {
332 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
333 engine_mask |= BIT(BCS0);
335 if (data & GEN6_GRDOM_VECS) {
336 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
337 engine_mask |= BIT(VECS0);
339 if (data & GEN8_GRDOM_MEDIA2) {
340 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
341 engine_mask |= BIT(VCS1);
343 if (data & GEN9_GRDOM_GUC) {
344 gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
345 vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
347 engine_mask &= vgpu->gvt->gt->info.engine_mask;
350 /* vgpu_lock already hold by emulate mmio r/w */
351 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
353 /* sw will wait for the device to ack the reset request */
354 vgpu_vreg(vgpu, offset) = 0;
359 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
360 void *p_data, unsigned int bytes)
362 return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
365 static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
366 void *p_data, unsigned int bytes)
368 return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
371 static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
372 unsigned int offset, void *p_data, unsigned int bytes)
374 write_vreg(vgpu, offset, p_data, bytes);
376 if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
377 vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
378 vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
379 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
380 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
383 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
384 ~(PP_ON | PP_SEQUENCE_POWER_DOWN
385 | PP_CYCLE_DELAY_ACTIVE);
389 static int transconf_mmio_write(struct intel_vgpu *vgpu,
390 unsigned int offset, void *p_data, unsigned int bytes)
392 write_vreg(vgpu, offset, p_data, bytes);
394 if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
395 vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
397 vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
401 static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
402 void *p_data, unsigned int bytes)
404 write_vreg(vgpu, offset, p_data, bytes);
406 if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
407 vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
409 vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
411 if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
412 vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
414 vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
419 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
420 void *p_data, unsigned int bytes)
427 vgpu_vreg(vgpu, offset) = 1 << 17;
430 vgpu_vreg(vgpu, offset) = 0x3;
433 vgpu_vreg(vgpu, offset) = 0x2f << 16;
439 read_vreg(vgpu, offset, p_data, bytes);
444 * Only PIPE_A is enabled in current vGPU display and PIPE_A is tied to
445 * TRANSCODER_A in HW. DDI/PORT could be PORT_x depends on
446 * setup_virtual_dp_monitor().
447 * emulate_monitor_status_change() set up PLL for PORT_x as the initial enabled
448 * DPLL. Later guest driver may setup a different DPLLx when setting mode.
449 * So the correct sequence to find DP stream clock is:
450 * Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
451 * Check correct PLLx for PORT_x to get PLL frequency and DP bitrate.
452 * Then Refresh rate then can be calculated based on follow equations:
453 * Pixel clock = h_total * v_total * refresh_rate
454 * stream clock = Pixel clock
455 * ls_clk = DP bitrate
456 * Link M/N = strm_clk / ls_clk
459 static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
462 u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));
464 switch (ddi_pll_sel) {
465 case PORT_CLK_SEL_LCPLL_2700:
468 case PORT_CLK_SEL_LCPLL_1350:
471 case PORT_CLK_SEL_LCPLL_810:
474 case PORT_CLK_SEL_SPLL:
476 switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
477 case SPLL_FREQ_810MHz:
480 case SPLL_FREQ_1350MHz:
483 case SPLL_FREQ_2700MHz:
487 gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
488 vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
493 case PORT_CLK_SEL_WRPLL1:
494 case PORT_CLK_SEL_WRPLL2:
499 if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
500 wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
502 wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));
504 switch (wrpll_ctl & WRPLL_REF_MASK) {
505 case WRPLL_REF_PCH_SSC:
506 refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
508 case WRPLL_REF_LCPLL:
512 gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
513 vgpu->id, port_name(port), wrpll_ctl);
517 r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
518 p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
519 n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
521 dp_br = (refclk * n / 10) / (p * r) * 2;
525 gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
526 vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
534 static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
537 int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
538 enum dpio_phy phy = DPIO_PHY0;
539 enum dpio_channel ch = DPIO_CH0;
540 struct dpll clock = {0};
543 /* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
558 gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
562 temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
563 if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
564 gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
565 vgpu->id, port_name(port), temp);
570 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
571 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
572 if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
573 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
574 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
575 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
576 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
577 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
578 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
579 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
580 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
581 clock.m = clock.m1 * clock.m2;
582 clock.p = clock.p1 * clock.p2 * 5;
584 if (clock.n == 0 || clock.p == 0) {
585 gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
589 clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
590 clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
598 static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
601 enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;
603 /* Find the enabled DPLL for the DDI/PORT */
604 if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
605 (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
606 dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
607 DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
608 DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
610 gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
611 vgpu->id, port_name(port));
615 /* Find PLL output frequency from correct DPLL, and get bir rate */
616 switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
617 DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
618 DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
619 case DPLL_CTRL1_LINK_RATE_810:
622 case DPLL_CTRL1_LINK_RATE_1080:
625 case DPLL_CTRL1_LINK_RATE_1350:
628 case DPLL_CTRL1_LINK_RATE_1620:
631 case DPLL_CTRL1_LINK_RATE_2160:
634 case DPLL_CTRL1_LINK_RATE_2700:
639 gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
640 vgpu->id, port_name(port), dpll_id);
646 static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
648 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
650 u32 dp_br, link_m, link_n, htotal, vtotal;
652 /* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
653 port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
654 TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
655 if (port != PORT_B && port != PORT_D) {
656 gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
660 /* Calculate DP bitrate from PLL */
661 if (IS_BROADWELL(dev_priv))
662 dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
663 else if (IS_BROXTON(dev_priv))
664 dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
666 dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
668 /* Get DP link symbol clock M/N */
669 link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
670 link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
672 /* Get H/V total from transcoder timing */
673 htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
674 vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
676 if (dp_br && link_n && htotal && vtotal) {
679 u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
681 /* Calcuate pixel clock by (ls_clk * M / N) */
682 pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
683 pixel_clk *= MSEC_PER_SEC;
685 /* Calcuate refresh rate by (pixel_clk / (h_total * v_total)) */
686 new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));
688 if (*old_rate != new_rate)
689 *old_rate = new_rate;
691 gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
692 vgpu->id, pipe_name(PIPE_A), new_rate);
696 static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
697 void *p_data, unsigned int bytes)
701 write_vreg(vgpu, offset, p_data, bytes);
702 data = vgpu_vreg(vgpu, offset);
704 if (data & TRANSCONF_ENABLE) {
705 vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
706 vgpu_update_refresh_rate(vgpu);
707 vgpu_update_vblank_emulation(vgpu, true);
709 vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
710 vgpu_update_vblank_emulation(vgpu, false);
715 /* sorted in ascending order */
716 static i915_reg_t force_nonpriv_white_list[] = {
718 GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
719 GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
720 CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
721 PS_INVOCATION_COUNT, //_MMIO(0x2348)
722 PS_DEPTH_COUNT, //_MMIO(0x2350)
723 GEN8_CS_CHICKEN1,//_MMIO(0x2580)
732 GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
734 HDC_CHICKEN0,//_MMIO(0x7300)
735 GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
750 /* a simple bsearch */
751 static inline bool in_whitelist(u32 reg)
753 int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
754 i915_reg_t *array = force_nonpriv_white_list;
756 while (left < right) {
757 int mid = (left + right)/2;
759 if (reg > array[mid].reg)
761 else if (reg < array[mid].reg)
769 static int force_nonpriv_write(struct intel_vgpu *vgpu,
770 unsigned int offset, void *p_data, unsigned int bytes)
772 u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
773 const struct intel_engine_cs *engine =
774 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
776 if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
777 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
778 vgpu->id, offset, bytes);
782 if (!in_whitelist(reg_nonpriv) &&
783 reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
784 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
785 vgpu->id, reg_nonpriv, offset);
787 intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
792 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
793 void *p_data, unsigned int bytes)
795 write_vreg(vgpu, offset, p_data, bytes);
797 if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
798 vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
800 vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
801 if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
802 vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
803 &= ~DP_TP_STATUS_AUTOTRAIN_DONE;
808 static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
809 unsigned int offset, void *p_data, unsigned int bytes)
811 vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
815 #define FDI_LINK_TRAIN_PATTERN1 0
816 #define FDI_LINK_TRAIN_PATTERN2 1
818 static int fdi_auto_training_started(struct intel_vgpu *vgpu)
820 u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
821 u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
822 u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
824 if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
825 (rx_ctl & FDI_RX_ENABLE) &&
826 (rx_ctl & FDI_AUTO_TRAINING) &&
827 (tx_ctl & DP_TP_CTL_ENABLE) &&
828 (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
834 static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
835 enum pipe pipe, unsigned int train_pattern)
837 i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
838 unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
839 unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
840 unsigned int fdi_iir_check_bits;
842 fdi_rx_imr = FDI_RX_IMR(pipe);
843 fdi_tx_ctl = FDI_TX_CTL(pipe);
844 fdi_rx_ctl = FDI_RX_CTL(pipe);
846 if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
847 fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
848 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
849 fdi_iir_check_bits = FDI_RX_BIT_LOCK;
850 } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
851 fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
852 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
853 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
855 gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
859 fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
860 fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
862 /* If imr bit has been masked */
863 if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
866 if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
867 == fdi_tx_check_bits)
868 && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
869 == fdi_rx_check_bits))
875 #define INVALID_INDEX (~0U)
877 static unsigned int calc_index(unsigned int offset, unsigned int start,
878 unsigned int next, unsigned int end, i915_reg_t i915_end)
880 unsigned int range = next - start;
883 end = i915_mmio_reg_offset(i915_end);
884 if (offset < start || offset > end)
885 return INVALID_INDEX;
887 return offset / range;
890 #define FDI_RX_CTL_TO_PIPE(offset) \
891 calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
893 #define FDI_TX_CTL_TO_PIPE(offset) \
894 calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
896 #define FDI_RX_IMR_TO_PIPE(offset) \
897 calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
899 static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
900 unsigned int offset, void *p_data, unsigned int bytes)
902 i915_reg_t fdi_rx_iir;
906 if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
907 index = FDI_RX_CTL_TO_PIPE(offset);
908 else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
909 index = FDI_TX_CTL_TO_PIPE(offset);
910 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
911 index = FDI_RX_IMR_TO_PIPE(offset);
913 gvt_vgpu_err("Unsupported registers %x\n", offset);
917 write_vreg(vgpu, offset, p_data, bytes);
919 fdi_rx_iir = FDI_RX_IIR(index);
921 ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
925 vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
927 ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
931 vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
933 if (offset == _FDI_RXA_CTL)
934 if (fdi_auto_training_started(vgpu))
935 vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
936 DP_TP_STATUS_AUTOTRAIN_DONE;
940 #define DP_TP_CTL_TO_PORT(offset) \
941 calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
943 static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
944 void *p_data, unsigned int bytes)
946 i915_reg_t status_reg;
950 write_vreg(vgpu, offset, p_data, bytes);
952 index = DP_TP_CTL_TO_PORT(offset);
953 data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
955 status_reg = DP_TP_STATUS(index);
956 vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
961 static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
962 unsigned int offset, void *p_data, unsigned int bytes)
967 reg_val = *((u32 *)p_data);
968 sticky_mask = GENMASK(27, 26) | (1 << 24);
970 vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
971 (vgpu_vreg(vgpu, offset) & sticky_mask);
972 vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
976 static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
977 unsigned int offset, void *p_data, unsigned int bytes)
981 write_vreg(vgpu, offset, p_data, bytes);
982 data = vgpu_vreg(vgpu, offset);
984 if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
985 vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
989 static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
990 unsigned int offset, void *p_data, unsigned int bytes)
994 write_vreg(vgpu, offset, p_data, bytes);
995 data = vgpu_vreg(vgpu, offset);
997 if (data & FDI_MPHY_IOSFSB_RESET_CTL)
998 vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
1000 vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
1004 #define DSPSURF_TO_PIPE(offset) \
1005 calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
1007 static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1008 void *p_data, unsigned int bytes)
1010 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1011 u32 pipe = DSPSURF_TO_PIPE(offset);
1012 int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
1014 write_vreg(vgpu, offset, p_data, bytes);
1015 vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1017 vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1019 if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
1020 intel_vgpu_trigger_virtual_event(vgpu, event);
1022 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1027 #define SPRSURF_TO_PIPE(offset) \
1028 calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
1030 static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1031 void *p_data, unsigned int bytes)
1033 u32 pipe = SPRSURF_TO_PIPE(offset);
1034 int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
1036 write_vreg(vgpu, offset, p_data, bytes);
1037 vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1039 if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
1040 intel_vgpu_trigger_virtual_event(vgpu, event);
1042 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1047 static int reg50080_mmio_write(struct intel_vgpu *vgpu,
1048 unsigned int offset, void *p_data,
1051 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1052 enum pipe pipe = REG_50080_TO_PIPE(offset);
1053 enum plane_id plane = REG_50080_TO_PLANE(offset);
1054 int event = SKL_FLIP_EVENT(pipe, plane);
1056 write_vreg(vgpu, offset, p_data, bytes);
1057 if (plane == PLANE_PRIMARY) {
1058 vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1059 vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1061 vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1064 if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
1065 intel_vgpu_trigger_virtual_event(vgpu, event);
1067 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1072 static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
1075 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1076 enum intel_gvt_event_type event;
1078 if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
1079 event = AUX_CHANNEL_A;
1080 else if (reg == _PCH_DPB_AUX_CH_CTL ||
1081 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
1082 event = AUX_CHANNEL_B;
1083 else if (reg == _PCH_DPC_AUX_CH_CTL ||
1084 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
1085 event = AUX_CHANNEL_C;
1086 else if (reg == _PCH_DPD_AUX_CH_CTL ||
1087 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
1088 event = AUX_CHANNEL_D;
1090 drm_WARN_ON(&dev_priv->drm, true);
1094 intel_vgpu_trigger_virtual_event(vgpu, event);
1098 static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
1099 unsigned int reg, int len, bool data_valid)
1101 /* mark transaction done */
1102 value |= DP_AUX_CH_CTL_DONE;
1103 value &= ~DP_AUX_CH_CTL_SEND_BUSY;
1104 value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
1107 value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
1109 value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
1112 value &= ~(0xf << 20);
1113 value |= (len << 20);
1114 vgpu_vreg(vgpu, reg) = value;
1116 if (value & DP_AUX_CH_CTL_INTERRUPT)
1117 return trigger_aux_channel_interrupt(vgpu, reg);
1121 static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
1124 if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
1125 /* training pattern 1 for CR */
1126 /* set LANE0_CR_DONE, LANE1_CR_DONE */
1127 dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
1128 /* set LANE2_CR_DONE, LANE3_CR_DONE */
1129 dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
1130 } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
1131 DPCD_TRAINING_PATTERN_2) {
1132 /* training pattern 2 for EQ */
1133 /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
1134 dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
1135 dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
1136 /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
1137 dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
1138 dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
1139 /* set INTERLANE_ALIGN_DONE */
1140 dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
1141 DPCD_INTERLANE_ALIGN_DONE;
1142 } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
1143 DPCD_LINK_TRAINING_DISABLED) {
1144 /* finish link training */
1145 /* set sink status as synchronized */
1146 dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
1150 #define _REG_HSW_DP_AUX_CH_CTL(dp) \
1151 ((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
1153 #define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
1155 #define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
1157 #define dpy_is_valid_port(port) \
1158 (((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
1160 static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
1161 unsigned int offset, void *p_data, unsigned int bytes)
1163 struct intel_vgpu_display *display = &vgpu->display;
1164 int msg, addr, ctrl, op, len;
1165 int port_index = OFFSET_TO_DP_AUX_PORT(offset);
1166 struct intel_vgpu_dpcd_data *dpcd = NULL;
1167 struct intel_vgpu_port *port = NULL;
1170 if (!dpy_is_valid_port(port_index)) {
1171 gvt_vgpu_err("Unsupported DP port access!\n");
1175 write_vreg(vgpu, offset, p_data, bytes);
1176 data = vgpu_vreg(vgpu, offset);
1178 if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
1179 && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
1180 /* SKL DPB/C/D aux ctl register changed */
1182 } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
1183 offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
1184 /* write to the data registers */
1188 if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
1189 /* just want to clear the sticky bits */
1190 vgpu_vreg(vgpu, offset) = 0;
1194 port = &display->ports[port_index];
1197 /* read out message from DATA1 register */
1198 msg = vgpu_vreg(vgpu, offset + 4);
1199 addr = (msg >> 8) & 0xffff;
1200 ctrl = (msg >> 24) & 0xff;
1204 if (op == GVT_AUX_NATIVE_WRITE) {
1208 if ((addr + len + 1) >= DPCD_SIZE) {
1210 * Write request exceeds what we supported,
1211 * DCPD spec: When a Source Device is writing a DPCD
1212 * address not supported by the Sink Device, the Sink
1213 * Device shall reply with AUX NACK and “M” equal to
1218 vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
1219 dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
1224 * Write request format: Headr (command + address + size) occupies
1225 * 4 bytes, followed by (len + 1) bytes of data. See details at
1226 * intel_dp_aux_transfer().
1228 if ((len + 1 + 4) > AUX_BURST_SIZE) {
1229 gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
1233 /* unpack data from vreg to buf */
1234 for (t = 0; t < 4; t++) {
1235 u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
1237 buf[t * 4] = (r >> 24) & 0xff;
1238 buf[t * 4 + 1] = (r >> 16) & 0xff;
1239 buf[t * 4 + 2] = (r >> 8) & 0xff;
1240 buf[t * 4 + 3] = r & 0xff;
1243 /* write to virtual DPCD */
1244 if (dpcd && dpcd->data_valid) {
1245 for (t = 0; t <= len; t++) {
1248 dpcd->data[p] = buf[t];
1249 /* check for link training */
1250 if (p == DPCD_TRAINING_PATTERN_SET)
1251 dp_aux_ch_ctl_link_training(dpcd,
1257 vgpu_vreg(vgpu, offset + 4) = 0;
1258 dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
1259 dpcd && dpcd->data_valid);
1263 if (op == GVT_AUX_NATIVE_READ) {
1264 int idx, i, ret = 0;
1266 if ((addr + len + 1) >= DPCD_SIZE) {
1268 * read request exceeds what we supported
1269 * DPCD spec: A Sink Device receiving a Native AUX CH
1270 * read request for an unsupported DPCD address must
1271 * reply with an AUX ACK and read data set equal to
1272 * zero instead of replying with AUX NACK.
1276 vgpu_vreg(vgpu, offset + 4) = 0;
1277 vgpu_vreg(vgpu, offset + 8) = 0;
1278 vgpu_vreg(vgpu, offset + 12) = 0;
1279 vgpu_vreg(vgpu, offset + 16) = 0;
1280 vgpu_vreg(vgpu, offset + 20) = 0;
1282 dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1287 for (idx = 1; idx <= 5; idx++) {
1288 /* clear the data registers */
1289 vgpu_vreg(vgpu, offset + 4 * idx) = 0;
1293 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
1295 if ((len + 2) > AUX_BURST_SIZE) {
1296 gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
1300 /* read from virtual DPCD to vreg */
1301 /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
1302 if (dpcd && dpcd->data_valid) {
1303 for (i = 1; i <= (len + 1); i++) {
1306 t = dpcd->data[addr + i - 1];
1307 t <<= (24 - 8 * (i % 4));
1310 if ((i % 4 == 3) || (i == (len + 1))) {
1311 vgpu_vreg(vgpu, offset +
1312 (i / 4 + 1) * 4) = ret;
1317 dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1318 dpcd && dpcd->data_valid);
1322 /* i2c transaction starts */
1323 intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
1325 if (data & DP_AUX_CH_CTL_INTERRUPT)
1326 trigger_aux_channel_interrupt(vgpu, offset);
1330 static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
1331 void *p_data, unsigned int bytes)
1333 *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
1334 write_vreg(vgpu, offset, p_data, bytes);
1338 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1339 void *p_data, unsigned int bytes)
1343 write_vreg(vgpu, offset, p_data, bytes);
1344 vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
1346 gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
1347 vga_disable ? "Disable" : "Enable");
1351 static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
1352 unsigned int sbi_offset)
1354 struct intel_vgpu_display *display = &vgpu->display;
1355 int num = display->sbi.number;
1358 for (i = 0; i < num; ++i)
1359 if (display->sbi.registers[i].offset == sbi_offset)
1365 return display->sbi.registers[i].value;
1368 static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
1369 unsigned int offset, u32 value)
1371 struct intel_vgpu_display *display = &vgpu->display;
1372 int num = display->sbi.number;
1375 for (i = 0; i < num; ++i) {
1376 if (display->sbi.registers[i].offset == offset)
1381 if (num == SBI_REG_MAX) {
1382 gvt_vgpu_err("SBI caching meets maximum limits\n");
1385 display->sbi.number++;
1388 display->sbi.registers[i].offset = offset;
1389 display->sbi.registers[i].value = value;
1392 static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1393 void *p_data, unsigned int bytes)
1395 if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1396 SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
1397 unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1398 SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
1399 vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
1402 read_vreg(vgpu, offset, p_data, bytes);
1406 static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1407 void *p_data, unsigned int bytes)
1411 write_vreg(vgpu, offset, p_data, bytes);
1412 data = vgpu_vreg(vgpu, offset);
1414 data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
1417 data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
1418 data |= SBI_RESPONSE_SUCCESS;
1420 vgpu_vreg(vgpu, offset) = data;
1422 if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1423 SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
1424 unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1425 SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
1427 write_virtual_sbi_register(vgpu, sbi_offset,
1428 vgpu_vreg_t(vgpu, SBI_DATA));
1433 #define _vgtif_reg(x) \
1434 (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
1436 static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1437 void *p_data, unsigned int bytes)
1439 bool invalid_read = false;
1441 read_vreg(vgpu, offset, p_data, bytes);
1444 case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
1445 if (offset + bytes > _vgtif_reg(vgt_id) + 4)
1446 invalid_read = true;
1448 case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
1449 _vgtif_reg(avail_rs.fence_num):
1450 if (offset + bytes >
1451 _vgtif_reg(avail_rs.fence_num) + 4)
1452 invalid_read = true;
1454 case 0x78010: /* vgt_caps */
1458 invalid_read = true;
1462 gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
1463 offset, bytes, *(u32 *)p_data);
1464 vgpu->pv_notified = true;
1468 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1470 enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1471 struct intel_vgpu_mm *mm;
1474 pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
1476 switch (notification) {
1477 case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
1478 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1480 case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
1481 mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
1482 return PTR_ERR_OR_ZERO(mm);
1483 case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
1484 case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
1485 return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
1486 case VGT_G2V_EXECLIST_CONTEXT_CREATE:
1487 case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
1488 case 1: /* Remove this in guest driver. */
1491 gvt_vgpu_err("Invalid PV notification %d\n", notification);
1496 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
1498 struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
1499 char *env[3] = {NULL, NULL, NULL};
1501 char display_ready_str[20];
1503 snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
1504 env[0] = display_ready_str;
1506 snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
1509 return kobject_uevent_env(kobj, KOBJ_ADD, env);
1512 static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1513 void *p_data, unsigned int bytes)
1515 u32 data = *(u32 *)p_data;
1516 bool invalid_write = false;
1519 case _vgtif_reg(display_ready):
1520 send_display_ready_uevent(vgpu, data ? 1 : 0);
1522 case _vgtif_reg(g2v_notify):
1523 handle_g2v_notification(vgpu, data);
1525 /* add xhot and yhot to handled list to avoid error log */
1526 case _vgtif_reg(cursor_x_hot):
1527 case _vgtif_reg(cursor_y_hot):
1528 case _vgtif_reg(pdp[0].lo):
1529 case _vgtif_reg(pdp[0].hi):
1530 case _vgtif_reg(pdp[1].lo):
1531 case _vgtif_reg(pdp[1].hi):
1532 case _vgtif_reg(pdp[2].lo):
1533 case _vgtif_reg(pdp[2].hi):
1534 case _vgtif_reg(pdp[3].lo):
1535 case _vgtif_reg(pdp[3].hi):
1536 case _vgtif_reg(execlist_context_descriptor_lo):
1537 case _vgtif_reg(execlist_context_descriptor_hi):
1539 case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
1540 invalid_write = true;
1541 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1544 invalid_write = true;
1545 gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
1546 offset, bytes, data);
1551 write_vreg(vgpu, offset, p_data, bytes);
1556 static int pf_write(struct intel_vgpu *vgpu,
1557 unsigned int offset, void *p_data, unsigned int bytes)
1559 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1560 u32 val = *(u32 *)p_data;
1562 if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
1563 offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
1564 offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
1565 drm_WARN_ONCE(&i915->drm, true,
1566 "VM(%d): guest is trying to scaling a plane\n",
1571 return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
1574 static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
1575 unsigned int offset, void *p_data, unsigned int bytes)
1577 write_vreg(vgpu, offset, p_data, bytes);
1579 if (vgpu_vreg(vgpu, offset) &
1580 HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
1581 vgpu_vreg(vgpu, offset) |=
1582 HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1584 vgpu_vreg(vgpu, offset) &=
1585 ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1589 static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
1590 unsigned int offset, void *p_data, unsigned int bytes)
1592 write_vreg(vgpu, offset, p_data, bytes);
1594 if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
1595 vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
1597 vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
1602 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
1603 unsigned int offset, void *p_data, unsigned int bytes)
1605 write_vreg(vgpu, offset, p_data, bytes);
1607 if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
1608 vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
1612 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
1613 void *p_data, unsigned int bytes)
1615 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1618 write_vreg(vgpu, offset, p_data, bytes);
1619 mode = vgpu_vreg(vgpu, offset);
1621 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
1622 drm_WARN_ONCE(&i915->drm, 1,
1623 "VM(%d): iGVT-g doesn't support GuC\n",
1631 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
1632 void *p_data, unsigned int bytes)
1634 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1635 u32 trtte = *(u32 *)p_data;
1637 if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
1638 drm_WARN(&i915->drm, 1,
1639 "VM(%d): Use physical address for TRTT!\n",
1643 write_vreg(vgpu, offset, p_data, bytes);
1648 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
1649 void *p_data, unsigned int bytes)
1651 write_vreg(vgpu, offset, p_data, bytes);
1655 static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
1656 void *p_data, unsigned int bytes)
1660 if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
1663 if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
1666 if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
1669 if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
1672 vgpu_vreg(vgpu, offset) = v;
1674 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1677 static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1678 void *p_data, unsigned int bytes)
1680 u32 value = *(u32 *)p_data;
1681 u32 cmd = value & 0xff;
1682 u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
1685 case GEN9_PCODE_READ_MEM_LATENCY:
1686 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1687 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1688 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1689 IS_COMETLAKE(vgpu->gvt->gt->i915)) {
1691 * "Read memory latency" command on gen9.
1692 * Below memory latency values are read
1693 * from skylake platform.
1696 *data0 = 0x1e1a1100;
1698 *data0 = 0x61514b3d;
1699 } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
1701 * "Read memory latency" command on gen9.
1702 * Below memory latency values are read
1706 *data0 = 0x16080707;
1708 *data0 = 0x16161616;
1711 case SKL_PCODE_CDCLK_CONTROL:
1712 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1713 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1714 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1715 IS_COMETLAKE(vgpu->gvt->gt->i915))
1716 *data0 = SKL_CDCLK_READY_FOR_CHANGE;
1718 case GEN6_PCODE_READ_RC6VIDS:
1723 gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
1724 vgpu->id, value, *data0);
1726 * PCODE_READY clear means ready for pcode read/write,
1727 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
1728 * always emulate as pcode read/write success and ready for access
1729 * anytime, since we don't touch real physical registers here.
1731 value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
1732 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1735 static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
1736 void *p_data, unsigned int bytes)
1738 u32 value = *(u32 *)p_data;
1739 const struct intel_engine_cs *engine =
1740 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1743 !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
1744 gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
1750 * Need to emulate all the HWSP register write to ensure host can
1751 * update the VM CSB status correctly. Here listed registers can
1752 * support BDW, SKL or other platforms with same HWSP registers.
1754 if (unlikely(!engine)) {
1755 gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
1759 vgpu->hws_pga[engine->id] = value;
1760 gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
1761 vgpu->id, value, offset);
1763 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1766 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
1767 unsigned int offset, void *p_data, unsigned int bytes)
1769 u32 v = *(u32 *)p_data;
1771 if (IS_BROXTON(vgpu->gvt->gt->i915))
1772 v &= (1 << 31) | (1 << 29);
1774 v &= (1 << 31) | (1 << 29) | (1 << 9) |
1775 (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
1778 return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
1781 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1782 void *p_data, unsigned int bytes)
1784 u32 v = *(u32 *)p_data;
1786 /* other bits are MBZ. */
1787 v &= (1 << 31) | (1 << 30);
1788 v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
1790 vgpu_vreg(vgpu, offset) = v;
1795 static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1796 unsigned int offset, void *p_data, unsigned int bytes)
1798 u32 v = *(u32 *)p_data;
1800 if (v & BXT_DE_PLL_PLL_ENABLE)
1801 v |= BXT_DE_PLL_LOCK;
1803 vgpu_vreg(vgpu, offset) = v;
1808 static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1809 unsigned int offset, void *p_data, unsigned int bytes)
1811 u32 v = *(u32 *)p_data;
1813 if (v & PORT_PLL_ENABLE)
1816 vgpu_vreg(vgpu, offset) = v;
1821 static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1822 unsigned int offset, void *p_data, unsigned int bytes)
1824 u32 v = *(u32 *)p_data;
1825 u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
1828 case _PHY_CTL_FAMILY_EDP:
1829 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1831 case _PHY_CTL_FAMILY_DDI:
1832 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1833 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1837 vgpu_vreg(vgpu, offset) = v;
1842 static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
1843 unsigned int offset, void *p_data, unsigned int bytes)
1845 u32 v = vgpu_vreg(vgpu, offset);
1847 v &= ~UNIQUE_TRANGE_EN_METHOD;
1849 vgpu_vreg(vgpu, offset) = v;
1851 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1854 static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
1855 unsigned int offset, void *p_data, unsigned int bytes)
1857 u32 v = *(u32 *)p_data;
1859 if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
1860 vgpu_vreg(vgpu, offset - 0x600) = v;
1861 vgpu_vreg(vgpu, offset - 0x800) = v;
1863 vgpu_vreg(vgpu, offset - 0x400) = v;
1864 vgpu_vreg(vgpu, offset - 0x600) = v;
1867 vgpu_vreg(vgpu, offset) = v;
1872 static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1873 unsigned int offset, void *p_data, unsigned int bytes)
1875 u32 v = *(u32 *)p_data;
1878 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
1880 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
1885 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
1887 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
1892 vgpu_vreg(vgpu, offset) = v;
1897 static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1898 unsigned int offset, void *p_data, unsigned int bytes)
1900 vgpu_vreg(vgpu, offset) = 0;
1906 * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
1907 * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
1908 * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing
1909 * these MI_BATCH_BUFFER.
1910 * Temporarily workaround this by setting SNOOP bit for PAT3 used by PPGTT
1911 * PML4 PTE: PAT(0) PCD(1) PWT(1).
1912 * The performance is still expected to be low, will need further improvement.
1914 static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
1915 void *p_data, unsigned int bytes)
1918 GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1921 GEN8_PPAT(3, CHV_PPAT_SNOOP) |
1922 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1923 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1924 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1925 GEN8_PPAT(7, CHV_PPAT_SNOOP);
1927 vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
1932 static int guc_status_read(struct intel_vgpu *vgpu,
1933 unsigned int offset, void *p_data,
1936 /* keep MIA_IN_RESET before clearing */
1937 read_vreg(vgpu, offset, p_data, bytes);
1938 vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
1942 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1943 unsigned int offset, void *p_data, unsigned int bytes)
1945 struct intel_gvt *gvt = vgpu->gvt;
1946 const struct intel_engine_cs *engine =
1947 intel_gvt_render_mmio_to_engine(gvt, offset);
1950 * Read HW reg in following case
1951 * a. the offset isn't a ring mmio
1952 * b. the offset's ring is running on hw.
1953 * c. the offset is ring time stamp mmio
1957 vgpu == gvt->scheduler.engine_owner[engine->id] ||
1958 offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
1959 offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
1960 mmio_hw_access_pre(gvt->gt);
1961 vgpu_vreg(vgpu, offset) =
1962 intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1963 mmio_hw_access_post(gvt->gt);
1966 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1969 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1970 void *p_data, unsigned int bytes)
1972 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1973 const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1974 struct intel_vgpu_execlist *execlist;
1975 u32 data = *(u32 *)p_data;
1978 if (drm_WARN_ON(&i915->drm, !engine))
1982 * Due to d3_entered is used to indicate skipping PPGTT invalidation on
1983 * vGPU reset, it's set on D0->D3 on PCI config write, and cleared after
1984 * vGPU reset if in resuming.
1985 * In S0ix exit, the device power state also transite from D3 to D0 as
1986 * S3 resume, but no vGPU reset (triggered by QEMU devic model). After
1987 * S0ix exit, all engines continue to work. However the d3_entered
1988 * remains set which will break next vGPU reset logic (miss the expected
1989 * PPGTT invalidation).
1990 * Engines can only work in D0. Thus the 1st elsp write gives GVT a
1991 * chance to clear d3_entered.
1993 if (vgpu->d3_entered)
1994 vgpu->d3_entered = false;
1996 execlist = &vgpu->submission.execlist[engine->id];
1998 execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
1999 if (execlist->elsp_dwords.index == 3) {
2000 ret = intel_vgpu_submit_execlist(vgpu, engine);
2002 gvt_vgpu_err("fail submit workload on ring %s\n",
2006 ++execlist->elsp_dwords.index;
2007 execlist->elsp_dwords.index &= 0x3;
2011 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2012 void *p_data, unsigned int bytes)
2014 u32 data = *(u32 *)p_data;
2015 const struct intel_engine_cs *engine =
2016 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2017 bool enable_execlist;
2020 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
2021 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2022 IS_COMETLAKE(vgpu->gvt->gt->i915))
2023 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
2024 write_vreg(vgpu, offset, p_data, bytes);
2026 if (IS_MASKED_BITS_ENABLED(data, 1)) {
2027 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2031 if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2032 IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
2033 IS_MASKED_BITS_ENABLED(data, 2)) {
2034 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2038 /* when PPGTT mode enabled, we will check if guest has called
2039 * pvinfo, if not, we will treat this guest as non-gvtg-aware
2040 * guest, and stop emulating its cfg space, mmio, gtt, etc.
2042 if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
2043 IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
2044 !vgpu->pv_notified) {
2045 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2048 if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
2049 IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
2050 enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
2052 gvt_dbg_core("EXECLIST %s on ring %s\n",
2053 (enable_execlist ? "enabling" : "disabling"),
2056 if (!enable_execlist)
2059 ret = intel_vgpu_select_submission_ops(vgpu,
2061 INTEL_VGPU_EXECLIST_SUBMISSION);
2065 intel_vgpu_start_schedule(vgpu);
2070 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
2071 unsigned int offset, void *p_data, unsigned int bytes)
2073 unsigned int id = 0;
2075 write_vreg(vgpu, offset, p_data, bytes);
2076 vgpu_vreg(vgpu, offset) = 0;
2097 set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
2102 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
2103 unsigned int offset, void *p_data, unsigned int bytes)
2107 write_vreg(vgpu, offset, p_data, bytes);
2108 data = vgpu_vreg(vgpu, offset);
2110 if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
2111 data |= RESET_CTL_READY_TO_RESET;
2112 else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
2113 data &= ~RESET_CTL_READY_TO_RESET;
2115 vgpu_vreg(vgpu, offset) = data;
2119 static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
2120 unsigned int offset, void *p_data,
2123 u32 data = *(u32 *)p_data;
2125 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
2126 write_vreg(vgpu, offset, p_data, bytes);
2128 if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
2129 IS_MASKED_BITS_ENABLED(data, 0x8))
2130 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2135 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
2136 ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2137 s, f, am, rm, d, r, w); \
2142 #define MMIO_DH(reg, d, r, w) \
2143 MMIO_F(reg, 4, 0, 0, 0, d, r, w)
2145 #define MMIO_DFH(reg, d, f, r, w) \
2146 MMIO_F(reg, 4, f, 0, 0, d, r, w)
2148 #define MMIO_GM(reg, d, r, w) \
2149 MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
2151 #define MMIO_GM_RDR(reg, d, r, w) \
2152 MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
2154 #define MMIO_RO(reg, d, f, rm, r, w) \
2155 MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
2157 #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
2158 MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
2159 MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
2160 MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
2161 MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
2162 if (HAS_ENGINE(gvt->gt, VCS1)) \
2163 MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
2166 #define MMIO_RING_DFH(prefix, d, f, r, w) \
2167 MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
2169 #define MMIO_RING_GM(prefix, d, r, w) \
2170 MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
2172 #define MMIO_RING_GM_RDR(prefix, d, r, w) \
2173 MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
2175 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
2176 MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	int ret;

	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
		intel_vgpu_reg_imr_handler);

	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);

	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);

	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
		gamw_echo_dev_rw_ia_write);

	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);

#define RING_REG(base) _MMIO((base) + 0x28)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x134)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x6c)
	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG

	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);

	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);

	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);

	/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
	MMIO_RING_DFH(RING_REG, D_ALL,
		F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
		ring_mode_mmio_write);
#undef RING_REG

	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);

	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HSW_HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	/* display */
	MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
		reg50080_mmio_write);

	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
		gmbus_mmio_write);
	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);

	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);

	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);

	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);

	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);

	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
		PORTA_HOTPLUG_STATUS_MASK
		| PORTB_HOTPLUG_STATUS_MASK
		| PORTC_HOTPLUG_STATUS_MASK
		| PORTD_HOTPLUG_STATUS_MASK,
		NULL, NULL);

	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);

	MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
		dp_aux_ch_ctl_mmio_write);

	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);

	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);

	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);

	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);

	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);

	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);

	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);

	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);

	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);

	return 0;
}
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
	int ret;

	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
		intel_vgpu_reg_master_irq_handler);

	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
		mmio_read_from_hw, NULL);

#define RING_REG(base) _MMIO((base) + 0xd0)
	MMIO_RING_F(RING_REG, 4, F_RO, 0,
		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
		ring_reset_ctl_write);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x230)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x234)
	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
		NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x244)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x370)
	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x3a0)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG

	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);

#define RING_REG(base) _MMIO((base) + 0x270)
	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);

	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
	       D_BDW_PLUS, NULL, force_nonpriv_write);

	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	return 0;
}
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	int ret;

	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);

	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
	       dp_aux_ch_ctl_mmio_write);
	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
	       dp_aux_ch_ctl_mmio_write);
	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
	       dp_aux_ch_ctl_mmio_write);

	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);

	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);

	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);

	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);

	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
	       NULL, NULL);
	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
	       NULL, NULL);

	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);

	/* TRTT */
	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
		 NULL, gen9_trtte_write);
	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
		 NULL, gen9_trtt_chicken_write);

	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);

#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		      NULL, csfe_chicken1_mmio_write);
#undef CSFE_CHICKEN1_REG
	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);

	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	return 0;
}
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
	int ret;

	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
		NULL, bxt_phy_ctl_family_write);
	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
		NULL, bxt_phy_ctl_family_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
		NULL, bxt_port_pll_enable_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
		NULL, bxt_port_pll_enable_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
		bxt_port_pll_enable_write);

	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);

	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);

	return 0;
}
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
					      unsigned int offset)
{
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	int num = gvt->mmio.num_mmio_block;
	int i;

	for (i = 0; i < num; i++, block++) {
		if (offset >= i915_mmio_reg_offset(block->offset) &&
		    offset < i915_mmio_reg_offset(block->offset) + block->size)
			return block;
	}
	return NULL;
}
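/*
 * Usage sketch (illustrative): for a block registered to cover the PVINFO
 * page,
 *
 *	block = find_mmio_block(gvt, VGT_PVINFO_PAGE + 0x10);
 *
 * returns that block because the offset falls inside
 * [offset, offset + size); any offset outside every registered block
 * yields NULL and is handled through the per-register tracking table
 * instead.
 */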
/**
 * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the MMIO
 * information table of the GVT device
 *
 */
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct intel_gvt_mmio_info *e;
	int i;

	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
		kfree(e);

	kfree(gvt->mmio.mmio_block);
	gvt->mmio.mmio_block = NULL;
	gvt->mmio.num_mmio_block = 0;

	vfree(gvt->mmio.mmio_attribute);
	gvt->mmio.mmio_attribute = NULL;
}
static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
		       u32 size)
{
	struct intel_gvt *gvt = iter->data;
	struct intel_gvt_mmio_info *info, *p;
	u32 start, end, i;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		p = intel_gvt_find_mmio_info(gvt, i);
		if (p) {
			WARN(1, "dup mmio definition offset %x\n", p->offset);

			/* We return -EEXIST here to make GVT-g load fail,
			 * so that a duplicated MMIO definition is caught
			 * as early as possible.
			 */
			return -EEXIST;
		}

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = i;
		info->read = intel_vgpu_default_mmio_read;
		info->write = intel_vgpu_default_mmio_write;
		INIT_HLIST_NODE(&info->node);
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
		gvt->mmio.num_tracked_mmio++;
	}
	return 0;
}
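/*
 * Granularity note (sketch, hypothetical offset): tracking is per dword,
 * so an 8-byte range produces two intel_gvt_mmio_info entries:
 *
 *	handle_mmio(iter, 0x2358, 8);
 *	// hashes entries at 0x2358 and 0x235c, both wired to the default
 *	// read/write handlers until setup_mmio_info() overrides them
 */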
static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
			     u32 offset, u32 size)
{
	struct intel_gvt *gvt = iter->data;
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	void *ret;

	ret = krealloc(block,
		       (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
		       GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	gvt->mmio.mmio_block = block = ret;

	block += gvt->mmio.num_mmio_block;

	memset(block, 0, sizeof(*block));

	block->offset = _MMIO(offset);
	block->size = size;

	gvt->mmio.num_mmio_block++;

	return 0;
}
static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
			  u32 size)
{
	if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
		return handle_mmio(iter, offset, size);
	else
		return handle_mmio_block(iter, offset, size);
}
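/*
 * Heuristic sketch: ranges under 1KB are tracked per dword so that
 * setup_mmio_info() can later attach per-register handlers; larger ranges
 * become a single gvt_mmio_block with block-level handlers. The
 * GEN9_GFX_MOCS range (0x7f8 bytes, i.e. not under 1KB) is special-cased
 * into per-dword tracking because init_skl_mmio_info() registers it via
 * MMIO_F, which requires tracked entries. E.g. (hypothetical sizes):
 *
 *	handle_mmio_cb(iter, offset, 0x3fc);	// < 1KB: per-dword
 *	handle_mmio_cb(iter, offset, 0x800);	// >= 1KB: one block
 */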
static int init_mmio_info(struct intel_gvt *gvt)
{
	struct intel_gvt_mmio_table_iter iter = {
		.i915 = gvt->gt->i915,
		.data = gvt,
		.handle_mmio_cb = handle_mmio_cb,
	};

	return intel_gvt_iterate_mmio_table(&iter);
}
static int init_mmio_block_handlers(struct intel_gvt *gvt)
{
	struct gvt_mmio_block *block;

	block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
	if (!block) {
		WARN(1, "fail to assign handlers to mmio block %x\n",
		     i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
		return -ENODEV;
	}

	block->read = pvinfo_mmio_read;
	block->write = pvinfo_mmio_write;

	return 0;
}
/**
 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to set up the MMIO
 * information table for the GVT device
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct drm_i915_private *i915 = gvt->gt->i915;
	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
	int ret;

	gvt->mmio.mmio_attribute = vzalloc(size);
	if (!gvt->mmio.mmio_attribute)
		return -ENOMEM;

	ret = init_mmio_info(gvt);
	if (ret)
		goto err;

	ret = init_mmio_block_handlers(gvt);
	if (ret)
		goto err;

	ret = init_generic_mmio_info(gvt);
	if (ret)
		goto err;

	if (IS_BROADWELL(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_SKYLAKE(i915) ||
		   IS_KABYLAKE(i915) ||
		   IS_COFFEELAKE(i915) ||
		   IS_COMETLAKE(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_BROXTON(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_bxt_mmio_info(gvt);
		if (ret)
			goto err;
	}

	return 0;
err:
	intel_gvt_clean_mmio_info(gvt);
	return ret;
}
/**
 * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
 * @gvt: a GVT device
 * @handler: the handler to call on each tracked mmio
 * @data: private data given to handler
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
	void *data)
{
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	struct intel_gvt_mmio_info *e;
	int i, j, ret;

	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
		ret = handler(gvt, e->offset, data);
		if (ret)
			return ret;
	}

	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
		/* pvinfo data doesn't come from hw mmio */
		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
			continue;

		for (j = 0; j < block->size; j += 4) {
			ret = handler(gvt,
				i915_mmio_reg_offset(block->offset) + j,
				data);
			if (ret)
				return ret;
		}
	}
	return 0;
}
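/*
 * Usage sketch (illustrative, hypothetical handler): a callback only has
 * to match the handler signature, and a non-zero return aborts the walk:
 *
 *	static int count_mmio(struct intel_gvt *gvt, u32 offset, void *data)
 *	{
 *		(*(u32 *)data)++;
 *		return 0;
 *	}
 *
 *	u32 n = 0;
 *	intel_gvt_for_each_tracked_mmio(gvt, count_mmio, &n);
 *
 * mmio_pm_restore_handler() below is an in-tree user of this iterator.
 */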
/**
 * intel_vgpu_default_mmio_read - default MMIO read handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes)
{
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}
/**
 * intel_vgpu_default_mmio_write - default MMIO write handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
				  void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}
/**
 * intel_vgpu_mask_mmio_write - write mask register
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			       void *p_data, unsigned int bytes)
{
	u32 mask, old_vreg;

	old_vreg = vgpu_vreg(vgpu, offset);
	write_vreg(vgpu, offset, p_data, bytes);
	mask = vgpu_vreg(vgpu, offset) >> 16;
	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
				  (vgpu_vreg(vgpu, offset) & mask);

	return 0;
}
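/*
 * Worked example (sketch): with old_vreg = 0x00000005 and an incoming
 * masked write of 0x00030006 (mask 0x0003, value 0x0006):
 *
 *	mask			= 0x0003
 *	kept bits (old & ~mask)	= 0x0004
 *	new bits (new & mask)	= 0x0002
 *	result			= 0x0004 | 0x0002 = 0x00000006
 *
 * Only bits 0-1 change; bit 2 keeps its old value because its mask bit
 * was not set in the write.
 */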
/**
 * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO register is
 * in the force-nonpriv whitelist
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the register is in the force-nonpriv whitelist;
 * False if outside;
 */
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
					  unsigned int offset)
{
	return in_whitelist(offset);
}
/**
 * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
 * @vgpu: a vGPU
 * @offset: register offset
 * @pdata: data buffer
 * @bytes: data length
 * @is_read: read or write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
			   void *pdata, unsigned int bytes, bool is_read)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio_info;
	struct gvt_mmio_block *mmio_block;
	gvt_mmio_func func;
	int ret;

	if (drm_WARN_ON(&i915->drm, bytes > 8))
		return -EINVAL;

	/*
	 * Handle special MMIO blocks.
	 */
	mmio_block = find_mmio_block(gvt, offset);
	if (mmio_block) {
		func = is_read ? mmio_block->read : mmio_block->write;
		if (func)
			return func(vgpu, offset, pdata, bytes);
		goto default_rw;
	}

	/*
	 * Normal tracked MMIOs.
	 */
	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
	if (!mmio_info) {
		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
		goto default_rw;
	}

	if (is_read)
		return mmio_info->read(vgpu, offset, pdata, bytes);
	else {
		u64 ro_mask = mmio_info->ro_mask;
		u32 old_vreg = 0;
		u64 data = 0;

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
		}

		if (likely(!ro_mask))
			ret = mmio_info->write(vgpu, offset, pdata, bytes);
		else if (!~ro_mask) {
			gvt_vgpu_err("try to write RO reg %x\n", offset);
			return 0;
		} else {
			/* keep the RO bits in the virtual register */
			memcpy(&data, pdata, bytes);
			data &= ~ro_mask;
			data |= vgpu_vreg(vgpu, offset) & ro_mask;
			ret = mmio_info->write(vgpu, offset, &data, bytes);
		}

		/* higher 16bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
		}
	}

	return ret;

default_rw:
	return is_read ?
		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}
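/*
 * Dispatch order sketch for intel_vgpu_mmio_reg_rw():
 *
 *	1. block handlers, when the offset lands in a gvt_mmio_block
 *	   (e.g. the PVINFO page) that provides a read/write hook;
 *	2. the per-register handler from the tracked-MMIO hash table,
 *	   with RO-mask and mode-mask post-processing on writes;
 *	3. otherwise the default vreg copy-in/copy-out.
 *
 * So a guest access to an untracked offset, e.g. (hypothetical offset)
 *
 *	intel_vgpu_mmio_reg_rw(vgpu, 0x5fff0, &val, 4, false);
 *
 * only updates the vGPU's virtual register file and never reaches
 * hardware.
 */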
void intel_gvt_restore_fence(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int i, id;

	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		mmio_hw_access_pre(gvt->gt);
		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
			intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
		mmio_hw_access_post(gvt->gt);
	}
}
static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
	struct intel_vgpu *vgpu = data;
	struct drm_i915_private *dev_priv = gvt->gt->i915;

	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
		intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));

	return 0;
}
void intel_gvt_restore_mmio(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		mmio_hw_access_pre(gvt->gt);
		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
		mmio_hw_access_post(gvt->gt);
	}
}