Merge tag 'drm-intel-next-2018-07-19' of git://anongit.freedesktop.org/drm/drm-intel...
author Dave Airlie <airlied@redhat.com>
Fri, 20 Jul 2018 02:29:23 +0000 (12:29 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 20 Jul 2018 02:29:24 +0000 (12:29 +1000)
On the GEM side:

- GuC related fixes (Chris, Michal)
- GTT read-only pages support (Jon, Chris)
- More selftests fixes (Chris)
- More GPU reset improvements (Chris)
- Flush caches after GGTT writes (Chris)
- Handle recursive shrinker for vma->last_active allocation (Chris)
- Other execlists fixes (Chris)

On the display side:

- GLK HDMI fix (Clint)
- Rework and cleanup around HPD pin (Ville)
- Preparation work for Display Stream Compression support coming on ICL (Anusha)
- Nuke LVDS lid notification (Ville)
- Assume eDP is always connected (Ville)
- Kill intel panel detection (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>
# gpg: Signature made Fri 20 Jul 2018 01:51:45 AM AEST
# gpg:                using RSA key FA625F640EEB13CA
# gpg: Good signature from "Rodrigo Vivi <rodrigo.vivi@intel.com>"
# gpg:                 aka "Rodrigo Vivi <rodrigo.vivi@gmail.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6D20 7068 EEDD 6509 1C2C  E2A3 FA62 5F64 0EEB 13CA

# Conflicts:
# drivers/gpu/drm/i915/intel_lrc.c
Link: https://patchwork.freedesktop.org/patch/msgid/20180719171257.GA12199@intel.com
15 files changed:
arch/x86/kernel/early-quirks.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c

@@@ -28,6 -28,8 +28,6 @@@
  #include <asm/irq_remapping.h>
  #include <asm/early_ioremap.h>
  
 -#define dev_err(msg)  pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
 -
  static void __init fix_hypertransport_config(int num, int slot, int func)
  {
        u32 htcfg;
@@@ -338,6 -340,18 +338,18 @@@ static resource_size_t __init gen3_stol
        return bsm & INTEL_BSM_MASK;
  }
  
+ static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
+                                               resource_size_t stolen_size)
+ {
+       u64 bsm;
+       bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
+       bsm &= INTEL_BSM_MASK;
+       bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;
+       return bsm;
+ }
  static resource_size_t __init i830_stolen_size(int num, int slot, int func)
  {
        u16 gmch_ctrl;
@@@ -498,6 -512,11 +510,11 @@@ static const struct intel_early_ops chv
        .stolen_size = chv_stolen_size,
  };
  
+ static const struct intel_early_ops gen11_early_ops __initconst = {
+       .stolen_base = gen11_stolen_base,
+       .stolen_size = gen9_stolen_size,
+ };
  static const struct pci_device_id intel_early_ids[] __initconst = {
        INTEL_I830_IDS(&i830_early_ops),
        INTEL_I845G_IDS(&i845_early_ops),
        INTEL_CFL_IDS(&gen9_early_ops),
        INTEL_GLK_IDS(&gen9_early_ops),
        INTEL_CNL_IDS(&gen9_early_ops),
+       INTEL_ICL_11_IDS(&gen11_early_ops),
  };
  
  struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
@@@ -615,8 -635,7 +633,8 @@@ static void __init apple_airport_reset(
  
                pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
 -                      dev_err("Cannot power up Apple AirPort card\n");
 +                      pr_err("pci 0000:%02x:%02x.%d: Cannot power up Apple AirPort card\n",
 +                             bus, slot, func);
                        return;
                }
        }
  
        mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
        if (!mmio) {
 -              dev_err("Cannot iomap Apple AirPort card\n");
 +              pr_err("pci 0000:%02x:%02x.%d: Cannot iomap Apple AirPort card\n",
 +                     bus, slot, func);
                return;
        }
  
@@@ -216,16 -216,22 +216,22 @@@ static struct gtt_type_table_entry gtt_
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+       /* We treat the IPS bit as the 'PSE' bit at the PTE level. */
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_INVALID,
-                       GTT_TYPE_INVALID),
+                       GTT_TYPE_PPGTT_PTE_64K_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_INVALID,
-                       GTT_TYPE_INVALID),
+                       GTT_TYPE_PPGTT_PTE_64K_ENTRY),
+       GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+                       GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+                       GTT_TYPE_PPGTT_PTE_PT,
+                       GTT_TYPE_INVALID,
+                       GTT_TYPE_PPGTT_PTE_64K_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
@@@ -339,8 -345,14 +345,14 @@@ static inline int gtt_set_entry64(void 
  
  #define ADDR_1G_MASK  GENMASK_ULL(GTT_HAW - 1, 30)
  #define ADDR_2M_MASK  GENMASK_ULL(GTT_HAW - 1, 21)
+ #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
  #define ADDR_4K_MASK  GENMASK_ULL(GTT_HAW - 1, 12)
  
+ #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
+ #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
+ #define GTT_64K_PTE_STRIDE 16
  static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
  {
        unsigned long pfn;
                pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
        else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
                pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
+       else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+               pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
        else
                pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
        return pfn;
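
For reference: a 64K entry occupies GTT_64K_PTE_STRIDE (16) consecutive 4K PTE slots, so only indices that are multiples of 16 are meaningful. A minimal sketch of that relationship (a hypothetical helper, not part of this patch):

    /* First 4K PTE slot of the 64K group containing `index` (illustrative). */
    static inline unsigned long pte_64k_group_base(unsigned long index)
    {
            return index & ~(GTT_64K_PTE_STRIDE - 1);
    }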
@@@ -362,6 -376,9 +376,9 @@@ static void gen8_gtt_set_pfn(struct int
        } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
                e->val64 &= ~ADDR_2M_MASK;
                pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
+       } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
+               e->val64 &= ~ADDR_64K_MASK;
+               pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
        } else {
                e->val64 &= ~ADDR_4K_MASK;
                pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
  
  static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
  {
-       /* Entry doesn't have PSE bit. */
-       if (get_pse_type(e->type) == GTT_TYPE_INVALID)
-               return false;
+       return !!(e->val64 & _PAGE_PSE);
+ }
  
-       e->type = get_entry_type(e->type);
-       if (!(e->val64 & _PAGE_PSE))
+ static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
+ {
+       if (gen8_gtt_test_pse(e)) {
+               switch (e->type) {
+               case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+                       e->val64 &= ~_PAGE_PSE;
+                       e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
+                       break;
+               case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+                       e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
+                       e->val64 &= ~_PAGE_PSE;
+                       break;
+               default:
+                       WARN_ON(1);
+               }
+       }
+ }
+ static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
+ {
+       if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
                return false;
  
-       e->type = get_pse_type(e->type);
-       return true;
+       return !!(e->val64 & GEN8_PDE_IPS_64K);
+ }
+ static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
+ {
+       if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+               return;
+       e->val64 &= ~GEN8_PDE_IPS_64K;
  }
  
  static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
@@@ -408,6 -450,21 +450,21 @@@ static void gtt_entry_set_present(struc
        e->val64 |= _PAGE_PRESENT;
  }
  
+ static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
+ {
+       return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
+ }
+ static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
+ {
+       e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
+ }
+ static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
+ {
+       e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
+ }
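
An aside on where that flag lives: PTE bits 52-62 are ignored by the hardware (GTT_SPTE_FLAG_MASK above), so GVT can stash a software-only "split" marker there. Illustrative use of the helpers, assuming a populated shadow entry `se`:

    gen8_gtt_set_64k_splited(&se);              /* mark: came from a split 64K entry */
    WARN_ON(!gen8_gtt_test_64k_splited(&se));   /* flag is now set */
    gen8_gtt_clear_64k_splited(&se);            /* and cleared again */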
  /*
   * Per-platform GMA routines.
   */
@@@ -440,6 -497,12 +497,12 @@@ static struct intel_gvt_gtt_pte_ops gen
        .set_present = gtt_entry_set_present,
        .test_present = gen8_gtt_test_present,
        .test_pse = gen8_gtt_test_pse,
+       .clear_pse = gen8_gtt_clear_pse,
+       .clear_ips = gen8_gtt_clear_ips,
+       .test_ips = gen8_gtt_test_ips,
+       .clear_64k_splited = gen8_gtt_clear_64k_splited,
+       .set_64k_splited = gen8_gtt_set_64k_splited,
+       .test_64k_splited = gen8_gtt_test_64k_splited,
        .get_pfn = gen8_gtt_get_pfn,
        .set_pfn = gen8_gtt_set_pfn,
  };
@@@ -453,6 -516,27 +516,27 @@@ static struct intel_gvt_gtt_gma_ops gen
        .gma_to_pml4_index = gen8_gma_to_pml4_index,
  };
  
+ /* Update the entry type according to the PSE and IPS bits. */
+ static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+       struct intel_gvt_gtt_entry *entry, bool ips)
+ {
+       switch (entry->type) {
+       case GTT_TYPE_PPGTT_PDE_ENTRY:
+       case GTT_TYPE_PPGTT_PDP_ENTRY:
+               if (pte_ops->test_pse(entry))
+                       entry->type = get_pse_type(entry->type);
+               break;
+       case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+               if (ips)
+                       entry->type = get_pse_type(entry->type);
+               break;
+       default:
+               GEM_BUG_ON(!gtt_type_is_entry(entry->type));
+       }
+       GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
+ }
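
To illustrate the re-typing (hypothetical `pde_val` with _PAGE_PSE set):

    struct intel_gvt_gtt_entry e = {
            .type  = GTT_TYPE_PPGTT_PDE_ENTRY,
            .val64 = pde_val,   /* assumed to carry _PAGE_PSE */
    };
    update_entry_type_for_real(pte_ops, &e, false);
    /* e.type is now GTT_TYPE_PPGTT_PTE_2M_ENTRY, per the type table above. */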
  /*
   * MM helpers.
   */
@@@ -468,8 -552,7 +552,7 @@@ static void _ppgtt_get_root_entry(struc
        pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
                           mm->ppgtt_mm.shadow_pdps,
                           entry, index, false, 0, mm->vgpu);
-       pte_ops->test_pse(entry);
+       update_entry_type_for_real(pte_ops, entry, false);
  }
  
  static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@@ -574,7 -657,8 +657,8 @@@ static inline int ppgtt_spt_get_entry
        if (ret)
                return ret;
  
-       ops->test_pse(e);
+       update_entry_type_for_real(ops, e, guest ?
+                                  spt->guest_page.pde_ips : false);
  
        gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
                    type, e->type, index, e->val64);
@@@ -653,10 -737,12 +737,12 @@@ static void ppgtt_free_spt(struct intel
  
        radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
  
-       if (spt->guest_page.oos_page)
-               detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+       if (spt->guest_page.gfn) {
+               if (spt->guest_page.oos_page)
+                       detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
  
-       intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+               intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+       }
  
        list_del_init(&spt->post_shadow_list);
        free_spt(spt);
@@@ -717,8 -803,9 +803,9 @@@ static inline struct intel_vgpu_ppgtt_s
  
  static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
  
+ /* Allocate a shadow page table without a guest page. */
  static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
-               struct intel_vgpu *vgpu, int type, unsigned long gfn)
+               struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
  {
        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
@@@ -753,26 -840,12 +840,12 @@@ retry
        spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
        spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
  
-       /*
-        * Init guest_page.
-        */
-       spt->guest_page.type = type;
-       spt->guest_page.gfn = gfn;
-       ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
-                                       ppgtt_write_protection_handler, spt);
-       if (ret)
-               goto err_unmap_dma;
        ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
        if (ret)
-               goto err_unreg_page_track;
+               goto err_unmap_dma;
  
-       trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
        return spt;
  
- err_unreg_page_track:
-       intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
  err_unmap_dma:
        dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
  err_free_spt:
        return ERR_PTR(ret);
  }
  
+ /* Allocate a shadow page table associated with a specific gfn. */
+ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
+               struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+               unsigned long gfn, bool guest_pde_ips)
+ {
+       struct intel_vgpu_ppgtt_spt *spt;
+       int ret;
+       spt = ppgtt_alloc_spt(vgpu, type);
+       if (IS_ERR(spt))
+               return spt;
+       /*
+        * Init guest_page.
+        */
+       ret = intel_vgpu_register_page_track(vgpu, gfn,
+                       ppgtt_write_protection_handler, spt);
+       if (ret) {
+               ppgtt_free_spt(spt);
+               return ERR_PTR(ret);
+       }
+       spt->guest_page.type = type;
+       spt->guest_page.gfn = gfn;
+       spt->guest_page.pde_ips = guest_pde_ips;
+       trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+       return spt;
+ }
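
This split is what lets the 2MB path later in this patch allocate a shadow-only page table: ppgtt_alloc_spt() creates a shadow page with no tracked guest page, while ppgtt_alloc_spt_gfn() additionally registers write protection on the guest PT page. The two call sites, as they appear below:

    /* shadow-only PT used when splitting a guest 2MB entry (no guest gfn) */
    sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);

    /* normal shadowing: track and write-protect the guest PT page */
    spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);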
  #define pt_entry_size_shift(spt) \
        ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
  
        (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
  
  #define for_each_present_guest_entry(spt, e, i) \
-       for (i = 0; i < pt_entries(spt); i++) \
+       for (i = 0; i < pt_entries(spt); \
+            i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
                if (!ppgtt_get_guest_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
  
  #define for_each_present_shadow_entry(spt, e, i) \
-       for (i = 0; i < pt_entries(spt); i++) \
+       for (i = 0; i < pt_entries(spt); \
+            i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
                if (!ppgtt_get_shadow_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
  
- static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
+ #define for_each_shadow_entry(spt, e, i) \
+       for (i = 0; i < pt_entries(spt); \
+            i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+               if (!ppgtt_get_shadow_entry(spt, e, i))
+ static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
  {
        int v = atomic_read(&spt->refcount);
  
        trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
        atomic_inc(&spt->refcount);
  }
  
+ static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
+ {
+       int v = atomic_read(&spt->refcount);
+       trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+       return atomic_dec_return(&spt->refcount);
+ }
  static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
  
  static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@@ -843,7 -961,8 +961,8 @@@ static inline void ppgtt_invalidate_pte
        pfn = ops->get_pfn(entry);
        type = spt->shadow_page.type;
  
-       if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+       /* Uninitialized spte or unshadowed spte. */
+       if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
                return;
  
        intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@@ -855,14 -974,11 +974,11 @@@ static int ppgtt_invalidate_spt(struct 
        struct intel_gvt_gtt_entry e;
        unsigned long index;
        int ret;
-       int v = atomic_read(&spt->refcount);
  
        trace_spt_change(spt->vgpu->id, "die", spt,
                        spt->guest_page.gfn, spt->shadow_page.type);
  
-       trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
-       if (atomic_dec_return(&spt->refcount) > 0)
+       if (ppgtt_put_spt(spt) > 0)
                return 0;
  
        for_each_present_shadow_entry(spt, &e, index) {
                        gvt_vdbg_mm("invalidate 4K entry\n");
                        ppgtt_invalidate_pte(spt, &e);
                        break;
+               case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+                       /* We don't set up 64K shadow entries so far. */
+                       WARN(1, "suspicious 64K gtt entry\n");
+                       continue;
                case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+                       gvt_vdbg_mm("invalidate 2M entry\n");
+                       continue;
                case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-                       WARN(1, "GVT doesn't support 2M/1GB page\n");
+                       WARN(1, "GVT doesn't support 1GB page\n");
                        continue;
                case GTT_TYPE_PPGTT_PML4_ENTRY:
                case GTT_TYPE_PPGTT_PDP_ENTRY:
@@@ -899,6 -1021,22 +1021,22 @@@ fail
        return ret;
  }
  
+ static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
+ {
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+               u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
+                       GAMW_ECO_ENABLE_64K_IPS_FIELD;
+               return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
+       } else if (INTEL_GEN(dev_priv) >= 11) {
+               /* 64K paging is now controlled only by the IPS bit in the PTE. */
+               return true;
+       } else
+               return false;
+ }
  static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
  
  static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
  {
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
+       bool ips = false;
        int ret;
  
        GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
  
+       if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+               ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
        spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
-       if (spt)
+       if (spt) {
                ppgtt_get_spt(spt);
-       else {
+               if (ips != spt->guest_page.pde_ips) {
+                       spt->guest_page.pde_ips = ips;
+                       gvt_dbg_mm("reshadow PDE since ips changed\n");
+                       clear_page(spt->shadow_page.vaddr);
+                       ret = ppgtt_populate_spt(spt);
+                       if (ret) {
+                               ppgtt_put_spt(spt);
+                               goto err;
+                       }
+               }
+       } else {
                int type = get_next_pt_type(we->type);
  
-               spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+               spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
                if (IS_ERR(spt)) {
                        ret = PTR_ERR(spt);
-                       goto fail;
+                       goto err;
                }
  
                ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
                if (ret)
-                       goto fail;
+                       goto err_free_spt;
  
                ret = ppgtt_populate_spt(spt);
                if (ret)
-                       goto fail;
+                       goto err_free_spt;
  
                trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
                                 spt->shadow_page.type);
        }
        return spt;
- fail:
+ err_free_spt:
+       ppgtt_free_spt(spt);
+ err:
        gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
                     spt, we->val64, we->type);
        return ERR_PTR(ret);
@@@ -948,16 -1105,118 +1105,118 @@@ static inline void ppgtt_generate_shado
        se->type = ge->type;
        se->val64 = ge->val64;
  
+       /* We always split 64KB pages, so clear IPS in the shadow PDE. */
+       if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+               ops->clear_ips(se);
        ops->set_pfn(se, s->shadow_page.mfn);
  }
  
+ /**
+  * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
+  * are not met, negative on error.
+  */
+ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+       struct intel_gvt_gtt_entry *entry)
+ {
+       struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+       unsigned long pfn;
+       if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+               return 0;
+       pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
+       if (pfn == INTEL_GVT_INVALID_ADDR)
+               return -EINVAL;
+       return PageTransHuge(pfn_to_page(pfn));
+ }
+ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+       struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+       struct intel_gvt_gtt_entry *se)
+ {
+       struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+       struct intel_vgpu_ppgtt_spt *sub_spt;
+       struct intel_gvt_gtt_entry sub_se;
+       unsigned long start_gfn;
+       dma_addr_t dma_addr;
+       unsigned long sub_index;
+       int ret;
+       gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+       start_gfn = ops->get_pfn(se);
+       sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+       if (IS_ERR(sub_spt))
+               return PTR_ERR(sub_spt);
+       for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+               ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+                               start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+               if (ret) {
+                       ppgtt_invalidate_spt(spt);
+                       return ret;
+               }
+               sub_se.val64 = se->val64;
+               /* Copy the PAT field from the PDE: the large-page PAT bit
+                * (bit 12) maps to bit 7 in a 4K PTE, hence the shift by 5. */
+               sub_se.val64 &= ~_PAGE_PAT;
+               sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+               ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
+               ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+       }
+       /* Clear dirty field. */
+       se->val64 &= ~_PAGE_DIRTY;
+       ops->clear_pse(se);
+       ops->clear_ips(se);
+       ops->set_pfn(se, sub_spt->shadow_page.mfn);
+       ppgtt_set_shadow_entry(spt, se, index);
+       return 0;
+ }
+ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+       struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+       struct intel_gvt_gtt_entry *se)
+ {
+       struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+       struct intel_gvt_gtt_entry entry = *se;
+       unsigned long start_gfn;
+       dma_addr_t dma_addr;
+       int i, ret;
+       gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
+       GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
+       start_gfn = ops->get_pfn(se);
+       entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
+       ops->set_64k_splited(&entry);
+       for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+               ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+                                       start_gfn + i, PAGE_SIZE, &dma_addr);
+               if (ret)
+                       return ret;
+               ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
+               ppgtt_set_shadow_entry(spt, &entry, index + i);
+       }
+       return 0;
+ }
  static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
        struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
        struct intel_gvt_gtt_entry *ge)
  {
        struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry se = *ge;
-       unsigned long gfn;
+       unsigned long gfn, page_size = PAGE_SIZE;
        dma_addr_t dma_addr;
        int ret;
  
        case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
                gvt_vdbg_mm("shadow 4K gtt entry\n");
                break;
+       case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+               gvt_vdbg_mm("shadow 64K gtt entry\n");
+               /*
+                * The layout of a 64K page is special: the page size is
+                * controlled by the upper PDE. To keep it simple, we always
+                * split a 64K page into smaller 4K pages in the shadow PT.
+                */
+               return split_64KB_gtt_entry(vgpu, spt, index, &se);
        case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+               gvt_vdbg_mm("shadow 2M gtt entry\n");
+               ret = is_2MB_gtt_possible(vgpu, ge);
+               if (ret == 0)
+                       return split_2MB_gtt_entry(vgpu, spt, index, &se);
+               else if (ret < 0)
+                       return ret;
+               page_size = I915_GTT_PAGE_SIZE_2M;
+               break;
        case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-               gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+               gvt_vgpu_err("GVT doesn't support 1GB entry\n");
                return -EINVAL;
        default:
                GEM_BUG_ON(1);
        };
  
        /* direct shadow */
-       ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+       ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
+                                                     &dma_addr);
        if (ret)
                return -ENXIO;
  
@@@ -1062,8 -1338,12 +1338,12 @@@ static int ppgtt_handle_guest_entry_rem
                ret = ppgtt_invalidate_spt(s);
                if (ret)
                        goto fail;
-       } else
+       } else {
+               /* We don't set up 64K shadow entries so far. */
+               WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+                    "suspicious 64K entry\n");
                ppgtt_invalidate_pte(spt, se);
+       }
  
        return 0;
  fail:
@@@ -1286,7 -1566,7 +1566,7 @@@ static int ppgtt_handle_guest_write_pag
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry old_se;
        int new_present;
-       int ret;
+       int i, ret;
  
        new_present = ops->test_present(we);
  
                goto fail;
  
        if (!new_present) {
-               ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
-               ppgtt_set_shadow_entry(spt, &old_se, index);
+               /* For split 64KB entries, we need to clear them all. */
+               if (ops->test_64k_splited(&old_se) &&
+                   !(index % GTT_64K_PTE_STRIDE)) {
+                       gvt_vdbg_mm("remove splited 64K shadow entries\n");
+                       for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+                               ops->clear_64k_splited(&old_se);
+                               ops->set_pfn(&old_se,
+                                       vgpu->gtt.scratch_pt[type].page_mfn);
+                               ppgtt_set_shadow_entry(spt, &old_se, index + i);
+                       }
+               } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+                          old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+                       ops->clear_pse(&old_se);
+                       ops->set_pfn(&old_se,
+                                    vgpu->gtt.scratch_pt[type].page_mfn);
+                       ppgtt_set_shadow_entry(spt, &old_se, index);
+               } else {
+                       ops->set_pfn(&old_se,
+                                    vgpu->gtt.scratch_pt[type].page_mfn);
+                       ppgtt_set_shadow_entry(spt, &old_se, index);
+               }
        }
  
        return 0;
@@@ -1391,7 -1690,17 +1690,17 @@@ static int ppgtt_handle_guest_write_pag
  
        ppgtt_get_guest_entry(spt, &we, index);
  
-       ops->test_pse(&we);
+       /*
+        * For a page table holding 64K gtt entries, only PTE#0, PTE#16,
+        * PTE#32, ..., PTE#496 are used. Writes to the unused PTEs should
+        * be ignored.
+        */
+       if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
+           (index % GTT_64K_PTE_STRIDE)) {
+               gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
+                           index);
+               return 0;
+       }
  
        if (bytes == info->gtt_entry_size) {
                ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
@@@ -1585,9 -1894,8 +1894,9 @@@ static struct intel_vgpu_mm *intel_vgpu
        mm->type = INTEL_GVT_MM_GGTT;
  
        nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
 -      mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
 -                                      vgpu->gvt->device_info.gtt_entry_size);
 +      mm->ggtt_mm.virtual_ggtt =
 +              vzalloc(array_size(nr_entries,
 +                                 vgpu->gvt->device_info.gtt_entry_size));
        if (!mm->ggtt_mm.virtual_ggtt) {
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
@@@ -1881,7 -2189,7 +2190,7 @@@ static int emulate_ggtt_mmio_write(stru
                }
  
                ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
-                                                             &dma_addr);
+                                                       PAGE_SIZE, &dma_addr);
                if (ret) {
                        gvt_vgpu_err("fail to populate guest ggtt entry\n");
                        /* guest driver may read/write the entry when partial
@@@ -210,6 -210,31 +210,31 @@@ static int sanitize_fence_mmio_access(s
        return 0;
  }
  
+ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+ {
+       u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
+       if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+               if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
+                       gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
+               else if (!ips)
+                       gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
+               else {
+                       /* All engines must be enabled together for a vGPU,
+                        * since we don't know which engine the ppgtt will
+                        * bind to when shadowing.
+                        */
+                       gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
+                                    ips);
+                       return -EINVAL;
+               }
+       }
+       write_vreg(vgpu, offset, p_data, bytes);
+       return 0;
+ }
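
A hypothetical write for illustration (the register name is from this patch; the direct call is only a sketch):

    u32 val = GAMW_ECO_ENABLE_64K_IPS_FIELD;   /* IPS on for all engines */
    int ret = gamw_echo_dev_rw_ia_write(vgpu,
                    i915_mmio_reg_offset(GEN8_GAMW_ECO_DEV_RW_IA),
                    &val, sizeof(val));
    /* ret == 0; a partial per-engine mask would return -EINVAL on gen9/10. */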
  static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
                void *p_data, unsigned int bytes)
  {
@@@ -911,14 -936,11 +936,14 @@@ static int dp_aux_ch_ctl_mmio_write(str
                }
  
                /*
 -               * Write request format: (command + address) occupies
 -               * 3 bytes, followed by (len + 1) bytes of data.
 +               * Write request format: Header (command + address + size) occupies
 +               * 4 bytes, followed by (len + 1) bytes of data. See details at
 +               * intel_dp_aux_transfer().
                 */
 -              if (WARN_ON((len + 4) > AUX_BURST_SIZE))
 +              if ((len + 1 + 4) > AUX_BURST_SIZE) {
 +                      gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
                        return -EINVAL;
 +              }
  
                /* unpack data from vreg to buf */
                for (t = 0; t < 4; t++) {
                /*
                 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
                 */
 -              if (WARN_ON((len + 2) > AUX_BURST_SIZE))
 +              if ((len + 2) > AUX_BURST_SIZE) {
 +                      gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
                        return -EINVAL;
 +              }
  
                /* read from virtual DPCD to vreg */
                /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
@@@ -1564,6 -1584,13 +1589,13 @@@ static int bxt_gt_disp_pwron_write(stru
        return 0;
  }
  
+ static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+ {
+       vgpu_vreg(vgpu, offset) = 0;
+       return 0;
+ }
  static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
  {
@@@ -1774,7 -1801,9 +1806,9 @@@ static int init_generic_mmio_info(struc
  
        MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
  
-       MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+       MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
+               gamw_echo_dev_rw_ia_write);
        MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
        MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
        MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
@@@ -3160,6 -3189,9 +3194,9 @@@ static int init_bxt_mmio_info(struct in
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
  
+       MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+       MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
        MMIO_D(RC6_CTX_BASE, D_BXT);
  
        MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
@@@ -94,6 -94,7 +94,7 @@@ struct gvt_dma {
        struct rb_node dma_addr_node;
        gfn_t gfn;
        dma_addr_t dma_addr;
+       unsigned long size;
        struct kref ref;
  };
  
@@@ -106,51 -107,103 +107,109 @@@ static int kvmgt_guest_init(struct mdev
  static void intel_vgpu_release_work(struct work_struct *work);
  static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
  
+ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+               unsigned long size)
+ {
+       int total_pages;
+       int npage;
+       int ret;
+       total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+       for (npage = 0; npage < total_pages; npage++) {
+               unsigned long cur_gfn = gfn + npage;
+               ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+               WARN_ON(ret != 1);
+       }
+ }
+ /* Pin a normal or compound guest page for dma. */
+ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+               unsigned long size, struct page **page)
+ {
+       unsigned long base_pfn = 0;
+       int total_pages;
+       int npage;
+       int ret;
+       total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+       /*
+        * We pin the pages one-by-one to avoid allocating a big array
+        * on stack to hold pfns.
+        */
+       for (npage = 0; npage < total_pages; npage++) {
+               unsigned long cur_gfn = gfn + npage;
+               unsigned long pfn;
+               ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+                                    IOMMU_READ | IOMMU_WRITE, &pfn);
+               if (ret != 1) {
+                       gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
+                                    cur_gfn, ret);
+                       goto err;
+               }
+               if (!pfn_valid(pfn)) {
+                       gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+                       npage++;
+                       ret = -EFAULT;
+                       goto err;
+               }
+               if (npage == 0)
+                       base_pfn = pfn;
+               else if (base_pfn + npage != pfn) {
+                       gvt_vgpu_err("The pages are not contiguous\n");
+                       ret = -EINVAL;
+                       npage++;
+                       goto err;
+               }
+       }
+       *page = pfn_to_page(base_pfn);
+       return 0;
+ err:
+       gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+       return ret;
+ }
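
A sketch of how the populate path uses this for a huge page (hypothetical call, error handling elided):

    /* Pin the 512 4K pages backing a guest 2MB page; on success `page`
     * points at the head of the compound page. */
    struct page *page;
    int ret = gvt_pin_guest_page(vgpu, gfn, I915_GTT_PAGE_SIZE_2M, &page);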
  static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
-               dma_addr_t *dma_addr)
+               dma_addr_t *dma_addr, unsigned long size)
  {
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-       struct page *page;
-       unsigned long pfn;
+       struct page *page = NULL;
        int ret;
  
-       /* Pin the page first. */
-       ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
-                            IOMMU_READ | IOMMU_WRITE, &pfn);
-       if (ret != 1) {
-               gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
-                            gfn, ret);
-               return -EINVAL;
-       }
+       ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
+       if (ret)
+               return ret;
  
 +      if (!pfn_valid(pfn)) {
 +              gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
 +              vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
 +              return -EINVAL;
 +      }
 +
        /* Setup DMA mapping. */
-       page = pfn_to_page(pfn);
-       *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
-                                PCI_DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(dev, *dma_addr)) {
-               gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
-               vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
-               return -ENOMEM;
+       *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+       ret = dma_mapping_error(dev, *dma_addr);
+       if (ret) {
+               gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
+                            page_to_pfn(page), ret);
+               gvt_unpin_guest_page(vgpu, gfn, size);
        }
  
-       return 0;
+       return ret;
  }
  
  static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
-               dma_addr_t dma_addr)
+               dma_addr_t dma_addr, unsigned long size)
  {
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-       int ret;
  
-       dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
-       WARN_ON(ret != 1);
+       dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+       gvt_unpin_guest_page(vgpu, gfn, size);
  }
  
  static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
@@@ -191,7 -244,7 +250,7 @@@ static struct gvt_dma *__gvt_cache_find
  }
  
  static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
-               dma_addr_t dma_addr)
+               dma_addr_t dma_addr, unsigned long size)
  {
        struct gvt_dma *new, *itr;
        struct rb_node **link, *parent = NULL;
        new->vgpu = vgpu;
        new->gfn = gfn;
        new->dma_addr = dma_addr;
+       new->size = size;
        kref_init(&new->ref);
  
        /* gfn_cache maps gfn to struct gvt_dma. */
@@@ -260,7 -314,7 +320,7 @@@ static void gvt_cache_destroy(struct in
                        break;
                }
                dma = rb_entry(node, struct gvt_dma, gfn_node);
-               gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
+               gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
                __gvt_cache_remove_entry(vgpu, dma);
                mutex_unlock(&vgpu->vdev.cache_lock);
        }
@@@ -515,7 -569,8 +575,8 @@@ static int intel_vgpu_iommu_notifier(st
                        if (!entry)
                                continue;
  
-                       gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+                       gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+                                          entry->size);
                        __gvt_cache_remove_entry(vgpu, entry);
                }
                mutex_unlock(&vgpu->vdev.cache_lock);
@@@ -589,17 -644,6 +650,17 @@@ out
        return ret;
  }
  
 +static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
 +{
 +      struct eventfd_ctx *trigger;
 +
 +      trigger = vgpu->vdev.msi_trigger;
 +      if (trigger) {
 +              eventfd_ctx_put(trigger);
 +              vgpu->vdev.msi_trigger = NULL;
 +      }
 +}
 +
  static void __intel_vgpu_release(struct intel_vgpu *vgpu)
  {
        struct kvmgt_guest_info *info;
        info = (struct kvmgt_guest_info *)vgpu->handle;
        kvmgt_guest_exit(info);
  
 +      intel_vgpu_release_msi_eventfd_ctx(vgpu);
 +
        vgpu->vdev.kvm = NULL;
        vgpu->handle = 0;
  }
@@@ -1006,8 -1048,7 +1067,8 @@@ static int intel_vgpu_set_msi_trigger(s
                        return PTR_ERR(trigger);
                }
                vgpu->vdev.msi_trigger = trigger;
 -      }
 +      } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
 +              intel_vgpu_release_msi_eventfd_ctx(vgpu);
  
        return 0;
  }
@@@ -1612,18 -1653,6 +1673,18 @@@ static int kvmgt_inject_msi(unsigned lo
        info = (struct kvmgt_guest_info *)handle;
        vgpu = info->vgpu;
  
 +      /*
 +       * When the guest powers off, msi_trigger is set to NULL, but the
 +       * vgpu's config and mmio registers are not restored to their
 +       * defaults. If this vgpu is then used by the next VM, its pipes may
 +       * still be enabled, so once the vgpu becomes active it will receive
 +       * vblank interrupt requests. However, msi_trigger stays NULL until
 +       * MSI is enabled by the guest, so if msi_trigger is NULL, return
 +       * success without injecting an interrupt into the guest.
 +       */
 +      if (vgpu->vdev.msi_trigger == NULL)
 +              return 0;
 +
        if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
                return 0;
  
@@@ -1648,7 -1677,7 +1709,7 @@@ static unsigned long kvmgt_gfn_to_pfn(u
  }
  
  int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
-               dma_addr_t *dma_addr)
+               unsigned long size, dma_addr_t *dma_addr)
  {
        struct kvmgt_guest_info *info;
        struct intel_vgpu *vgpu;
  
        entry = __gvt_cache_find_gfn(info->vgpu, gfn);
        if (!entry) {
-               ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+               ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
                if (ret)
                        goto err_unlock;
  
-               ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+               ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
                if (ret)
                        goto err_unmap;
        } else {
        return 0;
  
  err_unmap:
-       gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+       gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
  err_unlock:
        mutex_unlock(&info->vgpu->vdev.cache_lock);
        return ret;
@@@ -1691,7 -1720,8 +1752,8 @@@ static void __gvt_dma_release(struct kr
  {
        struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
  
-       gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+       gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
+                          entry->size);
        __gvt_cache_remove_entry(entry->vgpu, entry);
  }
  
@@@ -46,6 -46,7 +46,7 @@@ void populate_pvinfo_page(struct intel_
  
        vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
        vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+       vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
  
        vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
                vgpu_aperture_gmadr_base(vgpu);
@@@ -124,7 -125,7 +125,7 @@@ int intel_gvt_init_vgpu_types(struct in
        high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
        num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
  
 -      gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
 +      gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
                             GFP_KERNEL);
        if (!gvt->types)
                return -ENOMEM;
@@@ -104,8 -104,13 +104,13 @@@ __i915_printk(struct drm_i915_private *
        vaf.fmt = fmt;
        vaf.va = &args;
  
-       dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
-                  __builtin_return_address(0), &vaf);
+       if (is_error)
+               dev_printk(level, kdev, "%pV", &vaf);
+       else
+               dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+                          __builtin_return_address(0), &vaf);
+       va_end(args);
  
        if (is_error && !shown_bug_once) {
                /*
                        dev_notice(kdev, "%s", FDO_BUG_MSG);
                shown_bug_once = true;
        }
-       va_end(args);
  }
  
  /* Map PCH device id to PCH type, or PCH_NONE if unknown. */
@@@ -679,7 -682,7 +682,7 @@@ static int i915_load_modeset_init(struc
  
        ret = i915_gem_init(dev_priv);
        if (ret)
-               goto cleanup_irq;
+               goto cleanup_modeset;
  
        intel_setup_overlay(dev_priv);
  
@@@ -699,6 -702,8 +702,8 @@@ cleanup_gem
        if (i915_gem_suspend(dev_priv))
                DRM_ERROR("failed to idle hardware; continuing to unload!\n");
        i915_gem_fini(dev_priv);
+ cleanup_modeset:
+       intel_modeset_cleanup(dev);
  cleanup_irq:
        drm_irq_uninstall(dev);
        intel_teardown_gmbus(dev_priv);
@@@ -895,7 -900,6 +900,6 @@@ static int i915_driver_init_early(struc
        spin_lock_init(&dev_priv->uncore.lock);
  
        mutex_init(&dev_priv->sb_lock);
-       mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->av_mutex);
        mutex_init(&dev_priv->wm.wm_mutex);
        mutex_init(&dev_priv->pps_mutex);
@@@ -1149,8 -1153,6 +1153,6 @@@ static int i915_driver_init_hw(struct d
  
        intel_uncore_sanitize(dev_priv);
  
-       intel_opregion_setup(dev_priv);
        i915_gem_load_init_fences(dev_priv);
  
        /* On the 945G/GM, the chipset reports the MSI capability on the
  
        ret = intel_gvt_init(dev_priv);
        if (ret)
-               goto err_ggtt;
+               goto err_msi;
+       intel_opregion_setup(dev_priv);
  
        return 0;
  
+ err_msi:
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
+       pm_qos_remove_request(&dev_priv->pm_qos);
  err_ggtt:
        i915_ggtt_cleanup_hw(dev_priv);
  err_perf:
@@@ -1415,6 -1423,7 +1423,7 @@@ out_fini
        drm_dev_fini(&dev_priv->drm);
  out_free:
        kfree(dev_priv);
+       pci_set_drvdata(pdev, NULL);
        return ret;
  }
  
@@@ -1560,11 -1569,6 +1569,6 @@@ static int i915_drm_suspend(struct drm_
        struct pci_dev *pdev = dev_priv->drm.pdev;
        pci_power_t opregion_target_state;
  
-       /* ignore lid events during suspend */
-       mutex_lock(&dev_priv->modeset_restore_lock);
-       dev_priv->modeset_restore = MODESET_SUSPENDED;
-       mutex_unlock(&dev_priv->modeset_restore_lock);
        disable_rpm_wakeref_asserts(dev_priv);
  
        /* We do a lot of poking in a lot of registers, make sure they work
  
        intel_display_suspend(dev);
  
-       intel_dp_mst_suspend(dev);
+       intel_dp_mst_suspend(dev_priv);
  
        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_hpd_cancel_work(dev_priv);
@@@ -1742,7 -1746,7 +1746,7 @@@ static int i915_drm_resume(struct drm_d
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
  
-       intel_dp_mst_resume(dev);
+       intel_dp_mst_resume(dev_priv);
  
        intel_display_resume(dev);
  
  
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
  
-       mutex_lock(&dev_priv->modeset_restore_lock);
-       dev_priv->modeset_restore = MODESET_DONE;
-       mutex_unlock(&dev_priv->modeset_restore_lock);
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
  
        enable_rpm_wakeref_asserts(dev_priv);
@@@ -2844,10 -2844,10 +2844,10 @@@ static const struct drm_ioctl_desc i915
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
 -      DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 -      DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 -      DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 -      DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
 +      DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
 +      DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
 +      DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
        DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
@@@ -86,8 -86,8 +86,8 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
- #define DRIVER_DATE           "20180709"
- #define DRIVER_TIMESTAMP      1531175967
+ #define DRIVER_DATE           "20180719"
+ #define DRIVER_TIMESTAMP      1532015279
  
  /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
   * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@@ -299,7 -299,6 +299,6 @@@ struct i915_hotplug 
        u32 event_bits;
        struct delayed_work reenable_work;
  
-       struct intel_digital_port *irq_port[I915_MAX_PORTS];
        u32 long_port_mask;
        u32 short_port_mask;
        struct work_struct dig_port_work;
@@@ -650,6 -649,7 +649,7 @@@ enum intel_sbi_destination 
  #define QUIRK_BACKLIGHT_PRESENT (1<<3)
  #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
  #define QUIRK_INCREASE_T12_DELAY (1<<6)
+ #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
  
  struct intel_fbdev;
  struct intel_fbc_work;
@@@ -1002,12 -1002,6 +1002,6 @@@ struct i915_gem_mm 
  
  #define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
  
- enum modeset_restore {
-       MODESET_ON_LID_OPEN,
-       MODESET_DONE,
-       MODESET_SUSPENDED,
- };
  #define DP_AUX_A 0x40
  #define DP_AUX_B 0x10
  #define DP_AUX_C 0x20
@@@ -1730,8 -1724,6 +1724,6 @@@ struct drm_i915_private 
  
        unsigned long quirks;
  
-       enum modeset_restore modeset_restore;
-       struct mutex modeset_restore_lock;
        struct drm_atomic_state *modeset_restore_state;
        struct drm_modeset_acquire_ctx reset_ctx;
  
@@@ -2238,6 -2230,9 +2230,6 @@@ static inline struct scatterlist *____s
   **/
  static inline struct scatterlist *__sg_next(struct scatterlist *sg)
  {
 -#ifdef CONFIG_DEBUG_SG
 -      BUG_ON(sg->sg_magic != SG_MAGIC);
 -#endif
        return sg_is_last(sg) ? NULL : ____sg_next(sg);
  }
  
@@@ -2557,6 -2552,9 +2549,9 @@@ intel_info(const struct drm_i915_privat
         IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
  
  #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
+ #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+                                       IS_GEMINILAKE(dev_priv) || \
+                                       IS_KABYLAKE(dev_priv))
  
  /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
   * rows, which changed the alignment requirements and fence programming.
@@@ -2739,8 -2737,6 +2734,6 @@@ void intel_hpd_irq_handler(struct drm_i
  void intel_hpd_init(struct drm_i915_private *dev_priv);
  void intel_hpd_init_work(struct drm_i915_private *dev_priv);
  void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
- enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
-                               enum hpd_pin pin);
  enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port);
  bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@@ -3304,7 -3300,7 +3297,7 @@@ unsigned long i915_gem_shrink(struct dr
  unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
  void i915_gem_shrinker_register(struct drm_i915_private *i915);
  void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
+ void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
  
  /* i915_gem_tiling.c */
  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@@ -204,9 -204,9 +204,9 @@@ static int ppgtt_bind_vma(struct i915_v
                        return err;
        }
  
-       /* Currently applicable only to VLV */
+       /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
  
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@@ -244,10 -244,13 +244,13 @@@ static void clear_pages(struct i915_vm
  }
  
  static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-                                 enum i915_cache_level level)
+                                 enum i915_cache_level level,
+                                 u32 flags)
  {
-       gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
-       pte |= addr;
+       gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+       if (unlikely(flags & PTE_READ_ONLY))
+               pte &= ~_PAGE_RW;
  
        switch (level) {
        case I915_CACHE_NONE:
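
For illustration of the new flag (hypothetical `daddr`):

    /* RW bit cleared: GPU writes through this PTE fault instead of landing. */
    gen8_pte_t ro = gen8_pte_encode(daddr, I915_CACHE_LLC, PTE_READ_ONLY);
    gen8_pte_t rw = gen8_pte_encode(daddr, I915_CACHE_LLC, 0);   /* unchanged default */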
@@@ -531,6 -534,14 +534,14 @@@ static void vm_free_page(struct i915_ad
  static void i915_address_space_init(struct i915_address_space *vm,
                                    struct drm_i915_private *dev_priv)
  {
+       /*
+        * The vm->mutex must be reclaim safe (for use in the shrinker).
+        * Do a dummy acquire now under fs_reclaim so that any allocation
+        * attempt holding the lock is immediately reported by lockdep.
+        */
+       mutex_init(&vm->mutex);
+       i915_gem_shrinker_taints_mutex(&vm->mutex);
        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
        vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
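
i915_gem_shrinker_taints_mutex(), declared in the i915_drv.h hunk above, performs that dummy acquire. A minimal sketch of the idea, assuming CONFIG_LOCKDEP (not necessarily the exact implementation):

    void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
    {
            if (!IS_ENABLED(CONFIG_LOCKDEP))
                    return;

            /* Teach lockdep that this mutex can be taken under fs_reclaim:
             * allocating while holding it could deadlock via the shrinker. */
            fs_reclaim_acquire(GFP_KERNEL);
            mutex_lock(mutex);
            mutex_unlock(mutex);
            fs_reclaim_release(GFP_KERNEL);
    }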
@@@ -551,6 -562,8 +562,8 @@@ static void i915_address_space_fini(str
        spin_unlock(&vm->free_pages.lock);
  
        drm_mm_takedown(&vm->mm);
+       mutex_destroy(&vm->mutex);
  }
  
  static int __setup_page_dma(struct i915_address_space *vm,
@@@ -711,7 -724,7 +724,7 @@@ static void gen8_initialize_pt(struct i
                               struct i915_page_table *pt)
  {
        fill_px(vm, pt,
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
  }
  
  static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
@@@ -859,7 -872,7 +872,7 @@@ static bool gen8_ppgtt_clear_pt(struct 
        unsigned int pte = gen8_pte_index(start);
        unsigned int pte_end = pte + num_entries;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t *vaddr;
  
        GEM_BUG_ON(num_entries > pt->used_ptes);
@@@ -1031,10 -1044,11 +1044,11 @@@ gen8_ppgtt_insert_pte_entries(struct i9
                              struct i915_page_directory_pointer *pdp,
                              struct sgt_dma *iter,
                              struct gen8_insert_pte *idx,
-                             enum i915_cache_level cache_level)
+                             enum i915_cache_level cache_level,
+                             u32 flags)
  {
        struct i915_page_directory *pd;
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        gen8_pte_t *vaddr;
        bool ret;
  
  static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
-                                  u32 unused)
+                                  u32 flags)
  {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
  
        gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
-                                     cache_level);
+                                     cache_level, flags);
  
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
  }
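
gen8_insert_pte() above packs a 48-bit GTT offset into the four page-table indices, nine bits per level over 4KiB pages. A hedged sketch of the decomposition (shift values follow the gen8 layout; the field names match the struct used above):

        struct gen8_insert_pte idx = {
                .pml4e = (start >> 39) & 0x1ff,   /* bits 47:39 */
                .pdpe  = (start >> 30) & 0x1ff,   /* bits 38:30 */
                .pde   = (start >> 21) & 0x1ff,   /* bits 29:21 */
                .pte   = (start >> 12) & 0x1ff,   /* bits 20:12 */
        };
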
  static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
                                           struct i915_page_directory_pointer **pdps,
                                           struct sgt_dma *iter,
-                                          enum i915_cache_level cache_level)
+                                          enum i915_cache_level cache_level,
+                                          u32 flags)
  {
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        u64 start = vma->node.start;
        dma_addr_t rem = iter->sg->length;
  
  static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
-                                  u32 unused)
+                                  u32 flags)
  {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
  
        if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
-               gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+               gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+                                              flags);
        } else {
                struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
  
                while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
-                                                    &iter, &idx, cache_level))
+                                                    &iter, &idx, cache_level,
+                                                    flags))
                        GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
  
                vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@@ -1568,7 -1585,7 +1585,7 @@@ static void gen8_dump_ppgtt(struct i915
  {
        struct i915_address_space *vm = &ppgtt->vm;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        u64 start = 0, length = ppgtt->vm.total;
  
        if (use_4lvl(vm)) {
@@@ -1645,6 -1662,13 +1662,13 @@@ static struct i915_hw_ppgtt *gen8_ppgtt
                1ULL << 48 :
                1ULL << 32;
  
+       /*
+        * From bdw, there is support for read-only pages in the PPGTT.
+        *
+        * XXX GVT is not honouring the lack of RW in the PTE bits.
+        */
+       ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
        i915_address_space_init(&ppgtt->vm, i915);
  
        /* There are only a few exceptions for gen >= 6: chv and bxt.
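
The has_read_only bit turns read-only support into a per-address-space capability, kept clear under GVT-g since the mediator does not honour the RW bit. A hypothetical caller-side guard, not taken from this diff:

        u32 pte_flags = 0;

        /* Hypothetical: only request a read-only PTE when the vm can enforce it */
        if (i915_gem_object_is_readonly(vma->obj) && vma->vm->has_read_only)
                pte_flags |= PTE_READ_ONLY;
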
@@@ -2451,7 -2475,7 +2475,7 @@@ static void gen8_ggtt_insert_page(struc
        gen8_pte_t __iomem *pte =
                (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
  
-       gen8_set_pte(pte, gen8_pte_encode(addr, level));
+       gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
  
        ggtt->invalidate(vm->i915);
  }
  static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct i915_vma *vma,
                                     enum i915_cache_level level,
-                                    u32 unused)
+                                    u32 flags)
  {
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct sgt_iter sgt_iter;
        gen8_pte_t __iomem *gtt_entries;
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
        dma_addr_t addr;
  
+       /*
+        * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+        * not to allow the user to override access to a read only page.
+        */
        gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
        gtt_entries += vma->node.start >> PAGE_SHIFT;
        for_each_sgt_dma(addr, sgt_iter, vma->pages)
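
Worth noting in the loop above: gen8_pte_encode(0, level, 0) yields a template containing only the control bits, and each page's DMA address is OR-ed in per entry. The loop body, reconstructed from context:

        for_each_sgt_dma(addr, sgt_iter, vma->pages)
                gen8_set_pte(gtt_entries++, pte_encode | addr);
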
@@@ -2532,7 -2561,7 +2561,7 @@@ static void gen8_ggtt_clear_range(struc
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t __iomem *gtt_base =
                (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@@ -2593,13 -2622,14 +2622,14 @@@ struct insert_entries 
        struct i915_address_space *vm;
        struct i915_vma *vma;
        enum i915_cache_level level;
+       u32 flags;
  };
  
  static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
  {
        struct insert_entries *arg = _arg;
  
-       gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+       gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
        bxt_vtd_ggtt_wa(arg->vm);
  
        return 0;
  static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
                                             struct i915_vma *vma,
                                             enum i915_cache_level level,
-                                            u32 unused)
+                                            u32 flags)
  {
-       struct insert_entries arg = { vm, vma, level };
+       struct insert_entries arg = { vm, vma, level, flags };
  
        stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
  }
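
The __BKL variant serves a Broxton VT-d erratum: stop_machine() runs the callback with every other CPU parked, so the GGTT update plus its invalidation workaround execute atomically system-wide. For reference, the primitive's kernel prototype:

        #include <linux/stop_machine.h>

        /* Runs fn(data) on one CPU while all others spin with interrupts
         * disabled; cpus == NULL means any single CPU may be used. */
        int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
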
@@@ -2701,9 -2731,9 +2731,9 @@@ static int ggtt_bind_vma(struct i915_vm
        struct drm_i915_gem_object *obj = vma->obj;
        u32 pte_flags;
  
-       /* Currently applicable only to VLV */
+       /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
        pte_flags = 0;
-       if (obj->gt_ro)
+       if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
  
        intel_runtime_pm_get(i915);
@@@ -2741,7 -2771,7 +2771,7 @@@ static int aliasing_gtt_bind_vma(struc
  
        /* Currently applicable only to VLV */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
  
        if (flags & I915_VMA_LOCAL_BIND) {
@@@ -3581,6 -3611,10 +3611,10 @@@ int i915_ggtt_init_hw(struct drm_i915_p
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_address_space_init(&ggtt->vm, dev_priv);
+       /* Only VLV supports read-only GGTT mappings */
+       ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
        if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
                ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@@ -4001,7 -4035,7 +4035,7 @@@ int i915_gem_gtt_insert(struct i915_add
  
        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
 -              mode = DRM_MM_INSERT_HIGH;
 +              mode = DRM_MM_INSERT_HIGHEST;
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;
  
        if (err != -ENOSPC)
                return err;
  
 +      if (mode & DRM_MM_INSERT_ONCE) {
 +              err = drm_mm_insert_node_in_range(&vm->mm, node,
 +                                                size, alignment, color,
 +                                                start, end,
 +                                                DRM_MM_INSERT_BEST);
 +              if (err != -ENOSPC)
 +                      return err;
 +      }
 +
        if (flags & PIN_NOEVICT)
                return -ENOSPC;
  
@@@ -1022,7 -1022,7 +1022,7 @@@ bool intel_crtc_active(struct intel_crt
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
 -       * We can ditch the crtc->primary->fb check as soon as we can
 +       * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
@@@ -2756,10 -2756,10 +2756,10 @@@ intel_set_plane_visible(struct intel_cr
  
        /* FIXME pre-g4x don't work like this */
        if (visible) {
 -              crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
 +              crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
                crtc_state->active_planes |= BIT(plane->id);
        } else {
 -              crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
 +              crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
                crtc_state->active_planes &= ~BIT(plane->id);
        }
  
@@@ -2882,8 -2882,9 +2882,8 @@@ valid_fb
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;
  
 -      drm_framebuffer_get(fb);
 -      primary->fb = primary->state->fb = fb;
 -      primary->crtc = primary->state->crtc = &intel_crtc->base;
 +      plane_state->fb = fb;
 +      plane_state->crtc = &intel_crtc->base;
  
        intel_set_plane_visible(to_intel_crtc_state(crtc_state),
                                to_intel_plane_state(plane_state),
@@@ -3657,7 -3658,7 +3657,7 @@@ u32 glk_plane_color_ctl(const struct in
        plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
        plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
  
 -      if (intel_format_is_yuv(fb->format->format)) {
 +      if (fb->format->is_yuv) {
                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
                else
@@@ -5837,7 -5838,7 +5837,7 @@@ static void haswell_crtc_disable(struc
                intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
  
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+               intel_ddi_disable_transcoder_func(old_crtc_state);
  
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
@@@ -11898,7 -11899,7 +11898,7 @@@ verify_single_dpll_state(struct drm_i91
                         struct drm_crtc_state *new_state)
  {
        struct intel_dpll_hw_state dpll_hw_state;
 -      unsigned crtc_mask;
 +      unsigned int crtc_mask;
        bool active;
  
        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
                return;
        }
  
 -      crtc_mask = 1 << drm_crtc_index(crtc);
 +      crtc_mask = drm_crtc_mask(crtc);
  
        if (new_state->active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
@@@ -11960,7 -11961,7 +11960,7 @@@ verify_shared_dpll_state(struct drm_dev
  
        if (old_state->shared_dpll &&
            old_state->shared_dpll != new_state->shared_dpll) {
 -              unsigned crtc_mask = 1 << drm_crtc_index(crtc);
 +              unsigned int crtc_mask = drm_crtc_mask(crtc);
                struct intel_shared_dpll *pll = old_state->shared_dpll;
  
                I915_STATE_WARN(pll->active_mask & crtc_mask,
@@@ -13304,17 -13305,8 +13304,17 @@@ void intel_plane_destroy(struct drm_pla
        kfree(to_intel_plane(plane));
  }
  
 -static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
 +static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
 +                                          u32 format, u64 modifier)
  {
 +      switch (modifier) {
 +      case DRM_FORMAT_MOD_LINEAR:
 +      case I915_FORMAT_MOD_X_TILED:
 +              break;
 +      default:
 +              return false;
 +      }
 +
        switch (format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
        }
  }
  
 -static bool i965_mod_supported(uint32_t format, uint64_t modifier)
 +static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
 +                                          u32 format, u64 modifier)
  {
 +      switch (modifier) {
 +      case DRM_FORMAT_MOD_LINEAR:
 +      case I915_FORMAT_MOD_X_TILED:
 +              break;
 +      default:
 +              return false;
 +      }
 +
        switch (format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
        }
  }
  
 -static bool skl_mod_supported(uint32_t format, uint64_t modifier)
 +static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 +                                         u32 format, u64 modifier)
  {
 +      struct intel_plane *plane = to_intel_plane(_plane);
 +
 +      switch (modifier) {
 +      case DRM_FORMAT_MOD_LINEAR:
 +      case I915_FORMAT_MOD_X_TILED:
 +      case I915_FORMAT_MOD_Y_TILED:
 +      case I915_FORMAT_MOD_Yf_TILED:
 +              break;
 +      case I915_FORMAT_MOD_Y_TILED_CCS:
 +      case I915_FORMAT_MOD_Yf_TILED_CCS:
 +              if (!plane->has_ccs)
 +                      return false;
 +              break;
 +      default:
 +              return false;
 +      }
 +
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
        }
  }
  
 -static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
 -                                                   uint32_t format,
 -                                                   uint64_t modifier)
 +static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 +                                            u32 format, u64 modifier)
  {
 -      struct drm_i915_private *dev_priv = to_i915(plane->dev);
 -
 -      if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
 -              return false;
 -
 -      if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
 -          modifier != DRM_FORMAT_MOD_LINEAR)
 -              return false;
 -
 -      if (INTEL_GEN(dev_priv) >= 9)
 -              return skl_mod_supported(format, modifier);
 -      else if (INTEL_GEN(dev_priv) >= 4)
 -              return i965_mod_supported(format, modifier);
 -      else
 -              return i8xx_mod_supported(format, modifier);
 +      return modifier == DRM_FORMAT_MOD_LINEAR &&
 +              format == DRM_FORMAT_ARGB8888;
  }
  
 -static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
 -                                                  uint32_t format,
 -                                                  uint64_t modifier)
 -{
 -      if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
 -              return false;
 +static struct drm_plane_funcs skl_plane_funcs = {
 +      .update_plane = drm_atomic_helper_update_plane,
 +      .disable_plane = drm_atomic_helper_disable_plane,
 +      .destroy = intel_plane_destroy,
 +      .atomic_get_property = intel_plane_atomic_get_property,
 +      .atomic_set_property = intel_plane_atomic_set_property,
 +      .atomic_duplicate_state = intel_plane_duplicate_state,
 +      .atomic_destroy_state = intel_plane_destroy_state,
 +      .format_mod_supported = skl_plane_format_mod_supported,
 +};
  
 -      return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
 -}
 +static struct drm_plane_funcs i965_plane_funcs = {
 +      .update_plane = drm_atomic_helper_update_plane,
 +      .disable_plane = drm_atomic_helper_disable_plane,
 +      .destroy = intel_plane_destroy,
 +      .atomic_get_property = intel_plane_atomic_get_property,
 +      .atomic_set_property = intel_plane_atomic_set_property,
 +      .atomic_duplicate_state = intel_plane_duplicate_state,
 +      .atomic_destroy_state = intel_plane_destroy_state,
 +      .format_mod_supported = i965_plane_format_mod_supported,
 +};
  
 -static struct drm_plane_funcs intel_plane_funcs = {
 +static struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
 -      .format_mod_supported = intel_primary_plane_format_mod_supported,
 +      .format_mod_supported = i8xx_plane_format_mod_supported,
  };
  
  static int
@@@ -13565,7 -13532,7 +13565,7 @@@ static const struct drm_plane_funcs int
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
 -      .format_mod_supported = intel_cursor_plane_format_mod_supported,
 +      .format_mod_supported = intel_cursor_format_mod_supported,
  };
  
  static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@@@ -13623,7 -13590,6 +13623,7 @@@ intel_primary_plane_create(struct drm_i
  {
        struct intel_plane *primary = NULL;
        struct intel_plane_state *state = NULL;
 +      const struct drm_plane_funcs *plane_funcs;
        const uint32_t *intel_primary_formats;
        unsigned int supported_rotations;
        unsigned int num_formats;
        primary->check_plane = intel_check_primary_plane;
  
        if (INTEL_GEN(dev_priv) >= 9) {
 +              primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
 +                                                   PLANE_PRIMARY);
 +
                if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
                        intel_primary_formats = skl_pri_planar_formats;
                        num_formats = ARRAY_SIZE(skl_pri_planar_formats);
                        num_formats = ARRAY_SIZE(skl_primary_formats);
                }
  
 -              if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
 +              if (primary->has_ccs)
                        modifiers = skl_format_modifiers_ccs;
                else
                        modifiers = skl_format_modifiers_noccs;
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
                primary->get_hw_state = skl_plane_get_hw_state;
 +
 +              plane_funcs = &skl_plane_funcs;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                intel_primary_formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
 +
 +              plane_funcs = &i965_plane_funcs;
        } else {
                intel_primary_formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
 +
 +              plane_funcs = &i8xx_plane_funcs;
        }
  
        if (INTEL_GEN(dev_priv) >= 9)
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
 -                                             0, &intel_plane_funcs,
 +                                             0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane 1%c", pipe_name(pipe));
        else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
 -                                             0, &intel_plane_funcs,
 +                                             0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
 -                                             0, &intel_plane_funcs,
 +                                             0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
@@@ -14849,6 -14806,18 +14849,18 @@@ static void quirk_increase_t12_delay(st
        DRM_INFO("Applying T12 delay quirk\n");
  }
  
+ /*
+  * GeminiLake NUC HDMI outputs require additional off time;
+  * this allows the onboard retimer to correctly sync to the signal.
+  */
+ static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+       DRM_INFO("Applying Increase DDI Disabled quirk\n");
+ }
+
  struct intel_quirk {
        int device;
        int subsystem_vendor;
@@@ -14935,6 -14904,13 +14947,13 @@@ static struct intel_quirk intel_quirks[
  
        /* Toshiba Satellite P50-C-18C */
        { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+       /* GeminiLake NUC */
+       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       /* ASRock ITX */
+       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
  };
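
For context, intel_quirks[] is matched against the probed PCI device at init time; a sketch of the consuming loop (the real intel_init_quirks() also walks a DMI-based list; the PCI_ANY_ID wildcard handling is assumed):

        struct pci_dev *d = dev->pdev;
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
                struct intel_quirk *q = &intel_quirks[i];

                if (d->device == q->device &&
                    (d->subsystem_vendor == q->subsystem_vendor ||
                     q->subsystem_vendor == PCI_ANY_ID) &&
                    (d->subsystem_device == q->subsystem_device ||
                     q->subsystem_device == PCI_ANY_ID))
                        q->hook(dev);
        }
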
  
  static void intel_init_quirks(struct drm_device *dev)
@@@ -15633,9 -15609,9 +15652,9 @@@ static void intel_modeset_readout_hw_st
                                 * rely on the connector_mask being accurate.
                                 */
                                encoder->base.crtc->state->connector_mask |=
 -                                      1 << drm_connector_index(&connector->base);
 +                                      drm_connector_mask(&connector->base);
                                encoder->base.crtc->state->encoder_mask |=
 -                                      1 << drm_encoder_index(&encoder->base);
 +                                      drm_encoder_mask(&encoder->base);
                        }
  
                } else {
@@@ -15890,6 -15866,8 +15909,8 @@@ void intel_modeset_cleanup(struct drm_d
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
  
+       flush_workqueue(dev_priv->modeset_wq);
+
        flush_work(&dev_priv->atomic_helper.free_work);
        WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
  
@@@ -15933,7 -15911,8 +15954,7 @@@ void intel_connector_attach_encoder(str
                                    struct intel_encoder *encoder)
  {
        connector->encoder = encoder;
 -      drm_mode_connector_attach_encoder(&connector->base,
 -                                        &encoder->base);
 +      drm_connector_attach_encoder(&connector->base, &encoder->base);
  }
  
  /*
@@@ -265,7 -265,7 +265,7 @@@ struct intel_link_m_n 
                            &(dev)->mode_config.plane_list,             \
                            base.head)                                  \
                for_each_if((plane_mask) &                              \
 -                          BIT(drm_plane_index(&intel_plane->base)))
 +                          drm_plane_mask(&intel_plane->base)))
  
  #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)    \
        list_for_each_entry(intel_plane,                                \
        list_for_each_entry(intel_crtc,                                 \
                            &(dev)->mode_config.crtc_list,              \
                            base.head)                                  \
 -              for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
 +              for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
  
  #define for_each_intel_encoder(dev, intel_encoder)            \
        list_for_each_entry(intel_encoder,                      \
                            &(dev)->mode_config.encoder_list,   \
                            base.head)
  
+ #define for_each_intel_dp(dev, intel_encoder)                 \
+       for_each_intel_encoder(dev, intel_encoder)              \
+               for_each_if(intel_encoder_is_dp(intel_encoder))
+
  #define for_each_intel_connector_iter(intel_connector, iter) \
        while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
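
The new for_each_intel_dp() builds on for_each_if(), whose inverted-condition form keeps nested iterator macros safe in front of a dangling else:

        /* As defined in the DRM headers */
        #define for_each_if(condition) if (!(condition)) {} else
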
  
@@@ -600,14 -600,8 +600,8 @@@ static enum pipe vlv_find_free_pps(stru
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_dp *intel_dp;
-               if (encoder->type != INTEL_OUTPUT_DP &&
-                   encoder->type != INTEL_OUTPUT_EDP)
-                       continue;
-               intel_dp = enc_to_intel_dp(&encoder->base);
+       for_each_intel_dp(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  
                if (encoder->type == INTEL_OUTPUT_EDP) {
                        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@@ -799,19 -793,8 +793,8 @@@ void intel_power_sequencer_reset(struc
         * should use them always.
         */
  
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_dp *intel_dp;
-               if (encoder->type != INTEL_OUTPUT_DP &&
-                   encoder->type != INTEL_OUTPUT_EDP &&
-                   encoder->type != INTEL_OUTPUT_DDI)
-                       continue;
-               intel_dp = enc_to_intel_dp(&encoder->base);
-               /* Skip pure DVI/HDMI DDI encoders */
-               if (!i915_mmio_reg_valid(intel_dp->output_reg))
-                       continue;
+       for_each_intel_dp(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  
                WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
  
@@@ -2830,10 -2813,6 +2813,6 @@@ static void vlv_disable_dp(struct intel
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
  {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       intel_psr_disable(intel_dp, old_crtc_state);
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
  }
  
@@@ -3046,10 -3025,7 +3025,7 @@@ static void vlv_enable_dp(struct intel_
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
  {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        intel_edp_backlight_on(pipe_config, conn_state);
-       intel_psr_enable(intel_dp, pipe_config);
  }
  
  static void g4x_pre_enable_dp(struct intel_encoder *encoder,
@@@ -3104,16 -3080,9 +3080,9 @@@ static void vlv_steal_power_sequencer(s
  
        lockdep_assert_held(&dev_priv->pps_mutex);
  
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_dp *intel_dp;
-               enum port port;
-               if (encoder->type != INTEL_OUTPUT_DP &&
-                   encoder->type != INTEL_OUTPUT_EDP)
-                       continue;
-               intel_dp = enc_to_intel_dp(&encoder->base);
-               port = dp_to_dig_port(intel_dp)->base.port;
+       for_each_intel_dp(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+               enum port port = encoder->port;
  
                WARN(intel_dp->active_pipe == pipe,
                     "stealing pipe %c power sequencer from active (e)DP port %c\n",
@@@ -3905,129 -3874,6 +3874,6 @@@ intel_dp_configure_mst(struct intel_dp 
                                        intel_dp->is_mst);
  }
  
- static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
-                                 struct intel_crtc_state *crtc_state, bool disable_wa)
- {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       u8 buf;
-       int ret = 0;
-       int count = 0;
-       int attempts = 10;
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-               DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
-               ret = -EIO;
-               goto out;
-       }
-       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              buf & ~DP_TEST_SINK_START) < 0) {
-               DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
-               ret = -EIO;
-               goto out;
-       }
-       do {
-               intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-               if (drm_dp_dpcd_readb(&intel_dp->aux,
-                                     DP_TEST_SINK_MISC, &buf) < 0) {
-                       ret = -EIO;
-                       goto out;
-               }
-               count = buf & DP_TEST_COUNT_MASK;
-       } while (--attempts && count);
-       if (attempts == 0) {
-               DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
-               ret = -ETIMEDOUT;
-       }
-  out:
-       if (disable_wa)
-               hsw_enable_ips(crtc_state);
-       return ret;
- }
- static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
-                                  struct intel_crtc_state *crtc_state)
- {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       u8 buf;
-       int ret;
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
-               return -EIO;
-       if (!(buf & DP_TEST_CRC_SUPPORTED))
-               return -ENOTTY;
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
-               return -EIO;
-       if (buf & DP_TEST_SINK_START) {
-               ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
-               if (ret)
-                       return ret;
-       }
-       hsw_disable_ips(crtc_state);
-       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              buf | DP_TEST_SINK_START) < 0) {
-               hsw_enable_ips(crtc_state);
-               return -EIO;
-       }
-       intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-       return 0;
- }
- int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
- {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       u8 buf;
-       int count, ret;
-       int attempts = 6;
-       ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
-       if (ret)
-               return ret;
-       do {
-               intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-               if (drm_dp_dpcd_readb(&intel_dp->aux,
-                                     DP_TEST_SINK_MISC, &buf) < 0) {
-                       ret = -EIO;
-                       goto stop;
-               }
-               count = buf & DP_TEST_COUNT_MASK;
-       } while (--attempts && count == 0);
-       if (attempts == 0) {
-               DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
-               ret = -ETIMEDOUT;
-               goto stop;
-       }
-       if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
-               ret = -EIO;
-               goto stop;
-       }
- stop:
-       intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
-       return ret;
- }
  static bool
  intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
  {
@@@ -4487,9 -4333,6 +4333,9 @@@ intel_dp_short_pulse(struct intel_dp *i
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }
  
 +      /* Handle CEC interrupts, if any */
 +      drm_dp_cec_irq(&intel_dp->aux);
 +
        /* defer to the hotplug work for link retraining if needed */
        if (intel_dp_needs_link_retrain(intel_dp))
                return false;
@@@ -4563,14 -4406,7 +4409,7 @@@ intel_dp_detect_dpcd(struct intel_dp *i
  static enum drm_connector_status
  edp_detect(struct intel_dp *intel_dp)
  {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       enum drm_connector_status status;
-       status = intel_panel_detect(dev_priv);
-       if (status == connector_status_unknown)
-               status = connector_status_connected;
-       return status;
+       return connector_status_connected;
  }
  
  static bool ibx_digital_port_connected(struct intel_encoder *encoder)
@@@ -4806,7 -4642,6 +4645,7 @@@ intel_dp_set_edid(struct intel_dp *inte
        intel_connector->detect_edid = edid;
  
        intel_dp->has_audio = drm_detect_monitor_audio(edid);
 +      drm_dp_cec_set_edid(&intel_dp->aux, edid);
  }
  
  static void
@@@ -4814,7 -4649,6 +4653,7 @@@ intel_dp_unset_edid(struct intel_dp *in
  {
        struct intel_connector *intel_connector = intel_dp->attached_connector;
  
 +      drm_dp_cec_unset_edid(&intel_dp->aux);
        kfree(intel_connector->detect_edid);
        intel_connector->detect_edid = NULL;
  
@@@ -4833,7 -4667,7 +4672,7 @@@ intel_dp_long_pulse(struct intel_connec
  
        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
  
-       /* Can't disconnect eDP, but you can close the lid... */
+       /* Can't disconnect eDP */
        if (intel_dp_is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
@@@ -5003,7 -4837,6 +4842,7 @@@ static in
  intel_dp_connector_register(struct drm_connector *connector)
  {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
 +      struct drm_device *dev = connector->dev;
        int ret;
  
        ret = intel_connector_register(connector);
                      intel_dp->aux.name, connector->kdev->kobj.name);
  
        intel_dp->aux.dev = connector->kdev;
 -      return drm_dp_aux_register(&intel_dp->aux);
 +      ret = drm_dp_aux_register(&intel_dp->aux);
 +      if (!ret)
 +              drm_dp_cec_register_connector(&intel_dp->aux,
 +                                            connector->name, dev->dev);
 +      return ret;
  }
  
  static void
  intel_dp_connector_unregister(struct drm_connector *connector)
  {
 -      drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
 +      struct intel_dp *intel_dp = intel_attached_dp(connector);
 +
 +      drm_dp_cec_unregister_connector(&intel_dp->aux);
 +      drm_dp_aux_unregister(&intel_dp->aux);
        intel_connector_unregister(connector);
  }
  
@@@ -6225,7 -6051,7 +6064,7 @@@ static bool intel_edp_init_connector(st
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
 -                      drm_mode_connector_update_edid_property(connector,
 +                      drm_connector_update_edid_property(connector,
                                                                edid);
                } else {
                        kfree(edid);
@@@ -6314,8 -6140,8 +6153,8 @@@ static void intel_dp_modeset_retry_work
        /* Set connector link status to BAD and send a Uevent to notify
         * userspace to do a modeset.
         */
 -      drm_mode_connector_set_link_status_property(connector,
 -                                                  DRM_MODE_LINK_STATUS_BAD);
 +      drm_connector_set_link_status_property(connector,
 +                                             DRM_MODE_LINK_STATUS_BAD);
        mutex_unlock(&connector->dev->mode_config.mutex);
        /* Send Hotplug uevent so userspace can reprobe */
        drm_kms_helper_hotplug_event(connector->dev);
@@@ -6508,7 -6334,6 +6347,6 @@@ bool intel_dp_init(struct drm_i915_priv
        intel_encoder->port = port;
  
        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-       dev_priv->hotplug.irq_port[port] = intel_dig_port;
  
        if (port != PORT_A)
                intel_infoframe_init(intel_dig_port);
@@@ -6527,37 -6352,44 +6365,44 @@@ err_connector_alloc
        return false;
  }
  
- void intel_dp_mst_suspend(struct drm_device *dev)
+ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int i;
+       struct intel_encoder *encoder;
+
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp;
  
-       /* disable MST */
-       for (i = 0; i < I915_MAX_PORTS; i++) {
-               struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+               if (encoder->type != INTEL_OUTPUT_DDI)
+                       continue;
  
-               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+               intel_dp = enc_to_intel_dp(&encoder->base);
+               if (!intel_dp->can_mst)
                        continue;
  
-               if (intel_dig_port->dp.is_mst)
-                       drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+               if (intel_dp->is_mst)
+                       drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
        }
  }
  
- void intel_dp_mst_resume(struct drm_device *dev)
+ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int i;
+       struct intel_encoder *encoder;
  
-       for (i = 0; i < I915_MAX_PORTS; i++) {
-               struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp;
                int ret;
  
-               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+               if (encoder->type != INTEL_OUTPUT_DDI)
+                       continue;
+
+               intel_dp = enc_to_intel_dp(&encoder->base);
+               if (!intel_dp->can_mst)
                        continue;
  
-               ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+               ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
                if (ret)
-                       intel_dp_check_mst_status(&intel_dig_port->dp);
+                       intel_dp_check_mst_status(intel_dp);
        }
  }
@@@ -304,6 -304,8 +304,8 @@@ struct intel_panel 
        } backlight;
  };
  
+ struct intel_digital_port;
+
  /*
   * This structure serves as a translation layer between the generic HDCP code
   * and the bus-specific code. What that means is that HDCP over HDMI differs
@@@ -947,7 -949,6 +949,7 @@@ struct intel_plane 
        enum pipe pipe;
        bool can_scale;
        bool has_fbc;
 +      bool has_ccs;
        int max_downscale;
        uint32_t frontbuffer_bit;
  
@@@ -1246,23 -1247,29 +1248,29 @@@ intel_attached_encoder(struct drm_conne
        return to_intel_connector(connector)->encoder;
  }
  
- static inline struct intel_digital_port *
- enc_to_dig_port(struct drm_encoder *encoder)
+ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
  {
-       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-       switch (intel_encoder->type) {
+       switch (encoder->type) {
        case INTEL_OUTPUT_DDI:
-               WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
-               /* fall through */
        case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
        case INTEL_OUTPUT_HDMI:
+               return true;
+       default:
+               return false;
+       }
+ }
+
+ static inline struct intel_digital_port *
+ enc_to_dig_port(struct drm_encoder *encoder)
+ {
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+       if (intel_encoder_is_dig_port(intel_encoder))
                return container_of(encoder, struct intel_digital_port,
                                    base.base);
-       default:
+       else
                return NULL;
-       }
  }
  
  static inline struct intel_dp_mst_encoder *
@@@ -1276,6 -1283,20 +1284,20 @@@ static inline struct intel_dp *enc_to_i
        return &enc_to_dig_port(encoder)->dp;
  }
  
+ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+ {
+       switch (encoder->type) {
+       case INTEL_OUTPUT_DP:
+       case INTEL_OUTPUT_EDP:
+               return true;
+       case INTEL_OUTPUT_DDI:
+               /* Skip pure HDMI/DVI DDI encoders */
+               return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+       default:
+               return false;
+       }
+ }
+
  static inline struct intel_digital_port *
  dp_to_dig_port(struct intel_dp *intel_dp)
  {
@@@ -1332,9 -1353,6 +1354,6 @@@ void intel_check_cpu_fifo_underruns(str
  void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
  
  /* i915_irq.c */
- bool gen11_reset_one_iir(struct drm_i915_private * const i915,
-                        const unsigned int bank,
-                        const unsigned int bit);
  void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
  void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
  void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
@@@ -1385,8 -1403,7 +1404,7 @@@ void hsw_fdi_link_train(struct intel_cr
  void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
  bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
  void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
- void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                      enum transcoder cpu_transcoder);
+ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
  void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
  void intel_ddi_disable_pipe_clock(const  struct intel_crtc_state *crtc_state);
  void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
@@@ -1665,8 -1682,6 +1683,6 @@@ void intel_dp_sink_dpms(struct intel_d
  void intel_dp_encoder_reset(struct drm_encoder *encoder);
  void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
  void intel_dp_encoder_destroy(struct drm_encoder *encoder);
- int intel_dp_sink_crc(struct intel_dp *intel_dp,
-                     struct intel_crtc_state *crtc_state, u8 *crc);
  bool intel_dp_compute_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config,
                             struct drm_connector_state *conn_state);
@@@ -1680,8 -1695,8 +1696,8 @@@ void intel_edp_backlight_off(const stru
  void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
  void intel_edp_panel_on(struct intel_dp *intel_dp);
  void intel_edp_panel_off(struct intel_dp *intel_dp);
- void intel_dp_mst_suspend(struct drm_device *dev);
- void intel_dp_mst_resume(struct drm_device *dev);
+ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+ void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
  int intel_dp_max_link_rate(struct intel_dp *intel_dp);
  int intel_dp_max_lane_count(struct intel_dp *intel_dp);
  int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
@@@ -1874,7 -1889,6 +1890,6 @@@ void intel_panel_enable_backlight(cons
                                  const struct drm_connector_state *conn_state);
  void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
  void intel_panel_destroy_backlight(struct drm_connector *connector);
- enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
  extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_i915_private *dev_priv,
                                struct drm_display_mode *fixed_mode,
@@@ -1923,7 -1937,7 +1938,7 @@@ void intel_psr_compute_config(struct in
  void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
  void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
  void intel_psr_short_pulse(struct intel_dp *intel_dp);
- int intel_psr_wait_for_idle(struct drm_i915_private *dev_priv);
+ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
  
  /* intel_runtime_pm.c */
  int intel_power_domains_init(struct drm_i915_private *);
@@@ -2072,6 -2086,7 +2087,6 @@@ bool intel_sdvo_init(struct drm_i915_pr
  
  
  /* intel_sprite.c */
 -bool intel_format_is_yuv(u32 format);
  int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
                             int usecs);
  struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@@ -2087,6 -2102,7 +2102,6 @@@ void skl_disable_plane(struct intel_pla
  bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
  bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
                       enum pipe pipe, enum plane_id plane_id);
 -bool intel_format_is_yuv(uint32_t format);
  bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
                          enum pipe pipe, enum plane_id plane_id);
  
@@@ -44,8 -44,6 +44,6 @@@
  /* Private structure for the integrated LVDS support */
  struct intel_lvds_connector {
        struct intel_connector base;
-       struct notifier_block lid_notifier;
  };
  
  struct intel_lvds_pps {
@@@ -452,26 -450,9 +450,9 @@@ static bool intel_lvds_compute_config(s
        return true;
  }
  
- /*
-  * Detect the LVDS connection.
-  *
-  * Since LVDS doesn't have hotplug, we use the lid as a proxy.  Open means
-  * connected and closed means disconnected.  We also send hotplug events as
-  * needed, using lid status notification from the input layer.
-  */
  static enum drm_connector_status
  intel_lvds_detect(struct drm_connector *connector, bool force)
  {
-       struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       enum drm_connector_status status;
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
-       status = intel_panel_detect(dev_priv);
-       if (status != connector_status_unknown)
-               return status;
        return connector_status_connected;
  }
  
@@@ -496,117 -477,6 +477,6 @@@ static int intel_lvds_get_modes(struct 
        return 1;
  }
  
- static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
- {
-       DRM_INFO("Skipping forced modeset for %s\n", id->ident);
-       return 1;
- }
- /* The GPU hangs up on these systems if modeset is performed on LID open */
- static const struct dmi_system_id intel_no_modeset_on_lid[] = {
-       {
-               .callback = intel_no_modeset_on_lid_dmi_callback,
-               .ident = "Toshiba Tecra A11",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
-               },
-       },
-       { }     /* terminating entry */
- };
- /*
-  * Lid events. Note the use of 'modeset':
-  *  - we set it to MODESET_ON_LID_OPEN on lid close,
-  *    and set it to MODESET_DONE on open
-  *  - we use it as a "only once" bit (ie we ignore
-  *    duplicate events where it was already properly set)
-  *  - the suspend/resume paths will set it to
-  *    MODESET_SUSPENDED and ignore the lid open event,
-  *    because they restore the mode ("lid open").
-  */
- static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
-                           void *unused)
- {
-       struct intel_lvds_connector *lvds_connector =
-               container_of(nb, struct intel_lvds_connector, lid_notifier);
-       struct drm_connector *connector = &lvds_connector->base.base;
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
-               return NOTIFY_OK;
-       mutex_lock(&dev_priv->modeset_restore_lock);
-       if (dev_priv->modeset_restore == MODESET_SUSPENDED)
-               goto exit;
-       /*
-        * check and update the status of LVDS connector after receiving
-        * the LID notification event.
-        */
-       connector->status = connector->funcs->detect(connector, false);
-       /* Don't force modeset on machines where it causes a GPU lockup */
-       if (dmi_check_system(intel_no_modeset_on_lid))
-               goto exit;
-       if (!acpi_lid_open()) {
-               /* do modeset on next lid open event */
-               dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
-               goto exit;
-       }
-       if (dev_priv->modeset_restore == MODESET_DONE)
-               goto exit;
-       /*
-        * Some old platform's BIOS love to wreak havoc while the lid is closed.
-        * We try to detect this here and undo any damage. The split for PCH
-        * platforms is rather conservative and a bit arbitrary except that on
-        * those platforms VGA disabling requires actual legacy VGA I/O access,
-        * and as part of the cleanup in the hw state restore we also redisable
-        * the vga plane.
-        */
-       if (!HAS_PCH_SPLIT(dev_priv))
-               intel_display_resume(dev);
-       dev_priv->modeset_restore = MODESET_DONE;
- exit:
-       mutex_unlock(&dev_priv->modeset_restore_lock);
-       return NOTIFY_OK;
- }
- static int
- intel_lvds_connector_register(struct drm_connector *connector)
- {
-       struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-       int ret;
-       ret = intel_connector_register(connector);
-       if (ret)
-               return ret;
-       lvds->lid_notifier.notifier_call = intel_lid_notify;
-       if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
-               DRM_DEBUG_KMS("lid notifier registration failed\n");
-               lvds->lid_notifier.notifier_call = NULL;
-       }
-       return 0;
- }
- static void
- intel_lvds_connector_unregister(struct drm_connector *connector)
- {
-       struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-       if (lvds->lid_notifier.notifier_call)
-               acpi_lid_notifier_unregister(&lvds->lid_notifier);
-       intel_connector_unregister(connector);
- }
  /**
   * intel_lvds_destroy - unregister and free LVDS structures
   * @connector: connector to free
@@@ -639,8 -509,8 +509,8 @@@ static const struct drm_connector_func
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_digital_connector_atomic_get_property,
        .atomic_set_property = intel_digital_connector_atomic_set_property,
-       .late_register = intel_lvds_connector_register,
-       .early_unregister = intel_lvds_connector_unregister,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_lvds_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@@ -1114,8 -984,6 +984,6 @@@ void intel_lvds_init(struct drm_i915_pr
         * 2) check for VBT data
         * 3) check to see if LVDS is already on
         *    if none of the above, no panel
-        * 4) make sure lid is open
-        *    if closed, act like it's not there for now
         */
  
        /*
                                    intel_gmbus_get_adapter(dev_priv, pin));
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
 -                      drm_mode_connector_update_edid_property(connector,
 +                      drm_connector_update_edid_property(connector,
                                                                edid);
                } else {
                        kfree(edid);
@@@ -524,8 -524,6 +524,6 @@@ static int init_ring_common(struct inte
                goto out;
        }
  
-       intel_engine_init_hangcheck(engine);
        if (INTEL_GEN(dev_priv) > 2)
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
  
@@@ -1025,8 -1023,6 +1023,8 @@@ int intel_ring_pin(struct intel_ring *r
                flags |= PIN_OFFSET_BIAS | offset_bias;
        if (vma->obj->stolen)
                flags |= PIN_MAPPABLE;
 +      else
 +              flags |= PIN_HIGH;
  
        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
                if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
@@@ -1089,6 -1085,7 +1087,7 @@@ void intel_ring_unpin(struct intel_rin
  static struct i915_vma *
  intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
  {
+       struct i915_address_space *vm = &dev_priv->ggtt.vm;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
  
        if (IS_ERR(obj))
                return ERR_CAST(obj);
  
-       /* mark ring buffers as read-only from GPU side by default */
-       obj->gt_ro = 1;
+       /*
+        * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+        * if supported by the platform's GGTT.
+        */
+       if (vm->has_read_only)
+               i915_gem_object_set_readonly(obj);
  
-       vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+       vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma))
                goto err;
  
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
  
 -bool intel_format_is_yuv(u32 format)
 -{
 -      switch (format) {
 -      case DRM_FORMAT_YUYV:
 -      case DRM_FORMAT_UYVY:
 -      case DRM_FORMAT_VYUY:
 -      case DRM_FORMAT_YVYU:
 -      case DRM_FORMAT_NV12:
 -              return true;
 -      default:
 -              return false;
 -      }
 -}
 -
  int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
                             int usecs)
  {
@@@ -104,7 -118,7 +104,7 @@@ void intel_pipe_update_start(const stru
         * VBL interrupts will start the PSR exit and prevent a PSR
         * re-entry as well.
         */
-       if (CAN_PSR(dev_priv) && intel_psr_wait_for_idle(dev_priv))
+       if (intel_psr_wait_for_idle(new_crtc_state))
                DRM_ERROR("PSR idle timed out, atomic update may fail\n");
  
        local_irq_disable();
@@@ -402,7 -416,7 +402,7 @@@ chv_update_csc(const struct intel_plane
        const s16 *csc = csc_matrix[plane_state->base.color_encoding];
  
        /* Seems RGB data bypasses the CSC always */
 -      if (!intel_format_is_yuv(fb->format->format))
 +      if (!fb->format->is_yuv)
                return;
  
        I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
@@@ -437,7 -451,7 +437,7 @@@ vlv_update_clrc(const struct intel_plan
        enum plane_id plane_id = plane->id;
        int contrast, brightness, sh_scale, sh_sin, sh_cos;
  
 -      if (intel_format_is_yuv(fb->format->format) &&
 +      if (fb->format->is_yuv &&
            plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
                /*
                 * Expand limited range to full range:
@@@ -964,12 -978,22 +964,12 @@@ intel_check_sprite_plane(struct intel_p
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = state->base.fb;
 -      int crtc_x, crtc_y;
 -      unsigned int crtc_w, crtc_h;
 -      uint32_t src_x, src_y, src_w, src_h;
 -      struct drm_rect *src = &state->base.src;
 -      struct drm_rect *dst = &state->base.dst;
 -      struct drm_rect clip = {};
        int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384;
 -      int hscale, vscale;
        int max_scale, min_scale;
        bool can_scale;
        int ret;
        uint32_t pixel_format = 0;
  
 -      *src = drm_plane_state_src(&state->base);
 -      *dst = drm_plane_state_dest(&state->base);
 -
        if (!fb) {
                state->base.visible = false;
                return 0;
                min_scale = plane->can_scale ? 1 : (1 << 16);
        }
  
 -      /*
 -       * FIXME the following code does a bunch of fuzzy adjustments to the
 -       * coordinates and sizes. We probably need some way to decide whether
 -       * more strict checking should be done instead.
 -       */
 -      drm_rect_rotate(src, fb->width << 16, fb->height << 16,
 -                      state->base.rotation);
 -
 -      hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
 -      BUG_ON(hscale < 0);
 -
 -      vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
 -      BUG_ON(vscale < 0);
 -
 -      if (crtc_state->base.enable)
 -              drm_mode_get_hv_timing(&crtc_state->base.mode,
 -                                     &clip.x2, &clip.y2);
 -
 -      state->base.visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
 -
 -      crtc_x = dst->x1;
 -      crtc_y = dst->y1;
 -      crtc_w = drm_rect_width(dst);
 -      crtc_h = drm_rect_height(dst);
 +      ret = drm_atomic_helper_check_plane_state(&state->base,
 +                                                &crtc_state->base,
 +                                                min_scale, max_scale,
 +                                                true, true);
 +      if (ret)
 +              return ret;
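
drm_atomic_helper_check_plane_state() performs the rotation, scaling-limit
and clipping work the removed block open-coded, and fills
state->base.src/dst and state->base.visible as a side effect. The two
trailing booleans in the call above are can_position and
can_update_disabled; the helper's prototype, for reference:

        int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
                                                const struct drm_crtc_state *crtc_state,
                                                int min_scale, int max_scale,
                                                bool can_position,
                                                bool can_update_disabled);
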
  
        if (state->base.visible) {
 -              /* check again in case clipping clamped the results */
 -              hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
 -              if (hscale < 0) {
 -                      DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
 -                      drm_rect_debug_print("src: ", src, true);
 -                      drm_rect_debug_print("dst: ", dst, false);
 -
 -                      return hscale;
 -              }
 -
 -              vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
 -              if (vscale < 0) {
 -                      DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
 -                      drm_rect_debug_print("src: ", src, true);
 -                      drm_rect_debug_print("dst: ", dst, false);
 -
 -                      return vscale;
 -              }
 -
 -              /* Make the source viewport size an exact multiple of the scaling factors. */
 -              drm_rect_adjust_size(src,
 -                                   drm_rect_width(dst) * hscale - drm_rect_width(src),
 -                                   drm_rect_height(dst) * vscale - drm_rect_height(src));
 -
 -              drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
 -                                  state->base.rotation);
 -
 -              /* sanity check to make sure the src viewport wasn't enlarged */
 -              WARN_ON(src->x1 < (int) state->base.src_x ||
 -                      src->y1 < (int) state->base.src_y ||
 -                      src->x2 > (int) state->base.src_x + state->base.src_w ||
 -                      src->y2 > (int) state->base.src_y + state->base.src_h);
 +              struct drm_rect *src = &state->base.src;
 +              struct drm_rect *dst = &state->base.dst;
 +              unsigned int crtc_w = drm_rect_width(dst);
 +              unsigned int crtc_h = drm_rect_height(dst);
 +              uint32_t src_x, src_y, src_w, src_h;
  
                /*
                 * Hardware doesn't handle subpixel coordinates.
                src_y = src->y1 >> 16;
                src_h = drm_rect_height(src) >> 16;
  
 -              if (intel_format_is_yuv(fb->format->format) &&
 -                  fb->format->format != DRM_FORMAT_NV12) {
 -                      src_x &= ~1;
 -                      src_w &= ~1;
 -
 -                      /*
 -                       * Must keep src and dst the
 -                       * same if we can't scale.
 -                       */
 -                      if (!can_scale)
 -                              crtc_w &= ~1;
 +              src->x1 = src_x << 16;
 +              src->x2 = (src_x + src_w) << 16;
 +              src->y1 = src_y << 16;
 +              src->y2 = (src_y + src_h) << 16;
  
 -                      if (crtc_w == 0)
 -                              state->base.visible = false;
 +              if (fb->format->is_yuv &&
 +                  fb->format->format != DRM_FORMAT_NV12 &&
 +                  (src_x % 2 || src_w % 2)) {
 +                      DRM_DEBUG_KMS("src x/w (%u, %u) must each be a multiple of 2 for YUV planes\n",
 +                                    src_x, src_w);
 +                      return -EINVAL;
                }
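
Why the even alignment: packed 4:2:2 fourccs such as YUYV store two
horizontal neighbours in one 4-byte macropixel, so an odd src_x would
start mid-macropixel. The layout the check assumes:

        /*
         * YUYV macropixel covering pixels x and x+1:
         *   byte 0: Y(x)    byte 1: U(x..x+1)
         *   byte 2: Y(x+1)  byte 3: V(x..x+1)
         * => src_x and src_w must both be even.
         */

Note the behavioural change: the old code silently rounded the coordinates
down (src_x &= ~1), whereas the new code rejects the configuration with
-EINVAL so misconfigured userspace actually hears about it.
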
 -      }
 -
 -      /* Check size restrictions when scaling */
 -      if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
 -              unsigned int width_bytes;
 -              int cpp = fb->format->cpp[0];
 -
 -              WARN_ON(!can_scale);
  
 -              /* FIXME interlacing min height is 6 */
 +              /* Check size restrictions when scaling */
 +              if (src_w != crtc_w || src_h != crtc_h) {
 +                      unsigned int width_bytes;
 +                      int cpp = fb->format->cpp[0];
  
 -              if (crtc_w < 3 || crtc_h < 3)
 -                      state->base.visible = false;
 +                      WARN_ON(!can_scale);
  
 -              if (src_w < 3 || src_h < 3)
 -                      state->base.visible = false;
 +                      width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
  
 -              width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
 -
 -              if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
 -                  width_bytes > 4096 || fb->pitches[0] > 4096)) {
 -                      DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
 -                      return -EINVAL;
 +                      /* FIXME interlacing min height is 6 */
 +                      if (INTEL_GEN(dev_priv) < 9 &&
 +                          (src_w < 3 || src_h < 3 ||
 +                           src_w > 2048 || src_h > 2048 ||
 +                           crtc_w < 3 || crtc_h < 3 ||
 +                           width_bytes > 4096 || fb->pitches[0] > 4096)) {
 +                              DRM_DEBUG_KMS("Source/CRTC dimensions outside hardware limits\n");
 +                              return -EINVAL;
 +                      }
                }
        }
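
The width_bytes formula kept above bounds the bytes a scanline fetch
touches: src_w * cpp of payload plus the first pixel's offset within its
64-byte unit. A worked example of why the offset term matters:

        /* src_x = 10, cpp = 4: (40 & 63) = 40 bytes into the unit */
        width_bytes = ((10 * 4) & 63) + 1024 * 4;       /* = 4136 */

which trips the 4096-byte limit even though the 4096 bytes of payload
alone would just fit.
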
  
 -      if (state->base.visible) {
 -              src->x1 = src_x << 16;
 -              src->x2 = (src_x + src_w) << 16;
 -              src->y1 = src_y << 16;
 -              src->y2 = (src_y + src_h) << 16;
 -      }
 -
 -      dst->x1 = crtc_x;
 -      dst->x2 = crtc_x + crtc_w;
 -      dst->y1 = crtc_y;
 -      dst->y2 = crtc_y + crtc_h;
 -
        if (INTEL_GEN(dev_priv) >= 9) {
                ret = skl_check_plane_surface(crtc_state, state);
                if (ret)
@@@ -1297,17 -1385,8 +1297,17 @@@ static const uint64_t skl_plane_format_
        DRM_FORMAT_MOD_INVALID
  };
  
 -static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
 +static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
 +                                          u32 format, u64 modifier)
  {
 +      switch (modifier) {
 +      case DRM_FORMAT_MOD_LINEAR:
 +      case I915_FORMAT_MOD_X_TILED:
 +              break;
 +      default:
 +              return false;
 +      }
 +
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_YUYV:
        }
  }
  
 -static bool snb_mod_supported(uint32_t format, uint64_t modifier)
 +static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
 +                                          u32 format, u64 modifier)
  {
 +      switch (modifier) {
 +      case DRM_FORMAT_MOD_LINEAR:
 +      case I915_FORMAT_MOD_X_TILED:
 +              break;
 +      default:
 +              return false;
 +      }
 +
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
        }
  }
  
 -static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
 +static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
 +                                          u32 format, u64 modifier)
  {
 +      switch (modifier) {
 +      case DRM_FORMAT_MOD_LINEAR:
 +      case I915_FORMAT_MOD_X_TILED:
 +              break;
 +      default:
 +              return false;
 +      }
 +
        switch (format) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_ABGR8888:
        }
  }
  
 -static bool skl_mod_supported(uint32_t format, uint64_t modifier)
 +static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 +                                         u32 format, u64 modifier)
  {
 +      struct intel_plane *plane = to_intel_plane(_plane);
 +
 +      switch (modifier) {
 +      case DRM_FORMAT_MOD_LINEAR:
 +      case I915_FORMAT_MOD_X_TILED:
 +      case I915_FORMAT_MOD_Y_TILED:
 +      case I915_FORMAT_MOD_Yf_TILED:
 +              break;
 +      case I915_FORMAT_MOD_Y_TILED_CCS:
 +      case I915_FORMAT_MOD_Yf_TILED_CCS:
 +              if (!plane->has_ccs)
 +                      return false;
 +              break;
 +      default:
 +              return false;
 +      }
 +
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
        }
  }
  
 -static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
 -                                                  uint32_t format,
 -                                                  uint64_t modifier)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(plane->dev);
 -
 -      if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
 -              return false;
 +static const struct drm_plane_funcs g4x_sprite_funcs = {
 +      .update_plane = drm_atomic_helper_update_plane,
 +      .disable_plane = drm_atomic_helper_disable_plane,
 +      .destroy = intel_plane_destroy,
 +      .atomic_get_property = intel_plane_atomic_get_property,
 +      .atomic_set_property = intel_plane_atomic_set_property,
 +      .atomic_duplicate_state = intel_plane_duplicate_state,
 +      .atomic_destroy_state = intel_plane_destroy_state,
 +      .format_mod_supported = g4x_sprite_format_mod_supported,
 +};
  
 -      if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
 -          modifier != DRM_FORMAT_MOD_LINEAR)
 -              return false;
 +static const struct drm_plane_funcs snb_sprite_funcs = {
 +      .update_plane = drm_atomic_helper_update_plane,
 +      .disable_plane = drm_atomic_helper_disable_plane,
 +      .destroy = intel_plane_destroy,
 +      .atomic_get_property = intel_plane_atomic_get_property,
 +      .atomic_set_property = intel_plane_atomic_set_property,
 +      .atomic_duplicate_state = intel_plane_duplicate_state,
 +      .atomic_destroy_state = intel_plane_destroy_state,
 +      .format_mod_supported = snb_sprite_format_mod_supported,
 +};
  
 -      if (INTEL_GEN(dev_priv) >= 9)
 -              return skl_mod_supported(format, modifier);
 -      else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 -              return vlv_mod_supported(format, modifier);
 -      else if (INTEL_GEN(dev_priv) >= 6)
 -              return snb_mod_supported(format, modifier);
 -      else
 -              return g4x_mod_supported(format, modifier);
 -}
 +static const struct drm_plane_funcs vlv_sprite_funcs = {
 +      .update_plane = drm_atomic_helper_update_plane,
 +      .disable_plane = drm_atomic_helper_disable_plane,
 +      .destroy = intel_plane_destroy,
 +      .atomic_get_property = intel_plane_atomic_get_property,
 +      .atomic_set_property = intel_plane_atomic_set_property,
 +      .atomic_duplicate_state = intel_plane_duplicate_state,
 +      .atomic_destroy_state = intel_plane_destroy_state,
 +      .format_mod_supported = vlv_sprite_format_mod_supported,
 +};
  
 -static const struct drm_plane_funcs intel_sprite_plane_funcs = {
 +static const struct drm_plane_funcs skl_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
 -      .format_mod_supported = intel_sprite_plane_format_mod_supported,
 +      .format_mod_supported = skl_plane_format_mod_supported,
  };
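
With one func table per platform generation, .format_mod_supported no
longer needs the runtime gen checks of the removed
intel_sprite_plane_format_mod_supported(); the right table is wired up
once at plane creation. A rough sketch of how the core consults the hook
when userspace creates a framebuffer (simplified, not the actual core
code):

        static bool plane_accepts(struct drm_plane *plane,
                                  u32 format, u64 modifier)
        {
                /* Each plane now answers for itself. */
                if (plane->funcs->format_mod_supported)
                        return plane->funcs->format_mod_supported(plane,
                                                                  format,
                                                                  modifier);

                /* Without the hook, assume only linear is safe. */
                return modifier == DRM_FORMAT_MOD_LINEAR;
        }
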
  
  bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@@@ -1500,7 -1533,6 +1500,7 @@@ intel_sprite_plane_create(struct drm_i9
  {
        struct intel_plane *intel_plane = NULL;
        struct intel_plane_state *state = NULL;
 +      const struct drm_plane_funcs *plane_funcs;
        unsigned long possible_crtcs;
        const uint32_t *plane_formats;
        const uint64_t *modifiers;
                intel_plane->can_scale = true;
                state->scaler_id = -1;
  
 +              intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
 +                                                       PLANE_SPRITE0 + plane);
 +
                intel_plane->update_plane = skl_update_plane;
                intel_plane->disable_plane = skl_disable_plane;
                intel_plane->get_hw_state = skl_plane_get_hw_state;
                        num_plane_formats = ARRAY_SIZE(skl_plane_formats);
                }
  
 -              if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
 +              if (intel_plane->has_ccs)
                        modifiers = skl_plane_format_modifiers_ccs;
                else
                        modifiers = skl_plane_format_modifiers_noccs;
 +
 +              plane_funcs = &skl_plane_funcs;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_plane->can_scale = false;
                intel_plane->max_downscale = 1;
                plane_formats = vlv_plane_formats;
                num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
 +
 +              plane_funcs = &vlv_sprite_funcs;
        } else if (INTEL_GEN(dev_priv) >= 7) {
                if (IS_IVYBRIDGE(dev_priv)) {
                        intel_plane->can_scale = true;
                plane_formats = snb_plane_formats;
                num_plane_formats = ARRAY_SIZE(snb_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
 +
 +              plane_funcs = &snb_sprite_funcs;
        } else {
                intel_plane->can_scale = true;
                intel_plane->max_downscale = 16;
                if (IS_GEN6(dev_priv)) {
                        plane_formats = snb_plane_formats;
                        num_plane_formats = ARRAY_SIZE(snb_plane_formats);
 +
 +                      plane_funcs = &snb_sprite_funcs;
                } else {
                        plane_formats = g4x_plane_formats;
                        num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
 +
 +                      plane_funcs = &g4x_sprite_funcs;
                }
        }
  
  
        if (INTEL_GEN(dev_priv) >= 9)
                ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
 -                                             possible_crtcs, &intel_sprite_plane_funcs,
 +                                             possible_crtcs, plane_funcs,
                                               plane_formats, num_plane_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_OVERLAY,
                                               "plane %d%c", plane + 2, pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
 -                                             possible_crtcs, &intel_sprite_plane_funcs,
 +                                             possible_crtcs, plane_funcs,
                                               plane_formats, num_plane_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_OVERLAY,