Merge drm/drm-next into drm-intel-gt-next
author     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
           Wed, 2 Jun 2021 07:18:45 +0000 (10:18 +0300)
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
           Wed, 2 Jun 2021 07:18:45 +0000 (10:18 +0300)
Pulling in -rc2 fixes and the TTM changes that upcoming patches depend
on.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_gtt.h
drivers/gpu/drm/i915/i915_drv.c

@@@ -56,17 -56,10 +56,17 @@@ in
  i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
  {
 +      struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_mmap *args = data;
        struct drm_i915_gem_object *obj;
        unsigned long addr;
  
 +      /* mmap ioctl is disallowed for all platforms after TGL-LP.  This also
 +       * covers all platforms with local memory.
 +       */
 +      if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
 +              return -EOPNOTSUPP;
 +
        if (args->flags & ~(I915_MMAP_WC))
                return -EINVAL;
  
@@@ -374,11 -367,10 +374,10 @@@ retry
                goto err_unpin;
  
        /* Finally, remap it using the new GTT offset */
-       ret = remap_io_mapping(area,
-                              area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-                              min_t(u64, vma->size, area->vm_end - area->vm_start),
-                              &ggtt->iomap);
+       ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
+                       (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+                       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+                       min_t(u64, vma->size, area->vm_end - area->vm_start));
        if (ret)
                goto err_fence;
  
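For reference, the hunk above replaces the driver-local remap_io_mapping() with the generic io_mapping_map_user() helper. A rough sketch of the helper's declaration is shown below (assuming the <linux/io-mapping.h> prototype; illustration only, not part of the merge):

int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
			unsigned long addr, unsigned long pfn, unsigned long size);

The io_mapping moves to the first argument; the remaining arguments keep the meaning they had for remap_io_mapping(): the user VMA, the user virtual address to map at, the GGTT aperture PFN, and the size in bytes.
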
@@@ -8,7 -8,6 +8,6 @@@
  #include <linux/shmem_fs.h>
  #include <linux/swap.h>
  
- #include <drm/drm.h> /* for drm_legacy.h! */
  #include <drm/drm_cache.h>
  
  #include "gt/intel_gt.h"
@@@ -208,7 -207,7 +207,7 @@@ static int i915_gem_object_shmem_to_phy
  
  err_xfer:
        if (!IS_ERR_OR_NULL(pages)) {
 -              unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
 +              unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
  
                __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
        }
@@@ -18,6 -18,7 +18,7 @@@
  #include "i915_vgpu.h"
  
  #include "intel_gtt.h"
+ #include "gen8_ppgtt.h"
  
  static int
  i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@@ -187,9 -188,9 +188,9 @@@ static void gmch_ggtt_invalidate(struc
        intel_gtt_chipset_flush();
  }
  
- static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
-                               enum i915_cache_level level,
-                               u32 flags)
+ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+                        enum i915_cache_level level,
+                        u32 flags)
  {
        gen8_pte_t pte = addr | _PAGE_PRESENT;
  
@@@ -745,6 -746,7 +746,6 @@@ static void ggtt_cleanup_hw(struct i915
  
        mutex_unlock(&ggtt->vm.mutex);
        i915_address_space_fini(&ggtt->vm);
 -      dma_resv_fini(&ggtt->vm.resv);
  
        arch_phys_wc_del(ggtt->mtrr);
  
@@@ -766,19 -768,6 +767,19 @@@ void i915_ggtt_driver_release(struct dr
        ggtt_cleanup_hw(ggtt);
  }
  
 +/**
 + * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
 + * all free objects have been drained.
 + * @i915: i915 device
 + */
 +void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
 +{
 +      struct i915_ggtt *ggtt = &i915->ggtt;
 +
 +      GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
 +      dma_resv_fini(&ggtt->vm._resv);
 +}
 +
  static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
  {
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@@ -840,7 -829,6 +841,7 @@@ static int ggtt_probe_common(struct i91
                return -ENOMEM;
        }
  
 +      kref_init(&ggtt->vm.resv_ref);
        ret = setup_scratch_page(&ggtt->vm);
        if (ret) {
                drm_err(&i915->drm, "Scratch setup failed\n");
@@@ -1147,7 -1135,7 +1148,7 @@@ static int ggtt_probe_hw(struct i915_gg
        ggtt->vm.gt = gt;
        ggtt->vm.i915 = i915;
        ggtt->vm.dma = i915->drm.dev;
 -      dma_resv_init(&ggtt->vm.resv);
 +      dma_resv_init(&ggtt->vm._resv);
  
        if (INTEL_GEN(i915) <= 5)
                ret = i915_gmch_probe(ggtt);
        else
                ret = gen8_gmch_probe(ggtt);
        if (ret) {
 -              dma_resv_fini(&ggtt->vm.resv);
 +              dma_resv_fini(&ggtt->vm._resv);
                return ret;
        }
  
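The i915_ggtt_driver_late_release() step added above is only safe once the deferred object-free queue has been flushed, presumably because freed objects can still reference the vm's reservation lock (hence the GEM_WARN_ON on resv_ref). The i915_drv.c hunks further down add the call in exactly that order; a condensed sketch of the sequence (illustration only, not literal driver code):

/* Condensed teardown ordering, as added in the i915_drv.c hunks below
 * (illustration only): */
i915_ggtt_driver_release(i915);        /* normal GGTT teardown */
i915_gem_drain_freed_objects(i915);    /* flush objects still referencing vm._resv */
i915_ggtt_driver_late_release(i915);   /* final dma_resv_fini(&ggtt->vm._resv) */
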
@@@ -245,11 -245,10 +245,12 @@@ struct i915_address_space 
        atomic_t open;
  
        struct mutex mutex; /* protects vma and our lists */
 -      struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
 +
 +      struct kref resv_ref; /* kref to keep the reservation lock alive. */
 +      struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
  #define VM_CLASS_GGTT 0
  #define VM_CLASS_PPGTT 1
+ #define VM_CLASS_DPT 2
  
        struct drm_i915_gem_object *scratch[4];
        /**
        /* Global GTT */
        bool is_ggtt:1;
  
+       /* Display page table */
+       bool is_dpt:1;
        /* Some systems support read-only mappings for GGTT and/or PPGTT */
        bool has_read_only:1;
  
@@@ -356,6 -358,7 +360,7 @@@ struct i915_ppgtt 
  };
  
  #define i915_is_ggtt(vm) ((vm)->is_ggtt)
+ #define i915_is_dpt(vm) ((vm)->is_dpt)
  
  int __must_check
  i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
@@@ -401,36 -404,13 +406,36 @@@ i915_vm_get(struct i915_address_space *
        return vm;
  }
  
 +/**
 + * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 + * @vm: The vm whose reservation lock we want to share.
 + *
 + * Return: A pointer to the vm's reservation lock.
 + */
 +static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
 +{
 +      kref_get(&vm->resv_ref);
 +      return &vm->_resv;
 +}
 +
  void i915_vm_release(struct kref *kref);
  
 +void i915_vm_resv_release(struct kref *kref);
 +
  static inline void i915_vm_put(struct i915_address_space *vm)
  {
        kref_put(&vm->ref, i915_vm_release);
  }
  
 +/**
 + * i915_vm_resv_put - Release a reference on the vm's reservation lock
 + * @resv: Pointer to a reservation lock obtained from i915_vm_resv_get()
 + */
 +static inline void i915_vm_resv_put(struct i915_address_space *vm)
 +{
 +      kref_put(&vm->resv_ref, i915_vm_resv_release);
 +}
 +
  static inline struct i915_address_space *
  i915_vm_open(struct i915_address_space *vm)
  {
@@@ -526,7 -506,6 +531,7 @@@ void i915_ggtt_enable_guc(struct i915_g
  void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
  int i915_init_ggtt(struct drm_i915_private *i915);
  void i915_ggtt_driver_release(struct drm_i915_private *i915);
 +void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
  
  static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
  {
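The resv_ref kref introduced above lets the reservation lock outlive the vm reference itself; the new i915_vm_resv_get()/i915_vm_resv_put() helpers hand that reference out and back. A minimal usage sketch (illustration only, assuming a valid struct i915_address_space *vm):

/* Illustration only: share the vm's reservation lock with another object. */
struct dma_resv *resv = i915_vm_resv_get(vm);   /* kref_get(&vm->resv_ref) */
/* ... use resv as the common lock for pd objects / buffer pool ... */
i915_vm_resv_put(vm);                           /* kref_put(&vm->resv_ref, i915_vm_resv_release) */
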
@@@ -39,6 -39,7 +39,7 @@@
  #include <linux/vga_switcheroo.h>
  #include <linux/vt.h>
  
+ #include <drm/drm_aperture.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_ioctl.h>
  #include <drm/drm_irq.h>
@@@ -553,7 -554,7 +554,7 @@@ static int i915_driver_hw_probe(struct 
        if (ret)
                goto err_perf;
  
-       ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+       ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
        if (ret)
                goto err_ggtt;
  
@@@ -630,8 -631,6 +631,8 @@@ err_mem_regions
        intel_memory_regions_driver_release(dev_priv);
  err_ggtt:
        i915_ggtt_driver_release(dev_priv);
 +      i915_gem_drain_freed_objects(dev_priv);
 +      i915_ggtt_driver_late_release(dev_priv);
  err_perf:
        i915_perf_fini(dev_priv);
        return ret;
@@@ -759,7 -758,6 +760,6 @@@ i915_driver_create(struct pci_dev *pdev
        if (IS_ERR(i915))
                return i915;
  
-       i915->drm.pdev = pdev;
        pci_set_drvdata(pdev, i915);
  
        /* Device parameters start as a copy of module parameters. */
@@@ -882,8 -880,6 +882,8 @@@ out_cleanup_hw
        i915_driver_hw_remove(i915);
        intel_memory_regions_driver_release(i915);
        i915_ggtt_driver_release(i915);
 +      i915_gem_drain_freed_objects(i915);
 +      i915_ggtt_driver_late_release(i915);
  out_cleanup_mmio:
        i915_driver_mmio_release(i915);
  out_runtime_pm_put:
@@@ -940,7 -936,6 +940,7 @@@ static void i915_driver_release(struct 
        intel_memory_regions_driver_release(dev_priv);
        i915_ggtt_driver_release(dev_priv);
        i915_gem_drain_freed_objects(dev_priv);
 +      i915_ggtt_driver_late_release(dev_priv);
  
        i915_driver_mmio_release(dev_priv);
  
@@@ -976,8 -971,12 +976,12 @@@ static int i915_driver_open(struct drm_
   */
  static void i915_driver_lastclose(struct drm_device *dev)
  {
+       struct drm_i915_private *i915 = to_i915(dev);
        intel_fbdev_restore_mode(dev);
-       vga_switcheroo_process_delayed_switch();
+       if (HAS_DISPLAY(i915))
+               vga_switcheroo_process_delayed_switch();
  }
  
  static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@@ -997,6 -996,9 +1001,9 @@@ static void intel_suspend_encoders(stru
        struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
  
+       if (!HAS_DISPLAY(dev_priv))
+               return;
        drm_modeset_lock_all(dev);
        for_each_intel_encoder(dev, encoder)
                if (encoder->suspend)
@@@ -1009,6 -1011,9 +1016,9 @@@ static void intel_shutdown_encoders(str
        struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
  
+       if (!HAS_DISPLAY(dev_priv))
+               return;
        drm_modeset_lock_all(dev);
        for_each_intel_encoder(dev, encoder)
                if (encoder->shutdown)
@@@ -1024,9 -1029,11 +1034,11 @@@ void i915_driver_shutdown(struct drm_i9
  
        i915_gem_suspend(i915);
  
-       drm_kms_helper_poll_disable(&i915->drm);
+       if (HAS_DISPLAY(i915)) {
+               drm_kms_helper_poll_disable(&i915->drm);
  
-       drm_atomic_helper_shutdown(&i915->drm);
+               drm_atomic_helper_shutdown(&i915->drm);
+       }
  
        intel_dp_mst_suspend(i915);
  
        intel_suspend_encoders(i915);
        intel_shutdown_encoders(i915);
  
+       intel_csr_ucode_suspend(i915);
        /*
         * The only requirement is to reboot with display DC states disabled,
         * for now leaving all display power wells in the INIT power domain
-        * enabled matching the driver reload sequence.
+        * enabled.
+        *
+        * TODO:
+        * - unify the pci_driver::shutdown sequence here with the
+        *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
+        * - unify the driver remove and system/runtime suspend sequences with
+        *   the above unified shutdown/poweroff sequence.
         */
        intel_power_domains_driver_remove(i915);
        enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@@ -1082,8 -1097,8 +1102,8 @@@ static int i915_drm_suspend(struct drm_
        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        intel_power_domains_disable(dev_priv);
-       drm_kms_helper_poll_disable(dev);
+       if (HAS_DISPLAY(dev_priv))
+               drm_kms_helper_poll_disable(dev);
  
        pci_save_state(pdev);
  
@@@ -1230,7 -1245,8 +1250,8 @@@ static int i915_drm_resume(struct drm_d
         */
        intel_runtime_pm_enable_interrupts(dev_priv);
  
-       drm_mode_config_reset(dev);
+       if (HAS_DISPLAY(dev_priv))
+               drm_mode_config_reset(dev);
  
        i915_gem_resume(dev_priv);
  
        intel_display_resume(dev);
  
        intel_hpd_poll_disable(dev_priv);
-       drm_kms_helper_poll_enable(dev);
+       if (HAS_DISPLAY(dev_priv))
+               drm_kms_helper_poll_enable(dev);
  
        intel_opregion_resume(dev_priv);