i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_mmap *args = data;
struct drm_i915_gem_object *obj;
unsigned long addr;
+ /* mmap ioctl is disallowed for all platforms after TGL-LP. This also
+ * covers all platforms with local memory.
+ */
+ if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
+ 	return -EOPNOTSUPP;
+
if (args->flags & ~(I915_MMAP_WC))
	return -EINVAL;
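Userspace hitting the new -EOPNOTSUPP is expected to move to the offset-based mmap path instead. A minimal sketch of that flow using libdrm's drmIoctl() (error handling elided; fd, handle and size are assumed to exist already):

	struct drm_i915_gem_mmap_offset mmo = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,	/* write-combined, like I915_MMAP_WC */
	};

	/* Ask the kernel for a fake offset into the DRM fd... */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo))
		return NULL;

	/* ...and map the object through that offset. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmo.offset);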
goto err_unpin;
/* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
+ ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
+ (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start));
if (ret)
	goto err_fence;
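io_mapping_map_user() replaces the driver-private remap_io_mapping() with a common mm/ helper: it sanity-checks the vma flags and remaps the pfn range using the io_mapping's caching attributes. Roughly (a paraphrase of the generic helper, not i915 code):

int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size)
{
	vm_flags_t expected_flags = VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

	if (WARN_ON_ONCE((vma->vm_flags & expected_flags) != expected_flags))
		return -EINVAL;

	/* The io_mapping was PAT-validated at creation time, so per-call
	 * pfn tracking can be skipped here. */
	return remap_pfn_range_notrack(vma, addr, pfn, size,
		__pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			 (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)));
}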
#include <linux/shmem_fs.h>
#include <linux/swap.h>
- #include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include "gt/intel_gt.h"
err_xfer:
if (!IS_ERR_OR_NULL(pages)) {
- 	unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+ 	unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
}
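The switch matters once the DMA address space no longer mirrors the CPU one (e.g. an IOMMU coalescing segments): the page-size mask handed to __i915_gem_object_set_pages() must describe the DMA-mapped layout, so it has to be derived from sg_dma_len() rather than sg->length. A sketch of what such a helper computes (hypothetical name):

static unsigned int sg_dma_sizes_sketch(struct scatterlist *sg)
{
	unsigned int page_sizes = 0;

	/* OR-ing the DMA segment lengths yields a mask whose set bits
	 * bound the GTT page sizes usable for this object. */
	while (sg && sg_dma_len(sg)) {
		page_sizes |= sg_dma_len(sg);
		sg = __sg_next(sg);
	}

	return page_sizes;
}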
#include "i915_vgpu.h"
#include "intel_gtt.h"
+ #include "gen8_ppgtt.h"
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
intel_gtt_chipset_flush();
}
- static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen8_pte_t pte = addr | _PAGE_PRESENT;
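Dropping the static qualifier exposes the encoder to other GGTT-like address spaces; the display page table (DPT) introduced below is the intended consumer. A sketch of that reuse, assuming a DPT setup function along the lines of the same series:

	/* a DPT encodes its PTEs exactly like the GGTT does */
	vm->pte_encode = gen8_ggtt_pte_encode;
	vm->is_dpt = true;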
mutex_unlock(&ggtt->vm.mutex);
i915_address_space_fini(&ggtt->vm);
- dma_resv_fini(&ggtt->vm.resv);
arch_phys_wc_del(ggtt->mtrr);
ggtt_cleanup_hw(ggtt);
}
+/**
+ * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
+ * all freed objects have been drained.
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
+{
+ 	struct i915_ggtt *ggtt = &i915->ggtt;
+
+ 	GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
+ 	dma_resv_fini(&ggtt->vm._resv);
+}
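The split exists because objects on the deferred-free list may still hold a reference to the vm's reservation lock. The required teardown ordering, mirrored by the error paths further down, is:

	i915_ggtt_driver_release(i915);
	i915_gem_drain_freed_objects(i915);	/* drops the last shared resv references */
	i915_ggtt_driver_late_release(i915);	/* only now is dma_resv_fini() safe */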
+
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
return -ENOMEM;
}
+ kref_init(&ggtt->vm.resv_ref);
ret = setup_scratch_page(&ggtt->vm);
if (ret) {
	drm_err(&i915->drm, "Scratch setup failed\n");
ggtt->vm.gt = gt;
ggtt->vm.i915 = i915;
ggtt->vm.dma = i915->drm.dev;
- dma_resv_init(&ggtt->vm.resv);
+ dma_resv_init(&ggtt->vm._resv);
if (INTEL_GEN(i915) <= 5)
	ret = i915_gmch_probe(ggtt);
else
	ret = gen8_gmch_probe(ggtt);
if (ret) {
- 	dma_resv_fini(&ggtt->vm.resv);
+ 	dma_resv_fini(&ggtt->vm._resv);
	return ret;
}
atomic_t open;
struct mutex mutex; /* protects vma and our lists */
- struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
+
+ struct kref resv_ref; /* kref to keep the reservation lock alive. */
+ struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
+ #define VM_CLASS_DPT 2
struct drm_i915_gem_object *scratch[4];
/**
/* Global GTT */
bool is_ggtt:1;
+ /* Display page table */
+ bool is_dpt:1;
+
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
};
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+ #define i915_is_dpt(vm) ((vm)->is_dpt)
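For most binding decisions the DPT behaves like a second global GTT, so call sites that used to test only for the GGTT typically want both. A combined predicate along these lines is the natural consumer (sketch):

#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))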
int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
return vm;
}
+/**
+ * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
+ * @vm: The vm whose reservation lock we want to share.
+ *
+ * Return: A pointer to the vm's reservation lock.
+ */
+static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
+{
+ 	kref_get(&vm->resv_ref);
+ 	return &vm->_resv;
+}
+
void i915_vm_release(struct kref *kref);
+void i915_vm_resv_release(struct kref *kref);
+
static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}
+/**
+ * i915_vm_resv_put - Release a reference on the vm's reservation lock
+ * @vm: The vm whose reservation lock we want to release a reference on
+ */
+static inline void i915_vm_resv_put(struct i915_address_space *vm)
+{
+ 	kref_put(&vm->resv_ref, i915_vm_resv_release);
+}
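The put side pairs with a release callback along these lines (a sketch based on the same series): once the last lock reference is gone, the dma_resv is finalized and the vm memory, whose lifetime now follows the lock rather than the vm refcount, is freed.

void i915_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, typeof(*vm), resv_ref);

	dma_resv_fini(&vm->_resv);
	kfree(vm);
}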
+
static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
+void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
+ #include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
if (ret)
	goto err_perf;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
if (ret)
	goto err_ggtt;
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
i915_ggtt_driver_release(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
err_perf:
i915_perf_fini(dev_priv);
return ret;
if (IS_ERR(i915))
	return i915;
- i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
/* Device parameters start as a copy of module parameters. */
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
i915_ggtt_driver_release(i915);
+ i915_gem_drain_freed_objects(i915);
+ i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
i915_driver_mmio_release(i915);
out_runtime_pm_put:
intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
i915_driver_mmio_release(dev_priv);
*/
static void i915_driver_lastclose(struct drm_device *dev)
{
+ struct drm_i915_private *i915 = to_i915(dev);
+
intel_fbdev_restore_mode(dev);
- vga_switcheroo_process_delayed_switch();
+
+ if (HAS_DISPLAY(i915))
+ 	vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
+ if (!HAS_DISPLAY(dev_priv))
+ 	return;
+
drm_modeset_lock_all(dev);
for_each_intel_encoder(dev, encoder)
	if (encoder->suspend)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
+ if (!HAS_DISPLAY(dev_priv))
+ 	return;
+
drm_modeset_lock_all(dev);
for_each_intel_encoder(dev, encoder)
	if (encoder->shutdown)
i915_gem_suspend(i915);
- drm_kms_helper_poll_disable(&i915->drm);
+ if (HAS_DISPLAY(i915)) {
+ 	drm_kms_helper_poll_disable(&i915->drm);
- drm_atomic_helper_shutdown(&i915->drm);
+ 	drm_atomic_helper_shutdown(&i915->drm);
+ }
intel_dp_mst_suspend(i915);
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+ intel_csr_ucode_suspend(i915);
+
/*
* The only requirement is to reboot with display DC states disabled,
* for now leaving all display power wells in the INIT power domain
- * enabled matching the driver reload sequence.
+ * enabled.
+ *
+ * TODO:
+ * - unify the pci_driver::shutdown sequence here with the
+ * pci_driver.driver.pm.poweroff,poweroff_late sequence.
+ * - unify the driver remove and system/runtime suspend sequences with
+ * the above unified shutdown/poweroff sequence.
*/
intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(dev_priv);
-
- drm_kms_helper_poll_disable(dev);
+ if (HAS_DISPLAY(dev_priv))
+ 	drm_kms_helper_poll_disable(dev);
pci_save_state(pdev);
*/
intel_runtime_pm_enable_interrupts(dev_priv);
- drm_mode_config_reset(dev);
+ if (HAS_DISPLAY(dev_priv))
+ 	drm_mode_config_reset(dev);
i915_gem_resume(dev_priv);
intel_display_resume(dev);
intel_hpd_poll_disable(dev_priv);
- drm_kms_helper_poll_enable(dev);
+ if (HAS_DISPLAY(dev_priv))
+ 	drm_kms_helper_poll_enable(dev);
intel_opregion_resume(dev_priv);