Merge tag 'drm-intel-next-2018-07-19' of git://anongit.freedesktop.org/drm/drm-intel...
[platform/kernel/linux-rpi.git] drivers/gpu/drm/i915/i915_gem_gtt.c
index abd81fb..f00c7fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -204,9 +204,9 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
                        return err;
        }
 
-       /* Currently applicable only to VLV */
+       /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
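
The helper used above replaces the open-coded flag test; its definition is not part of this hunk. A minimal sketch, assuming it simply wraps the same gt_ro bit the old code read:

    /* Sketch only; the real helper lives in i915_gem_object.h. */
    static inline bool
    i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
    {
            return obj->gt_ro; /* the bit the old open-coded test used */
    }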
@@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma)
 }
 
 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-                                 enum i915_cache_level level)
+                                 enum i915_cache_level level,
+                                 u32 flags)
 {
-       gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
-       pte |= addr;
+       gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+       if (unlikely(flags & PTE_READ_ONLY))
+               pte &= ~_PAGE_RW;
 
        switch (level) {
        case I915_CACHE_NONE:
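
For illustration, encoding a read-only uncached page (a sketch, assuming PPAT_UNCACHED is the cache bit this switch applies for I915_CACHE_NONE):

    /* Illustrative only, not part of the patch. */
    gen8_pte_t pte = gen8_pte_encode(0x1000, I915_CACHE_NONE, PTE_READ_ONLY);
    /* pte == 0x1000 | _PAGE_PRESENT | PPAT_UNCACHED; _PAGE_RW has been
     * cleared by the flags test above, so GPU writes through it fault.
     */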
@@ -531,6 +534,14 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
 static void i915_address_space_init(struct i915_address_space *vm,
                                    struct drm_i915_private *dev_priv)
 {
+       /*
+        * The vm->mutex must be reclaim safe (for use in the shrinker).
+        * Do a dummy acquire now under fs_reclaim so that any allocation
+        * attempt holding the lock is immediately reported by lockdep.
+        */
+       mutex_init(&vm->mutex);
+       i915_gem_shrinker_taints_mutex(&vm->mutex);
+
        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
        vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
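
The dummy acquire described in the comment happens inside i915_gem_shrinker_taints_mutex(); roughly (a sketch, assuming CONFIG_LOCKDEP is enabled):

    /* Take the mutex once under a fake fs_reclaim context so lockdep
     * records the reclaim -> vm->mutex ordering immediately, rather than
     * only when the shrinker first runs under real memory pressure.
     */
    fs_reclaim_acquire(GFP_KERNEL);
    mutex_lock(&vm->mutex);
    mutex_unlock(&vm->mutex);
    fs_reclaim_release(GFP_KERNEL);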
@@ -551,6 +562,8 @@ static void i915_address_space_fini(struct i915_address_space *vm)
        spin_unlock(&vm->free_pages.lock);
 
        drm_mm_takedown(&vm->mm);
+
+       mutex_destroy(&vm->mutex);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
@@ -711,7 +724,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
                               struct i915_page_table *pt)
 {
        fill_px(vm, pt,
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
 }
 
 static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
@@ -859,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
        unsigned int pte = gen8_pte_index(start);
        unsigned int pte_end = pte + num_entries;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t *vaddr;
 
        GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -1031,10 +1044,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
                              struct i915_page_directory_pointer *pdp,
                              struct sgt_dma *iter,
                              struct gen8_insert_pte *idx,
-                             enum i915_cache_level cache_level)
+                             enum i915_cache_level cache_level,
+                             u32 flags)
 {
        struct i915_page_directory *pd;
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        gen8_pte_t *vaddr;
        bool ret;
 
@@ -1085,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
-                                  u32 unused)
+                                  u32 flags)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
        gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
-                                     cache_level);
+                                     cache_level, flags);
 
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }
@@ -1100,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
                                           struct i915_page_directory_pointer **pdps,
                                           struct sgt_dma *iter,
-                                          enum i915_cache_level cache_level)
+                                          enum i915_cache_level cache_level,
+                                          u32 flags)
 {
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        u64 start = vma->node.start;
        dma_addr_t rem = iter->sg->length;
 
@@ -1218,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
-                                  u32 unused)
+                                  u32 flags)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
 
        if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
-               gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+               gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+                                              flags);
        } else {
                struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
                while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
-                                                    &iter, &idx, cache_level))
+                                                    &iter, &idx, cache_level,
+                                                    flags))
                        GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
 
                vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1568,7 +1585,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
        struct i915_address_space *vm = &ppgtt->vm;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        u64 start = 0, length = ppgtt->vm.total;
 
        if (use_4lvl(vm)) {
@@ -1645,6 +1662,13 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
                1ULL << 48 :
                1ULL << 32;
 
+       /*
+        * From bdw, there is support for read-only pages in the PPGTT.
+        *
+        * XXX GVT is not honouring the lack of RW in the PTE bits.
+        */
+       ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
        i915_address_space_init(&ppgtt->vm, i915);
 
        /* There are only few exceptions for gen >=6. chv and bxt.
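
has_read_only is the gate callers are expected to test before marking an object GPU read-only. A hedged sketch of that usage (helper name assumed from the same series, e.g. at ring buffer allocation):

    /* Assumed usage, not part of this hunk: only request a read-only
     * mapping when the address space can actually enforce it.
     */
    if (vma->vm->has_read_only)
            i915_gem_object_set_readonly(vma->obj);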
@@ -2451,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
        gen8_pte_t __iomem *pte =
                (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
 
-       gen8_set_pte(pte, gen8_pte_encode(addr, level));
+       gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
 
        ggtt->invalidate(vm->i915);
 }
@@ -2459,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct i915_vma *vma,
                                     enum i915_cache_level level,
-                                    u32 unused)
+                                    u32 flags)
 {
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct sgt_iter sgt_iter;
        gen8_pte_t __iomem *gtt_entries;
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
        dma_addr_t addr;
 
+       /*
+        * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+        * not to allow the user to override access to a read only page.
+        */
+
        gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
        gtt_entries += vma->node.start >> PAGE_SHIFT;
        for_each_sgt_dma(addr, sgt_iter, vma->pages)
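
Because the GGTT cannot enforce the bit, CPU write paths in the same series refuse read-only objects up front instead; paraphrased:

    /* Paraphrased guard (e.g. in the pwrite ioctl): writes to a
     * read-only object are rejected since the GGTT PTE cannot stop them.
     */
    if (i915_gem_object_is_readonly(obj))
            return -EINVAL;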
@@ -2532,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t __iomem *gtt_base =
                (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2593,13 +2622,14 @@ struct insert_entries {
        struct i915_address_space *vm;
        struct i915_vma *vma;
        enum i915_cache_level level;
+       u32 flags;
 };
 
 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 {
        struct insert_entries *arg = _arg;
 
-       gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+       gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
        bxt_vtd_ggtt_wa(arg->vm);
 
        return 0;
@@ -2608,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
                                             struct i915_vma *vma,
                                             enum i915_cache_level level,
-                                            u32 unused)
+                                            u32 flags)
 {
-       struct insert_entries arg = { vm, vma, level };
+       struct insert_entries arg = { vm, vma, level, flags };
 
        stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
 }
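
flags travels inside struct insert_entries because stop_machine() forwards only a single pointer to its callback, per <linux/stop_machine.h>:

    typedef int (*cpu_stop_fn_t)(void *arg);
    int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);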
@@ -2701,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        struct drm_i915_gem_object *obj = vma->obj;
        u32 pte_flags;
 
-       /* Currently applicable only to VLV */
+       /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
        pte_flags = 0;
-       if (obj->gt_ro)
+       if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
 
        intel_runtime_pm_get(i915);
@@ -2741,7 +2771,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
        /* Currently applicable only to VLV */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        if (flags & I915_VMA_LOCAL_BIND) {
@@ -3581,6 +3611,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_address_space_init(&ggtt->vm, dev_priv);
+
+       /* Only VLV supports read-only GGTT mappings */
+       ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
        if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
                ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
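
Taken together, the read-only plumbing now runs end to end (illustrative summary, not verbatim kernel code):

    /* 1. create: mark the object only where enforceable
     *        if (vm->has_read_only)          // bdw+ PPGTT, or VLV GGTT
     *                mark the object read-only;
     * 2. bind:   ppgtt_bind_vma()/ggtt_bind_vma() turn the object flag
     *            into PTE_READ_ONLY;
     * 3. insert: gen8_pte_encode() clears _PAGE_RW, so any GPU write
     *            through the PTE faults instead of landing.
     */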