drm/i915: Refactor object page API
author     Chris Wilson <chris@chris-wilson.co.uk>
           Fri, 28 Oct 2016 12:58:35 +0000 (13:58 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Fri, 28 Oct 2016 19:53:46 +0000 (20:53 +0100)
The plan is to make obtaining the backing storage for the object
independent of struct_mutex (i.e. to give it its own locking). The first
step is to update the API so that normal users only call pin/unpin whilst
working on the backing storage.
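
In practice the conversion below is mechanical; as a sketch (illustrative
only, with error handling elided), the calling convention changes from

	/* Before: acquire the pages, then take an explicit pin on them. */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;
	i915_gem_object_pin_pages(obj);
	/* ... operate on obj->pages ... */
	i915_gem_object_unpin_pages(obj);

to

	/* After: a single pin acquires the pages on first use, and the
	 * page state now lives in the obj->mm substruct.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;
	/* ... operate on obj->mm.pages ... */
	i915_gem_object_unpin_pages(obj);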

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-12-chris@chris-wilson.co.uk
17 files changed:
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_batch_pool.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/intel_lrc.c

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f191d7b..f5039f4 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
        }
 
        if (ret == 0 && needs_clflush_after)
-               drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+               drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
        i915_gem_object_unpin_map(shadow_batch_obj);
 
        return ret;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9f5a392..e97a16c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -112,7 +112,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj)
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 {
-       return obj->mapping ? 'M' : ' ';
+       return obj->mm.mapping ? 'M' : ' ';
 }
 
 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -158,8 +158,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   i915_gem_active_get_seqno(&obj->last_write,
                                             &obj->base.dev->struct_mutex),
                   i915_cache_level_str(dev_priv, obj->cache_level),
-                  obj->dirty ? " dirty" : "",
-                  obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+                  obj->mm.dirty ? " dirty" : "",
+                  obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -403,12 +403,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                size += obj->base.size;
                ++count;
 
-               if (obj->madv == I915_MADV_DONTNEED) {
+               if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }
 
-               if (obj->mapping) {
+               if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }
@@ -425,12 +425,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                        ++dpy_count;
                }
 
-               if (obj->madv == I915_MADV_DONTNEED) {
+               if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }
 
-               if (obj->mapping) {
+               if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }
@@ -2028,7 +2028,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
                seq_printf(m, "\tBound in GGTT at 0x%08x\n",
                           i915_ggtt_offset(vma));
 
-       if (i915_gem_object_get_pages(vma->obj)) {
+       if (i915_gem_object_pin_pages(vma->obj)) {
                seq_puts(m, "\tFailed to get pages for context object\n\n");
                return;
        }
@@ -2047,6 +2047,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
                kunmap_atomic(reg_state);
        }
 
+       i915_gem_object_unpin_pages(vma->obj);
        seq_putc(m, '\n');
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 85ae83a..50781cb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2252,17 +2252,6 @@ struct drm_i915_gem_object {
         */
 #define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
 
-       /**
-        * This is set if the object has been written to since last bound
-        * to the GTT
-        */
-       unsigned int dirty:1;
-
-       /**
-        * Advice: are the backing pages purgeable?
-        */
-       unsigned int madv:2;
-
        /*
         * Is the object to be mapped as read-only to the GPU
         * Only honoured if hardware has relevant pte bit
@@ -2284,16 +2273,31 @@ struct drm_i915_gem_object {
        unsigned int bind_count;
        unsigned int pin_display;
 
-       struct sg_table *pages;
-       int pages_pin_count;
-       struct i915_gem_object_page_iter {
-               struct scatterlist *sg_pos;
-               unsigned int sg_idx; /* in pages, but 32bit eek! */
+       struct {
+               unsigned int pages_pin_count;
+
+               struct sg_table *pages;
+               void *mapping;
 
-               struct radix_tree_root radix;
-               struct mutex lock; /* protects this cache */
-       } get_page;
-       void *mapping;
+               struct i915_gem_object_page_iter {
+                       struct scatterlist *sg_pos;
+                       unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+                       struct radix_tree_root radix;
+                       struct mutex lock; /* protects this cache */
+               } get_page;
+
+               /**
+                * Advice: are the backing pages purgeable?
+                */
+               unsigned int madv:2;
+
+               /**
+                * This is set if the object has been written to since the
+                * pages were last acquired.
+                */
+               bool dirty:1;
+       } mm;
 
        /** Breadcrumb of last rendering to the buffer.
         * There can only be one writer, but we allow for multiple readers.
@@ -3182,14 +3186,11 @@ void i915_vma_close(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
-int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __sg_page_count(struct scatterlist *sg)
+static inline int __sg_page_count(const struct scatterlist *sg)
 {
        return sg->length >> PAGE_SHIFT;
 }
@@ -3210,19 +3211,52 @@ dma_addr_t
 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);
 
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       if (obj->mm.pages_pin_count++)
+               return 0;
+
+       return __i915_gem_object_get_pages(obj);
+}
+
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-       GEM_BUG_ON(obj->pages == NULL);
-       obj->pages_pin_count++;
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       GEM_BUG_ON(!obj->mm.pages);
+
+       obj->mm.pages_pin_count++;
+}
+
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+       return obj->mm.pages_pin_count;
+}
+
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       GEM_BUG_ON(!obj->mm.pages);
+
+       obj->mm.pages_pin_count--;
+       GEM_BUG_ON(obj->mm.pages_pin_count < obj->bind_count);
 }
 
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
-       GEM_BUG_ON(obj->pages_pin_count == 0);
-       obj->pages_pin_count--;
-       GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
+       __i915_gem_object_unpin_pages(obj);
 }
 
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+
 enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aa0de3a..0d702c8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -216,7 +216,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        sg_dma_address(sg) = obj->phys_handle->busaddr;
        sg_dma_len(sg) = obj->base.size;
 
-       obj->pages = st;
+       obj->mm.pages = st;
        return 0;
 }
 
@@ -225,7 +225,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 {
        int ret;
 
-       BUG_ON(obj->madv == __I915_MADV_PURGED);
+       GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (WARN_ON(ret)) {
@@ -235,10 +235,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
-       if (obj->madv == I915_MADV_DONTNEED)
-               obj->dirty = 0;
+       if (obj->mm.madv == I915_MADV_DONTNEED)
+               obj->mm.dirty = false;
 
-       if (obj->dirty) {
+       if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;
@@ -257,22 +257,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                        kunmap_atomic(dst);
 
                        set_page_dirty(page);
-                       if (obj->madv == I915_MADV_WILLNEED)
+                       if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
-               obj->dirty = 0;
+               obj->mm.dirty = false;
        }
 
-       sg_free_table(obj->pages);
-       kfree(obj->pages);
+       sg_free_table(obj->mm.pages);
+       kfree(obj->mm.pages);
 }
 
 static void
 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
 {
        drm_pci_free(obj->base.dev, obj->phys_handle);
+       i915_gem_object_unpin_pages(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -507,7 +508,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                return 0;
        }
 
-       if (obj->madv != I915_MADV_WILLNEED)
+       if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;
 
        if (obj->base.filp == NULL)
@@ -517,7 +518,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       ret = i915_gem_object_put_pages(obj);
+       ret = __i915_gem_object_put_pages(obj);
        if (ret)
                return ret;
 
@@ -529,7 +530,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        obj->phys_handle = phys;
        obj->ops = &i915_gem_phys_ops;
 
-       return i915_gem_object_get_pages(obj);
+       return i915_gem_object_pin_pages(obj);
 }
 
 static int
@@ -725,12 +726,10 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;
 
-       i915_gem_object_pin_pages(obj);
-
        i915_gem_object_flush_gtt_write_domain(obj);
 
        /* If we're not in the cpu read domain, set ourself into the gtt
@@ -778,12 +777,10 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;
 
-       i915_gem_object_pin_pages(obj);
-
        i915_gem_object_flush_gtt_write_domain(obj);
 
        /* If we're not in the cpu write domain, set ourself into the
@@ -813,7 +810,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                obj->cache_dirty = true;
 
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       obj->dirty = 1;
+       obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;
 
@@ -951,13 +948,11 @@ i915_gem_gtt_pread(struct drm_device *dev,
                if (ret)
                        goto out;
 
-               ret = i915_gem_object_get_pages(obj);
+               ret = i915_gem_object_pin_pages(obj);
                if (ret) {
                        remove_mappable_node(&node);
                        goto out;
                }
-
-               i915_gem_object_pin_pages(obj);
        }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -1064,7 +1059,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
        offset = args->offset;
        remain = args->size;
 
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+       for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);
 
@@ -1254,13 +1249,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                if (ret)
                        goto out;
 
-               ret = i915_gem_object_get_pages(obj);
+               ret = i915_gem_object_pin_pages(obj);
                if (ret) {
                        remove_mappable_node(&node);
                        goto out;
                }
-
-               i915_gem_object_pin_pages(obj);
        }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -1268,7 +1261,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                goto out_unpin;
 
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       obj->dirty = true;
+       obj->mm.dirty = true;
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
@@ -1439,7 +1432,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        offset = args->offset;
        remain = args->size;
 
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+       for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;
@@ -2266,7 +2259,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
-       obj->madv = __I915_MADV_PURGED;
+       obj->mm.madv = __I915_MADV_PURGED;
 }
 
 /* Try to discard unwanted pages */
@@ -2275,7 +2268,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping;
 
-       switch (obj->madv) {
+       switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
                i915_gem_object_truncate(obj);
        case __I915_MADV_PURGED:
@@ -2296,7 +2289,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        struct page *page;
        int ret;
 
-       BUG_ON(obj->madv == __I915_MADV_PURGED);
+       GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (WARN_ON(ret)) {
@@ -2312,22 +2305,22 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj);
 
-       if (obj->madv == I915_MADV_DONTNEED)
-               obj->dirty = 0;
+       if (obj->mm.madv == I915_MADV_DONTNEED)
+               obj->mm.dirty = false;
 
-       for_each_sgt_page(page, sgt_iter, obj->pages) {
-               if (obj->dirty)
+       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+               if (obj->mm.dirty)
                        set_page_dirty(page);
 
-               if (obj->madv == I915_MADV_WILLNEED)
+               if (obj->mm.madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);
 
                put_page(page);
        }
-       obj->dirty = 0;
+       obj->mm.dirty = false;
 
-       sg_free_table(obj->pages);
-       kfree(obj->pages);
+       sg_free_table(obj->mm.pages);
+       kfree(obj->mm.pages);
 }
 
 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
@@ -2335,21 +2328,20 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
        struct radix_tree_iter iter;
        void **slot;
 
-       radix_tree_for_each_slot(slot, &obj->get_page.radix, &iter, 0)
-               radix_tree_delete(&obj->get_page.radix, iter.index);
+       radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+               radix_tree_delete(&obj->mm.get_page.radix, iter.index);
 }
 
-int
-i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
        const struct drm_i915_gem_object_ops *ops = obj->ops;
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       if (obj->pages == NULL)
+       if (!obj->mm.pages)
                return 0;
 
-       if (obj->pages_pin_count)
+       if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;
 
        GEM_BUG_ON(obj->bind_count);
@@ -2359,22 +2351,22 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
         * lists early. */
        list_del(&obj->global_list);
 
-       if (obj->mapping) {
+       if (obj->mm.mapping) {
                void *ptr;
 
-               ptr = ptr_mask_bits(obj->mapping);
+               ptr = ptr_mask_bits(obj->mm.mapping);
                if (is_vmalloc_addr(ptr))
                        vunmap(ptr);
                else
                        kunmap(kmap_to_page(ptr));
 
-               obj->mapping = NULL;
+               obj->mm.mapping = NULL;
        }
 
        __i915_gem_object_reset_page_iter(obj);
 
        ops->put_pages(obj);
-       obj->pages = NULL;
+       obj->mm.pages = NULL;
 
        i915_gem_object_invalidate(obj);
 
@@ -2474,7 +2466,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        }
        if (sg) /* loop terminated early; short sg table */
                sg_mark_end(sg);
-       obj->pages = st;
+       obj->mm.pages = st;
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret)
@@ -2485,7 +2477,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
        if (i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
-               i915_gem_object_pin_pages(obj);
+               __i915_gem_object_pin_pages(obj);
 
        return 0;
 
@@ -2517,8 +2509,7 @@ err_pages:
  * either as a result of memory pressure (reaping pages under the shrinker)
  * or as the object is itself released.
  */
-int
-i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        const struct drm_i915_gem_object_ops *ops = obj->ops;
@@ -2526,24 +2517,25 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       if (obj->pages)
+       if (obj->mm.pages)
                return 0;
 
-       if (obj->madv != I915_MADV_WILLNEED) {
+       if (obj->mm.madv != I915_MADV_WILLNEED) {
                DRM_DEBUG("Attempting to obtain a purgeable object\n");
+               __i915_gem_object_unpin_pages(obj);
                return -EFAULT;
        }
 
-       BUG_ON(obj->pages_pin_count);
-
        ret = ops->get_pages(obj);
-       if (ret)
+       if (ret) {
+               __i915_gem_object_unpin_pages(obj);
                return ret;
+       }
 
        list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
-       obj->get_page.sg_pos = obj->pages->sgl;
-       obj->get_page.sg_idx = 0;
+       obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
+       obj->mm.get_page.sg_idx = 0;
 
        return 0;
 }
@@ -2553,7 +2545,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
                                 enum i915_map_type type)
 {
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-       struct sg_table *sgt = obj->pages;
+       struct sg_table *sgt = obj->mm.pages;
        struct sgt_iter sgt_iter;
        struct page *page;
        struct page *stack_pages[32];
@@ -2607,14 +2599,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ERR_PTR(ret);
 
-       i915_gem_object_pin_pages(obj);
-       pinned = obj->pages_pin_count > 1;
+       pinned = obj->mm.pages_pin_count > 1;
 
-       ptr = ptr_unpack_bits(obj->mapping, has_type);
+       ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        ret = -EBUSY;
@@ -2626,7 +2617,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                else
                        kunmap(kmap_to_page(ptr));
 
-               ptr = obj->mapping = NULL;
+               ptr = obj->mm.mapping = NULL;
        }
 
        if (!ptr) {
@@ -2636,7 +2627,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                        goto err;
                }
 
-               obj->mapping = ptr_pack_bits(ptr, type);
+               obj->mm.mapping = ptr_pack_bits(ptr, type);
        }
 
        return ptr;
@@ -3087,7 +3078,7 @@ int i915_vma_unbind(struct i915_vma *vma)
                goto destroy;
 
        GEM_BUG_ON(obj->bind_count == 0);
-       GEM_BUG_ON(!obj->pages);
+       GEM_BUG_ON(!obj->mm.pages);
 
        if (i915_vma_is_map_and_fenceable(vma)) {
                /* release the fence reg _after_ flushing */
@@ -3111,7 +3102,7 @@ int i915_vma_unbind(struct i915_vma *vma)
        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-       if (vma->pages != obj->pages) {
+       if (vma->pages != obj->mm.pages) {
                GEM_BUG_ON(!vma->pages);
                sg_free_table(vma->pages);
                kfree(vma->pages);
@@ -3244,12 +3235,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                return -E2BIG;
        }
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;
 
-       i915_gem_object_pin_pages(obj);
-
        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;
                if (offset & (alignment - 1) || offset > end - size) {
@@ -3331,7 +3320,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
-       if (obj->pages == NULL)
+       if (!obj->mm.pages)
                return false;
 
        /*
@@ -3355,7 +3344,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
        }
 
        trace_i915_gem_object_clflush(obj);
-       drm_clflush_sg(obj->pages);
+       drm_clflush_sg(obj->mm.pages);
        obj->cache_dirty = false;
 
        return true;
@@ -3469,7 +3458,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
         * continue to assume that the obj remained out of the CPU cached
         * domain.
         */
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;
 
@@ -3493,7 +3482,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (write) {
                obj->base.read_domains = I915_GEM_DOMAIN_GTT;
                obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-               obj->dirty = 1;
+               obj->mm.dirty = true;
        }
 
        trace_i915_gem_object_change_domain(obj,
@@ -3502,6 +3491,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
        /* And bump the LRU for this access */
        i915_gem_object_bump_inactive_ggtt(obj);
+       i915_gem_object_unpin_pages(obj);
 
        return 0;
 }
@@ -4304,23 +4294,23 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                goto unlock;
        }
 
-       if (obj->pages &&
+       if (obj->mm.pages &&
            i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-               if (obj->madv == I915_MADV_WILLNEED)
-                       i915_gem_object_unpin_pages(obj);
+               if (obj->mm.madv == I915_MADV_WILLNEED)
+                       __i915_gem_object_unpin_pages(obj);
                if (args->madv == I915_MADV_WILLNEED)
-                       i915_gem_object_pin_pages(obj);
+                       __i915_gem_object_pin_pages(obj);
        }
 
-       if (obj->madv != __I915_MADV_PURGED)
-               obj->madv = args->madv;
+       if (obj->mm.madv != __I915_MADV_PURGED)
+               obj->mm.madv = args->madv;
 
        /* if the object is no longer attached, discard its backing storage */
-       if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+       if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
                i915_gem_object_truncate(obj);
 
-       args->retained = obj->madv != __I915_MADV_PURGED;
+       args->retained = obj->mm.madv != __I915_MADV_PURGED;
 
        i915_gem_object_put(obj);
 unlock:
@@ -4347,9 +4337,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        obj->ops = ops;
 
        obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-       obj->madv = I915_MADV_WILLNEED;
-       INIT_RADIX_TREE(&obj->get_page.radix, GFP_KERNEL | __GFP_NOWARN);
-       mutex_init(&obj->get_page.lock);
+
+       obj->mm.madv = I915_MADV_WILLNEED;
+       INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+       mutex_init(&obj->mm.get_page.lock);
 
        i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
@@ -4441,7 +4432,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
         * back the contents from the GPU.
         */
 
-       if (obj->madv != I915_MADV_WILLNEED)
+       if (obj->mm.madv != I915_MADV_WILLNEED)
                return false;
 
        if (obj->base.filp == NULL)
@@ -4483,32 +4474,27 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        }
        GEM_BUG_ON(obj->bind_count);
 
-       /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
-        * before progressing. */
-       if (obj->stolen)
-               i915_gem_object_unpin_pages(obj);
-
        WARN_ON(atomic_read(&obj->frontbuffer_bits));
 
-       if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+       if (obj->mm.pages && obj->mm.madv == I915_MADV_WILLNEED &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
            i915_gem_object_is_tiled(obj))
-               i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_unpin_pages(obj);
 
-       if (WARN_ON(obj->pages_pin_count))
-               obj->pages_pin_count = 0;
+       if (obj->ops->release)
+               obj->ops->release(obj);
+
+       if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+               obj->mm.pages_pin_count = 0;
        if (discard_backing_storage(obj))
-               obj->madv = I915_MADV_DONTNEED;
-       i915_gem_object_put_pages(obj);
+               obj->mm.madv = I915_MADV_DONTNEED;
+       __i915_gem_object_put_pages(obj);
 
-       BUG_ON(obj->pages);
+       GEM_BUG_ON(obj->mm.pages);
 
        if (obj->base.import_attach)
                drm_prime_gem_destroy(&obj->base, NULL);
 
-       if (obj->ops->release)
-               obj->ops->release(obj);
-
        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
@@ -5063,14 +5049,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        if (ret)
                goto fail;
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto fail;
 
-       i915_gem_object_pin_pages(obj);
-       sg = obj->pages;
+       sg = obj->mm.pages;
        bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-       obj->dirty = 1;         /* Backing store is now out of date */
+       obj->mm.dirty = true; /* Backing store is now out of date */
        i915_gem_object_unpin_pages(obj);
 
        if (WARN_ON(bytes != size)) {
@@ -5091,13 +5076,13 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n,
                       unsigned int *offset)
 {
-       struct i915_gem_object_page_iter *iter = &obj->get_page;
+       struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
        struct scatterlist *sg;
        unsigned int idx, count;
 
        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
-       GEM_BUG_ON(obj->pages_pin_count == 0);
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
        /* As we iterate forward through the sg, we record each entry in a
         * radixtree for quick repeated (backwards) lookups. If we have seen
@@ -5222,7 +5207,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
        struct page *page;
 
        page = i915_gem_object_get_page(obj, n);
-       if (!obj->dirty)
+       if (!obj->mm.dirty)
                set_page_dirty(page);
 
        return page;
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index aa4e1e0..e0f38e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -130,11 +130,10 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
                        return obj;
        }
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ERR_PTR(ret);
 
        list_move_tail(&obj->batch_pool_link, list);
-       i915_gem_object_pin_pages(obj);
        return obj;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 97c9d68..10441dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -48,12 +48,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
        if (ret)
                goto err;
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;
 
-       i915_gem_object_pin_pages(obj);
-
        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
@@ -61,13 +59,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
                goto err_unpin;
        }
 
-       ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+       ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;
 
-       src = obj->pages->sgl;
+       src = obj->mm.pages->sgl;
        dst = st->sgl;
-       for (i = 0; i < obj->pages->nents; i++) {
+       for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
@@ -299,14 +297,14 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
        if (IS_ERR(sg))
                return PTR_ERR(sg);
 
-       obj->pages = sg;
+       obj->mm.pages = sg;
        return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
        dma_buf_unmap_attachment(obj->base.import_attach,
-                                obj->pages, DMA_BIDIRECTIONAL);
+                                obj->mm.pages, DMA_BIDIRECTIONAL);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4cafce9..d95c4e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1281,7 +1281,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-       obj->dirty = 1; /* be paranoid  */
+       obj->mm.dirty = true; /* be paranoid  */
 
        /* Add a reference if we're newly entering the active list.
         * The order in which we add operations to the retirement queue is
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 3c5a808..5aadab5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -664,7 +664,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
                return;
 
        i = 0;
-       for_each_sgt_page(page, sgt_iter, obj->pages) {
+       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
                char new_bit_17 = page_to_phys(page) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj->bit_17) != 0)) {
@@ -703,7 +703,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
        i = 0;
 
-       for_each_sgt_page(page, sgt_iter, obj->pages) {
+       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
                if (page_to_phys(page) & (1 << 17))
                        __set_bit(i, obj->bit_17);
                else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b3f341f..794ccc4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -175,7 +175,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 {
        u32 pte_flags = 0;
 
-       vma->pages = vma->obj->pages;
+       vma->pages = vma->obj->mm.pages;
 
        /* Currently applicable only to VLV */
        if (vma->obj->gt_ro)
@@ -2373,7 +2373,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
        if (!dma_map_sg(&obj->base.dev->pdev->dev,
-                       obj->pages->sgl, obj->pages->nents,
+                       obj->mm.pages->sgl, obj->mm.pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;
 
@@ -2710,7 +2710,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
                }
        }
 
-       dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
+       dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
                     PCI_DMA_BIDIRECTIONAL);
 }
 
@@ -3548,7 +3548,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
 
        /* Populate source page list from the object. */
        i = 0;
-       for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+       for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
                page_addr_list[i++] = dma_addr;
 
        GEM_BUG_ON(i != n_pages);
@@ -3641,7 +3641,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
                return 0;
 
        if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-               vma->pages = vma->obj->pages;
+               vma->pages = vma->obj->mm.pages;
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                vma->pages =
                        intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 02e66fa..08a2576 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -102,10 +102,10 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
                sg = __sg_next(sg);
        } while (1);
-       obj->pages = st;
+       obj->mm.pages = st;
 
        if (i915_gem_gtt_prepare_object(obj)) {
-               obj->pages = NULL;
+               obj->mm.pages = NULL;
                goto err;
        }
 
@@ -114,7 +114,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
         * and the caller is expected to repopulate - the contents of this
         * object are only valid whilst active and pinned.
         */
-       obj->madv = I915_MADV_DONTNEED;
+       obj->mm.madv = I915_MADV_DONTNEED;
        return 0;
 
 err:
@@ -126,10 +126,10 @@ err:
 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj)
 {
        i915_gem_gtt_finish_object(obj);
-       internal_free_pages(obj->pages);
+       internal_free_pages(obj->mm.pages);
 
-       obj->dirty = 0;
-       obj->madv = I915_MADV_WILLNEED;
+       obj->mm.dirty = false;
+       obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 0529324..57918f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -230,7 +230,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
                return 0;
 
        /* Recreate the page after shrinking */
-       if (!so->vma->obj->pages)
+       if (!so->vma->obj->mm.pages)
                so->batch_offset = -1;
 
        ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index de25b6e..124f69a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -78,7 +78,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
-       if (obj->pages_pin_count > obj->bind_count)
+       if (obj->mm.pages_pin_count > obj->bind_count)
                return false;
 
        if (any_vma_pinned(obj))
@@ -88,7 +88,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
         * discard the contents (because the user has marked them as being
         * purgeable) or if we can move their contents out to swap.
         */
-       return swap_available() || obj->madv == I915_MADV_DONTNEED;
+       return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
 }
 
 /**
@@ -175,11 +175,11 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                        list_move_tail(&obj->global_list, &still_in_list);
 
                        if (flags & I915_SHRINK_PURGEABLE &&
-                           obj->madv != I915_MADV_DONTNEED)
+                           obj->mm.madv != I915_MADV_DONTNEED)
                                continue;
 
                        if (flags & I915_SHRINK_VMAPS &&
-                           !is_vmalloc_addr(obj->mapping))
+                           !is_vmalloc_addr(obj->mm.mapping))
                                continue;
 
                        if (!(flags & I915_SHRINK_ACTIVE) &&
@@ -194,7 +194,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
                        /* For the unbound phase, this should be a no-op! */
                        i915_gem_object_unbind(obj);
-                       if (i915_gem_object_put_pages(obj) == 0)
+                       if (__i915_gem_object_put_pages(obj) == 0)
                                count += obj->base.size >> PAGE_SHIFT;
 
                        i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 70e61bc..0acbdcb 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -555,16 +555,17 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 {
        /* Should only be called during free */
-       sg_free_table(obj->pages);
-       kfree(obj->pages);
+       sg_free_table(obj->mm.pages);
+       kfree(obj->mm.pages);
 }
 
-
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
+       __i915_gem_object_unpin_pages(obj);
+
        if (obj->stolen) {
                i915_gem_stolen_remove_node(dev_priv, obj->stolen);
                kfree(obj->stolen);
@@ -590,15 +591,16 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
-       obj->pages = i915_pages_create_for_stolen(dev,
-                                                 stolen->start, stolen->size);
-       if (obj->pages == NULL)
+       obj->mm.pages = i915_pages_create_for_stolen(dev,
+                                                    stolen->start,
+                                                    stolen->size);
+       if (!obj->mm.pages)
                goto cleanup;
 
-       obj->get_page.sg_pos = obj->pages->sgl;
-       obj->get_page.sg_idx = 0;
+       obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
+       obj->mm.get_page.sg_idx = 0;
 
-       i915_gem_object_pin_pages(obj);
+       __i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;
 
        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
@@ -718,14 +720,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                goto err;
        }
 
-       vma->pages = obj->pages;
+       vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
        list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
        obj->bind_count++;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       i915_gem_object_pin_pages(obj);
+       __i915_gem_object_pin_pages(obj);
 
        return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 71f80d2..34d5ada 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -259,13 +259,13 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                if (!err) {
                        struct i915_vma *vma;
 
-                       if (obj->pages &&
-                           obj->madv == I915_MADV_WILLNEED &&
+                       if (obj->mm.pages &&
+                           obj->mm.madv == I915_MADV_WILLNEED &&
                            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                                if (args->tiling_mode == I915_TILING_NONE)
-                                       i915_gem_object_unpin_pages(obj);
+                                       __i915_gem_object_unpin_pages(obj);
                                if (!i915_gem_object_is_tiled(obj))
-                                       i915_gem_object_pin_pages(obj);
+                                       __i915_gem_object_pin_pages(obj);
                        }
 
                        list_for_each_entry(vma, &obj->vma_list, obj_link) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e2fa970..0cbc8f7 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -73,10 +73,10 @@ static void cancel_userptr(struct work_struct *work)
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;
 
-       if (obj->pages != NULL) {
+       if (obj->mm.pages) {
                /* We are inside a kthread context and can't be interrupted */
                WARN_ON(i915_gem_object_unbind(obj));
-               WARN_ON(i915_gem_object_put_pages(obj));
+               WARN_ON(__i915_gem_object_put_pages(obj));
        }
 
        i915_gem_object_put(obj);
@@ -432,15 +432,15 @@ __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
 {
        int ret;
 
-       ret = st_set_pages(&obj->pages, pvec, num_pages);
+       ret = st_set_pages(&obj->mm.pages, pvec, num_pages);
        if (ret)
                return ret;
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
-               sg_free_table(obj->pages);
-               kfree(obj->pages);
-               obj->pages = NULL;
+               sg_free_table(obj->mm.pages);
+               kfree(obj->mm.pages);
+               obj->mm.pages = NULL;
        }
 
        return ret;
@@ -530,8 +530,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                        if (ret == 0) {
                                list_add_tail(&obj->global_list,
                                              &to_i915(dev)->mm.unbound_list);
-                               obj->get_page.sg_pos = obj->pages->sgl;
-                               obj->get_page.sg_idx = 0;
+                               obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
+                               obj->mm.get_page.sg_idx = 0;
                                pinned = 0;
                        }
                }
@@ -672,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);
 
-       if (obj->madv != I915_MADV_WILLNEED)
-               obj->dirty = 0;
+       if (obj->mm.madv != I915_MADV_WILLNEED)
+               obj->mm.dirty = false;
 
        i915_gem_gtt_finish_object(obj);
 
-       for_each_sgt_page(page, sgt_iter, obj->pages) {
-               if (obj->dirty)
+       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+               if (obj->mm.dirty)
                        set_page_dirty(page);
 
                mark_page_accessed(page);
                put_page(page);
        }
-       obj->dirty = 0;
+       obj->mm.dirty = false;
 
-       sg_free_table(obj->pages);
-       kfree(obj->pages);
+       sg_free_table(obj->mm.pages);
+       kfree(obj->mm.pages);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d5feace..5bbb372 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -896,8 +896,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
        err->write_domain = obj->base.write_domain;
        err->fence_reg = vma->fence ? vma->fence->id : -1;
        err->tiling = i915_gem_object_get_tiling(obj);
-       err->dirty = obj->dirty;
-       err->purgeable = obj->madv != I915_MADV_WILLNEED;
+       err->dirty = obj->mm.dirty;
+       err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
        err->cache_level = obj->cache_level;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1c1bd30..cb30549 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -744,7 +744,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
        ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
                i915_ggtt_offset(ce->ring->vma);
 
-       ce->state->obj->dirty = true;
+       ce->state->obj->mm.dirty = true;
 
        /* Invalidate GuC TLB. */
        if (i915.enable_guc_submission) {
@@ -2042,7 +2042,7 @@ populate_lr_context(struct i915_gem_context *ctx,
                DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                return ret;
        }
-       ctx_obj->dirty = true;
+       ctx_obj->mm.dirty = true;
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
@@ -2180,7 +2180,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
                        reg[CTX_RING_HEAD+1] = 0;
                        reg[CTX_RING_TAIL+1] = 0;
 
-                       ce->state->obj->dirty = true;
+                       ce->state->obj->mm.dirty = true;
                        i915_gem_object_unpin_map(ce->state->obj);
 
                        ce->ring->head = ce->ring->tail = 0;