As the previous patch fixed the places where we walk the whole scatterlist
for DMA addresses, this patch fixes the random lookup functionality.
To achieve this we have to add a second lookup iterator and an
i915_gem_object_get_sg_dma helper, to be used analogously to the existing
i915_gem_object_get_sg. Two lookup caches are therefore maintained per
object, and both are flushed at the same point for simplicity. (Strictly
speaking the DMA cache should be flushed from i915_gem_gtt_finish_pages,
but today this coincides with unsetting of the pages in general.)
The partial VMA view is then fixed to use the new DMA lookup and to
properly query the sg length.
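For reference, a DMA address lookup built on the new helper then looks
roughly like the sketch below (simplified from
i915_gem_object_get_dma_address_len after this patch; the offset returned
by the helper is in pages within the returned sg entry):

    /* Sketch: DMA address lookup via the new per-object DMA iterator. */
    dma_addr_t
    i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                        unsigned long n,
                                        unsigned int *len)
    {
            struct scatterlist *sg;
            unsigned int offset;

            /* Find the sg entry covering page n of the DMA mapping. */
            sg = i915_gem_object_get_sg_dma(obj, n, &offset);

            /* Remaining DMA length from page n to the end of this entry. */
            if (len)
                    *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

            return sg_dma_address(sg) + (offset << PAGE_SHIFT);
    }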
v2:
* Checkpatch.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Cc: Tom Murphy <murphyt7@tcd.ie>
Cc: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201006092508.1064287-2-tvrtko.ursulin@linux.intel.com
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
+ INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_dma_page.lock);
if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
unsigned int tiling, unsigned int stride);
struct scatterlist *
+__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ unsigned int n,
+ unsigned int *offset);
+
+static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n, unsigned int *offset);
+ unsigned int n,
+ unsigned int *offset)
+{
+ return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
+}
+
+static inline struct scatterlist *
+i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
+}
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
struct rb_node offset;
};
+struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+};
+
struct drm_i915_gem_object {
struct drm_gem_object base;
I915_SELFTEST_DECLARE(unsigned int page_mask);
- struct i915_gem_object_page_iter {
- struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
-
- struct radix_tree_root radix;
- struct mutex lock; /* protects this cache */
- } get_page;
+ struct i915_gem_object_page_iter get_page;
+ struct i915_gem_object_page_iter get_dma_page;
/**
* Element within i915->mm.unbound_list or i915->mm.bound_list,
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
+ obj->mm.get_dma_page.sg_pos = pages->sgl;
+ obj->mm.get_dma_page.sg_idx = 0;
obj->mm.pages = pages;
rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
rcu_read_unlock();
}
}
struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
+__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ unsigned int n,
+ unsigned int *offset)
{
- struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ const bool dma = iter == &obj->mm.get_dma_page;
struct scatterlist *sg;
unsigned int idx, count;
sg = iter->sg_pos;
idx = iter->sg_idx;
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
while (idx + count <= n) {
void *entry;
idx += count;
sg = ____sg_next(sg);
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
}
scan:
while (idx + count <= n) {
idx += count;
sg = ____sg_next(sg);
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
}
*offset = n - idx;
struct scatterlist *sg;
unsigned int offset;
- sg = i915_gem_object_get_sg(obj, n, &offset);
+ sg = i915_gem_object_get_sg_dma(obj, n, &offset);
if (len)
*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
if (ret)
goto err_sg_alloc;
- iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
+ iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
GEM_BUG_ON(!iter);
sg = st->sgl;
do {
unsigned int len;
- len = min(iter->length - (offset << PAGE_SHIFT),
+ len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
count << PAGE_SHIFT);
sg_set_page(sg, NULL, len, 0);
sg_dma_address(sg) =
return sg->length >> PAGE_SHIFT;
}
+static inline int __sg_dma_page_count(const struct scatterlist *sg)
+{
+ return sg_dma_len(sg) >> PAGE_SHIFT;
+}
+
static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
++sg;