From: Tvrtko Ursulin
Date: Tue, 6 Oct 2020 09:25:07 +0000 (+0100)
Subject: drm/i915: Fix DMA mapped scatterlist walks
X-Git-Tag: accepted/tizen/unified/20230118.172025~8358^2~8^2~39
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8a473dbadccfc6206150de3db3223c40785da348;p=platform%2Fkernel%2Flinux-rpi.git

drm/i915: Fix DMA mapped scatterlist walks

When walking DMA mapped scatterlists, sg_dma_len has to be used since it
can be different (coalesced) from the length of the backing store entry.

This also means we have to end the walk when encountering a zero length
DMA entry and cannot rely on the normal sg list end marker.

Both issues were there in theory for some time but were hidden by the
fact the Intel IOMMU driver was never coalescing entries. As there are
ongoing efforts to change this we need to start handling it.

v2:
 * Use unsigned int for locally storing sg_dma_len. (Logan)

Signed-off-by: Tvrtko Ursulin
References: 85d1225ec066 ("drm/i915: Introduce & use new lightweight SGL iterators")
References: b31144c0daa8 ("drm/i915: Micro-optimise gen6_ppgtt_insert_entries()")
Reported-by: Tom Murphy
Suggested-by: Tom Murphy # __sgt_iter
Suggested-by: Logan Gunthorpe # __sgt_iter
Cc: Joonas Lahtinen
Cc: Chris Wilson
Cc: Matthew Auld
Cc: Lu Baolu
Reviewed-by: Logan Gunthorpe
Link: https://patchwork.freedesktop.org/patch/msgid/20201006092508.1064287-1-tvrtko.ursulin@linux.intel.com
---

diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index fd0d24d..c0d17f8 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -131,17 +131,17 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 
 	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
 	do {
-		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
+		GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
 
 		iter.dma += I915_GTT_PAGE_SIZE;
 		if (iter.dma == iter.max) {
 			iter.sg = __sg_next(iter.sg);
-			if (!iter.sg)
+			if (!iter.sg || sg_dma_len(iter.sg) == 0)
 				break;
 
 			iter.dma = sg_dma_address(iter.sg);
-			iter.max = iter.dma + iter.sg->length;
+			iter.max = iter.dma + sg_dma_len(iter.sg);
 		}
 
 		if (++act_pte == GEN6_PTES) {
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index eb64f47..b236aa0 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -372,19 +372,19 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
 	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 	do {
-		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
+		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
 		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
 
 		iter->dma += I915_GTT_PAGE_SIZE;
 		if (iter->dma >= iter->max) {
 			iter->sg = __sg_next(iter->sg);
-			if (!iter->sg) {
+			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
 				idx = 0;
 				break;
 			}
 
 			iter->dma = sg_dma_address(iter->sg);
-			iter->max = iter->dma + iter->sg->length;
+			iter->max = iter->dma + sg_dma_len(iter->sg);
 		}
 
 		if (gen8_pd_index(++idx, 0) == 0) {
@@ -413,8 +413,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 				   u32 flags)
 {
 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+	unsigned int rem = sg_dma_len(iter->sg);
 	u64 start = vma->node.start;
-	dma_addr_t rem = iter->sg->length;
 
 	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
 
@@ -456,7 +456,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 		}
 
 		do {
-			GEM_BUG_ON(iter->sg->length < page_size);
+			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
 			vaddr[index++] = encode | iter->dma;
 
 			start += page_size;
@@ -467,7 +467,10 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 				if (!iter->sg)
 					break;
 
-				rem = iter->sg->length;
+				rem = sg_dma_len(iter->sg);
+				if (!rem)
+					break;
+
 				iter->dma = sg_dma_address(iter->sg);
 				iter->max = iter->dma + rem;
 
@@ -525,7 +528,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 		}
 
 		vma->page_sizes.gtt |= page_size;
-	} while (iter->sg);
+	} while (iter->sg && sg_dma_len(iter->sg));
 }
 
 static void gen8_ppgtt_insert(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index c13c650c..8a33940 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -580,7 +580,7 @@ static inline struct sgt_dma {
 	struct scatterlist *sg = vma->pages->sgl;
 	dma_addr_t addr = sg_dma_address(sg);
 
-	return (struct sgt_dma){ sg, addr, addr + sg->length };
+	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b7b5932..5108568 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -27,13 +27,17 @@ static __always_inline struct sgt_iter {
 } __sgt_iter(struct scatterlist *sgl, bool dma) {
 	struct sgt_iter s = { .sgp = sgl };
 
-	if (s.sgp) {
+	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
+		s.sgp = NULL;
+	} else if (s.sgp) {
 		s.max = s.curr = s.sgp->offset;
-		s.max += s.sgp->length;
-		if (dma)
+		if (dma) {
 			s.dma = sg_dma_address(s.sgp);
-		else
+			s.max += sg_dma_len(s.sgp);
+		} else {
 			s.pfn = page_to_pfn(sg_page(s.sgp));
+			s.max += s.sgp->length;
+		}
 	}
 
 	return s;
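
Note (not part of the patch): the rule described in the commit message can be
summarised as a small standalone sketch. The helper walk_dma_sg() below is
hypothetical and only illustrates the idea: after DMA mapping, consume each
entry through sg_dma_address()/sg_dma_len() and stop at the first zero DMA
length, rather than trusting sg->length or the sg list end marker alone.

/* Illustrative only; walk_dma_sg() is not part of i915 or this patch. */
#include <linux/scatterlist.h>
#include <linux/printk.h>

static void walk_dma_sg(struct scatterlist *sgl)
{
	struct scatterlist *sg;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		dma_addr_t dma = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);	/* DMA length, not sg->length */

		if (!len)	/* coalescing ended the DMA view before the end marker */
			break;

		pr_info("DMA segment %pad + %u\n", &dma, len);
	}
}

In the patch itself the same rule is applied through the existing i915
iterators (__sgt_iter() and struct sgt_dma) rather than an open-coded loop.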