Merge tag 'drm-msm-next-2020-09-27' of https://gitlab.freedesktop.org/drm/msm into...
author		Dave Airlie <airlied@redhat.com>
		Tue, 29 Sep 2020 00:18:49 +0000 (10:18 +1000)
committer	Dave Airlie <airlied@redhat.com>
		Tue, 29 Sep 2020 00:18:49 +0000 (10:18 +1000)
* DSI support for sm8150/sm8250
* Support for per-process GPU pagetables (finally!) for a6xx.
  There are still some iommu/arm-smmu changes required to
  enable this; without them, the driver falls back to the
  current single-pgtable state (see the sketch below).  The
  first part (ie. what doesn't depend on drm side patches)
  is queued up for v5.10[1].
* DisplayPort support.  Userspace DP compliance tool support
  is already merged in IGT[2].
* The usual assortment of smaller fixes/cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvqjuzH=Po_9EzzFsp2Xq3tqJUTKfsA2g09XY7_+6Ypfw@mail.gmail.com
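
As a rough sketch of the per-process pagetable fallback described above
(the helper name get_ctx_aspace and its exact signature are illustrative,
not the driver's actual API):

	/*
	 * On context open, try to get a per-process address space; if
	 * the target (or the arm-smmu setup underneath) can't provide
	 * one, fall back to the GPU's single global pagetable.
	 */
	static struct msm_gem_address_space *
	get_ctx_aspace(struct msm_gpu *gpu)
	{
		struct msm_gem_address_space *aspace = NULL;

		if (gpu->funcs->create_private_address_space)
			aspace = gpu->funcs->create_private_address_space(gpu);

		/* no per-process support (or it failed): use the global one */
		if (IS_ERR_OR_NULL(aspace))
			aspace = msm_gem_address_space_get(gpu->aspace);

		return aspace;
	}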
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gpummu.c
drivers/gpu/drm/msm/msm_iommu.c
include/drm/drm_dp_helper.h

@@@ -57,9 -67,9 +67,9 @@@ static void a5xx_submit_in_rb(struct ms
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        /* copy commands into RB: */
                        obj = submit->bos[submit->cmd[i].idx].obj;
@@@ -148,9 -157,9 +157,9 @@@ static void a5xx_submit(struct msm_gpu 
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
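
(The fall-thru comment conversions above use the kernel's fallthrough
pseudo-keyword from <linux/compiler_attributes.h>, which expands to
__attribute__((__fallthrough__)) where the compiler supports it.  A
condensed illustration of the resulting pattern:)

	switch (submit->cmd[i].type) {
	case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
		if (priv->lastctx == submit->queue->ctx)
			break;
		fallthrough;	/* annotated: keeps -Wimplicit-fallthrough quiet */
	case MSM_SUBMIT_CMD_BUF:
		/* emit the indirect buffer */
		break;
	}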
Simple merge
@@@ -115,9 -170,9 +170,9 @@@ static void a6xx_submit(struct msm_gpu 
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
@@@ -52,23 -52,16 +52,14 @@@ static void sync_for_device(struct msm_
  {
        struct device *dev = msm_obj->base.dev->dev;
  
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_device(dev, msm_obj->sgt,
-                                           DMA_BIDIRECTIONAL);
-       } else {
-               dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
 -      dma_map_sg(dev, msm_obj->sgt->sgl,
 -              msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  static void sync_for_cpu(struct msm_gem_object *msm_obj)
  {
        struct device *dev = msm_obj->base.dev->dev;
  
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
-       } else {
-               dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
 -      dma_unmap_sg(dev, msm_obj->sgt->sgl,
 -              msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /* allocate pages from VRAM carveout, used when no IOMMU: */
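
(For reference on the hunk above: dma_map_sgtable()/dma_unmap_sgtable()
wrap dma_map_sg() and track the mapped-entry count inside the sg_table,
so callers no longer juggle nents themselves.  A minimal usage sketch,
with the surrounding buffer setup assumed:)

	int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* 0 on success, negative errno on failure */

	/* ... device accesses the buffer ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);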
Simple merge
@@@ -36,7 -231,11 +231,11 @@@ static int msm_iommu_map(struct msm_mm
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        size_t ret;
  
 -      ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+       /* The arm-smmu driver expects the addresses to be sign extended */
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
 +      ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
        WARN_ON(!ret);
  
        return (ret == len) ? 0 : -EINVAL;
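
(The sign extension added above: when bit 48 of the iova is set, bits
63:49 must be set as well before handing the address to arm-smmu.  A
standalone illustration with the kernel macros spelled out:)

	/* sign-extend a 49-bit iova into 64 bits, as arm-smmu expects */
	static inline u64 sign_extend_iova48(u64 iova)
	{
		if (iova & (1ULL << 48))	/* BIT_ULL(48) */
			iova |= ~0ULL << 49;	/* GENMASK_ULL(63, 49) */
		return iova;
	}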
Simple merge