drm/tegra: Prepare for dynamic dma-buf locking specification
authorDmitry Osipenko <dmitry.osipenko@collabora.com>
Mon, 17 Oct 2022 17:22:16 +0000 (20:22 +0300)
committerDmitry Osipenko <dmitry.osipenko@collabora.com>
Mon, 17 Oct 2022 22:21:44 +0000 (01:21 +0300)
Prepare the Tegra DRM driver for the common dynamic dma-buf locking convention
by starting to use the unlocked versions of dma-buf API functions.

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-9-dmitry.osipenko@collabora.com
drivers/gpu/drm/tegra/gem.c

index 8199109..b09b8ab 100644 (file)
@@ -84,7 +84,7 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
                        goto free;
                }
 
-               map->sgt = dma_buf_map_attachment(map->attach, direction);
+               map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
                if (IS_ERR(map->sgt)) {
                        dma_buf_detach(buf, map->attach);
                        err = PTR_ERR(map->sgt);
@@ -160,7 +160,8 @@ free:
 static void tegra_bo_unpin(struct host1x_bo_mapping *map)
 {
        if (map->attach) {
-               dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+               dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
+                                                 map->direction);
                dma_buf_detach(map->attach->dmabuf, map->attach);
        } else {
                dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
@@ -181,7 +182,7 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
        if (obj->vaddr) {
                return obj->vaddr;
        } else if (obj->gem.import_attach) {
-               ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+               ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
                return ret ? NULL : map.vaddr;
        } else {
                return vmap(obj->pages, obj->num_pages, VM_MAP,
@@ -197,7 +198,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
-               dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
+               dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
        else
                vunmap(addr);
 }
@@ -461,7 +462,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
 
        get_dma_buf(buf);
 
-       bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+       bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
@@ -479,7 +480,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
 
 detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
-               dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+               dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
 
        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
@@ -508,8 +509,8 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
                tegra_bo_iommu_unmap(tegra, bo);
 
        if (gem->import_attach) {
-               dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
-                                        DMA_TO_DEVICE);
+               dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
+                                                 DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);