drm/qxl: stop using TTM to call driver internal functions
author	Christian König <christian.koenig@amd.com>
Fri, 27 Sep 2019 13:06:22 +0000 (15:06 +0200)
committer	Christian König <christian.koenig@amd.com>
Fri, 25 Oct 2019 09:40:51 +0000 (11:40 +0200)
The ttm_mem_io_* functions were intended to be internal to TTM and
should never have been used by drivers. They were exported in commit
afe6804c045fbd69a1b75c681107b5d6df9190de just for QXL.

Instead, call the qxl_ttm_io_mem_reserve() function directly and drop
the free call entirely, since it is a dummy on QXL.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
Link: https://patchwork.freedesktop.org/patch/333289/
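
For context, a minimal sketch of why dropping the free call is safe, assuming
the QXL io_mem_free callback is the empty stub the message refers to as a
dummy. The names and header below are illustrative, not quoted from the patch:

#include <drm/ttm/ttm_bo_driver.h>

/* Illustrative stub, assumed to match the driver's io_mem_free hook:
 * it releases nothing, so the kunmap path has no io state to tear down
 * and the explicit ttm_mem_io_free() call can simply be dropped.
 */
static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
	/* Intentionally empty: the reserve side only fills in mem->bus
	 * (base/offset/is_iomem) and allocates no resources to undo.
	 */
}

On the map side the patch below therefore calls qxl_ttm_io_mem_reserve()
directly, without the ttm_mem_io_lock()/ttm_mem_io_unlock() pair that TTM
uses internally.
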
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_ttm.c

diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index a5cb386..27e45a2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -355,6 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
 /* qxl ttm */
 int qxl_ttm_init(struct qxl_device *qdev);
 void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+                          struct ttm_mem_reg *mem);
 
 /* qxl image */
 
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index ad336c9..ab72dc3 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -167,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                              struct qxl_bo *bo, int page_offset)
 {
-       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        void *rptr;
        int ret;
        struct io_mapping *map;
@@ -179,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
        else
                goto fallback;
 
-       (void) ttm_mem_io_lock(man, false);
-       ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
-       ttm_mem_io_unlock(man);
+       ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
 
        return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
 fallback:
@@ -212,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, void *pmap)
 {
-       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
        if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
            (bo->tbo.mem.mem_type != TTM_PL_PRIV))
                goto fallback;
 
        io_mapping_unmap_atomic(pmap);
-
-       (void) ttm_mem_io_lock(man, false);
-       ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
-       ttm_mem_io_unlock(man);
        return;
  fallback:
        qxl_bo_kunmap(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 4b13b0b..16a5e90 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -110,8 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
        *placement = qbo->placement;
 }
 
-static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
-                                 struct ttm_mem_reg *mem)
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+                          struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct qxl_device *qdev = qxl_get_qdev(bdev);