drm/shmem-helper: Switch to reservation lock
author    Dmitry Osipenko <dmitry.osipenko@collabora.com>
          Mon, 21 Mar 2022 00:35:41 +0000 (03:35 +0300)
committer Dmitry Osipenko <dmitry.osipenko@collabora.com>
          Mon, 27 Feb 2023 04:10:56 +0000 (07:10 +0300)
Replace all drm-shmem locks with a GEM reservation lock. This makes the
locking consistent with the dma-buf locking convention, where importers
are responsible for holding the reservation lock for all operations
performed on dma-bufs, preventing deadlocks between dma-buf importers
and exporters.

Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://lore.kernel.org/all/20230108210445.3948344-8-dmitry.osipenko@collabora.com/
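
Under the new convention, callers take the GEM object's reservation lock
around the shmem helpers instead of relying on the helpers' internal
mutexes. A minimal caller-side sketch, assuming a natively allocated
(non-imported) object; the function name is illustrative and not part of
this patch:

#include <linux/dma-resv.h>
#include <linux/iosys-map.h>
#include <drm/drm_gem_shmem_helper.h>

/* Illustrative only: map, use and unmap a shmem GEM object while
 * holding the object's reservation lock, as the helpers now expect. */
static int example_vmap_cycle(struct drm_gem_shmem_object *shmem)
{
        struct iosys_map map;
        int ret;

        ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
        if (ret)
                return ret;

        ret = drm_gem_shmem_vmap(shmem, &map);
        if (!ret) {
                /* ... access map.vaddr ... */
                drm_gem_shmem_vunmap(shmem, &map);
        }

        dma_resv_unlock(shmem->base.resv);

        return ret;
}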
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/lima/lima_gem.c
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
include/drm/drm_gem_shmem_helper.h

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f75e502..3d43e59 100644
@@ -88,8 +88,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
        if (ret)
                goto err_release;
 
-       mutex_init(&shmem->pages_lock);
-       mutex_init(&shmem->vmap_lock);
        INIT_LIST_HEAD(&shmem->madv_list);
 
        if (!private) {
@@ -141,11 +139,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       drm_WARN_ON(obj->dev, shmem->vmap_use_count);
-
        if (obj->import_attach) {
                drm_prime_gem_destroy(obj, shmem->sgt);
        } else {
+               dma_resv_lock(shmem->base.resv, NULL);
+
+               drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+
                if (shmem->sgt) {
                        dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
                                          DMA_BIDIRECTIONAL, 0);
@@ -154,18 +154,18 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
                }
                if (shmem->pages)
                        drm_gem_shmem_put_pages(shmem);
-       }
 
-       drm_WARN_ON(obj->dev, shmem->pages_use_count);
+               drm_WARN_ON(obj->dev, shmem->pages_use_count);
+
+               dma_resv_unlock(shmem->base.resv);
+       }
 
        drm_gem_object_release(obj);
-       mutex_destroy(&shmem->pages_lock);
-       mutex_destroy(&shmem->vmap_lock);
        kfree(shmem);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
 
-static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
        struct page **pages;
@@ -197,35 +197,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 }
 
 /*
- * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
  * @shmem: shmem GEM object
  *
- * This function makes sure that backing pages exists for the shmem GEM object
- * and increases the use count.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function decreases the use count and puts the backing pages when use drops to zero.
  */
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
-       int ret;
 
-       drm_WARN_ON(obj->dev, obj->import_attach);
-
-       ret = mutex_lock_interruptible(&shmem->pages_lock);
-       if (ret)
-               return ret;
-       ret = drm_gem_shmem_get_pages_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_get_pages);
-
-static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
-{
-       struct drm_gem_object *obj = &shmem->base;
+       dma_resv_assert_held(shmem->base.resv);
 
        if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                return;
@@ -243,19 +224,6 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
                          shmem->pages_mark_accessed_on_put);
        shmem->pages = NULL;
 }
-
-/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
-{
-       mutex_lock(&shmem->pages_lock);
-       drm_gem_shmem_put_pages_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
-}
 EXPORT_SYMBOL(drm_gem_shmem_put_pages);
 
 /**
@@ -272,6 +240,8 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
+       dma_resv_assert_held(shmem->base.resv);
+
        drm_WARN_ON(obj->dev, obj->import_attach);
 
        return drm_gem_shmem_get_pages(shmem);
@@ -289,14 +259,31 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
+       dma_resv_assert_held(shmem->base.resv);
+
        drm_WARN_ON(obj->dev, obj->import_attach);
 
        drm_gem_shmem_put_pages(shmem);
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin);
 
-static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
-                                    struct iosys_map *map)
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ *       store.
+ *
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
+                      struct iosys_map *map)
 {
        struct drm_gem_object *obj = &shmem->base;
        int ret = 0;
@@ -312,6 +299,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
        } else {
                pgprot_t prot = PAGE_KERNEL;
 
+               dma_resv_assert_held(shmem->base.resv);
+
                if (shmem->vmap_use_count++ > 0) {
                        iosys_map_set_vaddr(map, shmem->vaddr);
                        return 0;
@@ -346,45 +335,30 @@ err_zero_use:
 
        return ret;
 }
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
 
 /*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
- * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
- *       store.
- *
- * This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object. It hides the differences
- * between dma-buf imported and natively allocated objects.
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
  *
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
+ * zero.
  *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
  */
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
-                      struct iosys_map *map)
-{
-       int ret;
-
-       ret = mutex_lock_interruptible(&shmem->vmap_lock);
-       if (ret)
-               return ret;
-       ret = drm_gem_shmem_vmap_locked(shmem, map);
-       mutex_unlock(&shmem->vmap_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
-
-static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
-                                       struct iosys_map *map)
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
+                         struct iosys_map *map)
 {
        struct drm_gem_object *obj = &shmem->base;
 
        if (obj->import_attach) {
                dma_buf_vunmap(obj->import_attach->dmabuf, map);
        } else {
+               dma_resv_assert_held(shmem->base.resv);
+
                if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
                        return;
 
@@ -397,26 +371,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 
        shmem->vaddr = NULL;
 }
-
-/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
- * @shmem: shmem GEM object
- * @map: Kernel virtual address where the SHMEM GEM object was mapped
- *
- * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
- *
- * This function hides the differences between dma-buf imported and natively
- * allocated objects.
- */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-                         struct iosys_map *map)
-{
-       mutex_lock(&shmem->vmap_lock);
-       drm_gem_shmem_vunmap_locked(shmem, map);
-       mutex_unlock(&shmem->vmap_lock);
-}
 EXPORT_SYMBOL(drm_gem_shmem_vunmap);
 
 static int
@@ -447,24 +401,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
  */
 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
 {
-       mutex_lock(&shmem->pages_lock);
+       dma_resv_assert_held(shmem->base.resv);
 
        if (shmem->madv >= 0)
                shmem->madv = madv;
 
        madv = shmem->madv;
 
-       mutex_unlock(&shmem->pages_lock);
-
        return (madv >= 0);
 }
 EXPORT_SYMBOL(drm_gem_shmem_madvise);
 
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
        struct drm_device *dev = obj->dev;
 
+       dma_resv_assert_held(shmem->base.resv);
+
        drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
        dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
@@ -472,7 +426,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
        kfree(shmem->sgt);
        shmem->sgt = NULL;
 
-       drm_gem_shmem_put_pages_locked(shmem);
+       drm_gem_shmem_put_pages(shmem);
 
        shmem->madv = -1;
 
@@ -488,17 +442,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 
        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
 }
-EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
-
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
-{
-       if (!mutex_trylock(&shmem->pages_lock))
-               return false;
-       drm_gem_shmem_purge_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
-
-       return true;
-}
 EXPORT_SYMBOL(drm_gem_shmem_purge);
 
 /**
@@ -551,7 +494,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
-       mutex_lock(&shmem->pages_lock);
+       dma_resv_lock(shmem->base.resv, NULL);
 
        if (page_offset >= num_pages ||
            drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
@@ -563,7 +506,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
                ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
        }
 
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
        return ret;
 }
@@ -575,7 +518,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       mutex_lock(&shmem->pages_lock);
+       dma_resv_lock(shmem->base.resv, NULL);
 
        /*
         * We should have already pinned the pages when the buffer was first
@@ -585,7 +528,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
        if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                shmem->pages_use_count++;
 
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
        drm_gem_vm_open(vma);
 }
@@ -595,7 +538,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+       dma_resv_lock(shmem->base.resv, NULL);
        drm_gem_shmem_put_pages(shmem);
+       dma_resv_unlock(shmem->base.resv);
+
        drm_gem_vm_close(vma);
 }
 
@@ -630,7 +576,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
                return dma_buf_mmap(obj->dma_buf, vma, 0);
        }
 
+       dma_resv_lock(shmem->base.resv, NULL);
        ret = drm_gem_shmem_get_pages(shmem);
+       dma_resv_unlock(shmem->base.resv);
+
        if (ret)
                return ret;
 
@@ -696,7 +645,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       ret = drm_gem_shmem_get_pages_locked(shmem);
+       ret = drm_gem_shmem_get_pages(shmem);
        if (ret)
                return ERR_PTR(ret);
 
@@ -718,7 +667,7 @@ err_free_sgt:
        sg_free_table(sgt);
        kfree(sgt);
 err_put_pages:
-       drm_gem_shmem_put_pages_locked(shmem);
+       drm_gem_shmem_put_pages(shmem);
        return ERR_PTR(ret);
 }
 
@@ -743,11 +692,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
        int ret;
        struct sg_table *sgt;
 
-       ret = mutex_lock_interruptible(&shmem->pages_lock);
+       ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
        if (ret)
                return ERR_PTR(ret);
        sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
        return sgt;
 }
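
Note the split visible above: drm_gem_shmem_get_pages_sgt() still takes
the reservation lock internally, while pin/unpin and the other helpers
only assert it. A hedged sketch of a pinning caller under those rules
(example_pin is a hypothetical name):

#include <linux/dma-resv.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical caller: drm_gem_shmem_pin() asserts the reservation
 * lock after this patch, so the caller acquires it first. */
static int example_pin(struct drm_gem_shmem_object *shmem)
{
        int ret;

        ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
        if (ret)
                return ret;

        ret = drm_gem_shmem_pin(shmem);

        dma_resv_unlock(shmem->base.resv);

        return ret;
}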
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 0f1ca0b..5008f0c 100644
@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 
        new_size = min(new_size, bo->base.base.size);
 
-       mutex_lock(&bo->base.pages_lock);
+       dma_resv_lock(bo->base.base.resv, NULL);
 
        if (bo->base.pages) {
                pages = bo->base.pages;
@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
                                       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
                if (!pages) {
-                       mutex_unlock(&bo->base.pages_lock);
+                       dma_resv_unlock(bo->base.base.resv);
                        return -ENOMEM;
                }
 
@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                struct page *page = shmem_read_mapping_page(mapping, i);
 
                if (IS_ERR(page)) {
-                       mutex_unlock(&bo->base.pages_lock);
+                       dma_resv_unlock(bo->base.base.resv);
                        return PTR_ERR(page);
                }
                pages[i] = page;
        }
 
-       mutex_unlock(&bo->base.pages_lock);
+       dma_resv_unlock(bo->base.base.resv);
 
        ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
                                        new_size, GFP_KERNEL);
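
The lima change follows the same shape: every early return inside the
locked region drops the reservation lock first. A condensed sketch of
that pattern, with illustrative names and an assumed pre-allocated
pages array:

#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/shmem_fs.h>
#include <drm/drm_gem_shmem_helper.h>

/* Illustrative only: read back pages under the reservation lock,
 * unlocking on the error path before returning, as lima does above. */
static int example_populate(struct drm_gem_shmem_object *shmem,
                            struct page **pages, pgoff_t npages)
{
        struct address_space *mapping = shmem->base.filp->f_mapping;
        pgoff_t i;

        dma_resv_lock(shmem->base.resv, NULL);

        for (i = 0; i < npages; i++) {
                struct page *page = shmem_read_mapping_page(mapping, i);

                if (IS_ERR(page)) {
                        dma_resv_unlock(shmem->base.resv);
                        return PTR_ERR(page);
                }
                pages[i] = page;
        }

        dma_resv_unlock(shmem->base.resv);

        return 0;
}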
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f49096f..aa292e4 100644
@@ -407,6 +407,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 
        bo = to_panfrost_bo(gem_obj);
 
+       ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
+       if (ret)
+               goto out_put_object;
+
        mutex_lock(&pfdev->shrinker_lock);
        mutex_lock(&bo->mappings.lock);
        if (args->madv == PANFROST_MADV_DONTNEED) {
@@ -444,7 +448,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 out_unlock_mappings:
        mutex_unlock(&bo->mappings.lock);
        mutex_unlock(&pfdev->shrinker_lock);
-
+       dma_resv_unlock(bo->base.base.resv);
+out_put_object:
        drm_gem_object_put(gem_obj);
        return ret;
 }
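
The madvise ioctl now takes the reservation lock interruptibly so a
pending signal can abort the ioctl, and unwinds through labels. A
reduced sketch of that shape (the helper name and the errno mapping are
assumptions, not panfrost's exact logic):

#include <linux/dma-resv.h>
#include <drm/drm_gem_shmem_helper.h>

/* Illustrative only: ioctl-style callers use the interruptible lock
 * variant; mapping the madvise result to -EINVAL is an assumption. */
static int example_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
        int ret;

        ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
        if (ret)
                return ret;

        ret = drm_gem_shmem_madvise(shmem, madv) ? 0 : -EINVAL;

        dma_resv_unlock(shmem->base.resv);

        return ret;
}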
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index bf01707..6a71a25 100644
@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
        if (!mutex_trylock(&bo->mappings.lock))
                return false;
 
-       if (!mutex_trylock(&shmem->pages_lock))
+       if (!dma_resv_trylock(shmem->base.resv))
                goto unlock_mappings;
 
        panfrost_gem_teardown_mappings_locked(bo);
-       drm_gem_shmem_purge_locked(&bo->base);
+       drm_gem_shmem_purge(&bo->base);
        ret = true;
 
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
 unlock_mappings:
        mutex_unlock(&bo->mappings.lock);
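
Shrinker callbacks run in reclaim context and must not sleep on the
reservation lock, hence the switch from mutex_trylock() to
dma_resv_trylock() above. A sketch of that pattern (the function name
is illustrative):

#include <linux/dma-resv.h>
#include <drm/drm_gem_shmem_helper.h>

/* Illustrative only: skip the object when the lock cannot be taken
 * immediately, rather than blocking reclaim. */
static bool example_try_purge(struct drm_gem_shmem_object *shmem)
{
        if (!dma_resv_trylock(shmem->base.resv))
                return false;

        if (drm_gem_shmem_is_purgeable(shmem))
                drm_gem_shmem_purge(shmem);

        dma_resv_unlock(shmem->base.resv);

        return true;
}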
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 4e83a18..94421fa 100644
@@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
+       struct drm_gem_object *obj;
        pgoff_t page_offset;
        struct sg_table *sgt;
        struct page **pages;
@@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        page_offset = addr >> PAGE_SHIFT;
        page_offset -= bomapping->mmnode.start;
 
-       mutex_lock(&bo->base.pages_lock);
+       obj = &bo->base.base;
+
+       dma_resv_lock(obj->resv, NULL);
 
        if (!bo->base.pages) {
                bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
                                     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
                if (!bo->sgts) {
-                       mutex_unlock(&bo->base.pages_lock);
                        ret = -ENOMEM;
-                       goto err_bo;
+                       goto err_unlock;
                }
 
                pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                if (!pages) {
                        kvfree(bo->sgts);
                        bo->sgts = NULL;
-                       mutex_unlock(&bo->base.pages_lock);
                        ret = -ENOMEM;
-                       goto err_bo;
+                       goto err_unlock;
                }
                bo->base.pages = pages;
                bo->base.pages_use_count = 1;
@@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                pages = bo->base.pages;
                if (pages[page_offset]) {
                        /* Pages are already mapped, bail out. */
-                       mutex_unlock(&bo->base.pages_lock);
                        goto out;
                }
        }
@@ -502,14 +502,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
                pages[i] = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(pages[i])) {
-                       mutex_unlock(&bo->base.pages_lock);
                        ret = PTR_ERR(pages[i]);
                        goto err_pages;
                }
        }
 
-       mutex_unlock(&bo->base.pages_lock);
-
        sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                        NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
@@ -528,6 +525,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
 out:
+       dma_resv_unlock(obj->resv);
+
        panfrost_gem_mapping_put(bomapping);
 
        return 0;
@@ -536,6 +535,8 @@ err_map:
        sg_free_table(sgt);
 err_pages:
        drm_gem_shmem_put_pages(&bo->base);
+err_unlock:
+       dma_resv_unlock(obj->resv);
 err_bo:
        panfrost_gem_mapping_put(bomapping);
        return ret;
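
The fault-handler rework above replaces per-branch unlocks with a
shared err_unlock label. A stripped-down sketch of that control flow,
where example_map_step() is a placeholder standing in for the real
mapping work:

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

/* Placeholder for the actual mapping work done under the lock. */
static int example_map_step(struct drm_gem_object *obj)
{
        return 0;
}

/* Illustrative only: success and error paths share one unlock point. */
static int example_fault(struct drm_gem_object *obj)
{
        int ret;

        dma_resv_lock(obj->resv, NULL);

        ret = example_map_step(obj);
        if (ret)
                goto err_unlock;

        dma_resv_unlock(obj->resv);

        return 0;

err_unlock:
        dma_resv_unlock(obj->resv);
        return ret;
}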
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 5994fed..20ddcd7 100644
@@ -27,11 +27,6 @@ struct drm_gem_shmem_object {
        struct drm_gem_object base;
 
        /**
-        * @pages_lock: Protects the page table and use count
-        */
-       struct mutex pages_lock;
-
-       /**
         * @pages: Page table
         */
        struct page **pages;
@@ -66,11 +61,6 @@ struct drm_gem_shmem_object {
        struct sg_table *sgt;
 
        /**
-        * @vmap_lock: Protects the vmap address and use count
-        */
-       struct mutex vmap_lock;
-
-       /**
         * @vaddr: Kernel virtual address of the backing memory
         */
        void *vaddr;
@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
 
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
                !shmem->base.dma_buf && !shmem->base.import_attach;
 }
 
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
 
 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
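
With both mutexes gone from struct drm_gem_shmem_object, lockdep
coverage now comes from dma_resv_assert_held() inside the helpers. A
driver-internal helper following the same convention might look like
this (hypothetical name):

#include <linux/dma-resv.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical driver helper: documents and enforces that its caller
 * already holds the object's reservation lock. */
static void example_set_madv(struct drm_gem_shmem_object *shmem, int madv)
{
        dma_resv_assert_held(shmem->base.resv);

        shmem->madv = madv;
}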