drm/etnaviv: convert user fence tracking to XArray
author    Lucas Stach <l.stach@pengutronix.de>
          Thu, 1 Dec 2022 17:48:46 +0000 (18:48 +0100)
committer Lucas Stach <l.stach@pengutronix.de>
          Wed, 1 Feb 2023 15:32:26 +0000 (16:32 +0100)
This simplifies the driver code a bit, as XArray already provides
internal locking. IDRs are implemented using XArrays anyway, so
this drops one level of unneeded abstraction.
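 
In practice this means the mutex-protected idr_alloc_cyclic()/idr_remove()
pair becomes xa_alloc_cyclic()/xa_erase(), which take the XArray's internal
spinlock themselves. Condensed sketch of the allocation path (illustrative
only, using the same field names as the code below):

	/* before: the IDR needs an external lock around the allocation */
	mutex_lock(&gpu->idr_lock);
	id = idr_alloc_cyclic(&gpu->fence_idr, fence, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&gpu->idr_lock);

	/* after: the XArray locks internally; needs XA_FLAGS_ALLOC at init */
	ret = xa_alloc_cyclic(&gpu->user_fences, &id, fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);

The lookup side is unaffected: xa_load() is safe under rcu_read_lock(),
just as idr_find() was.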

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_sched.c

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 2bb4c25..0b311af 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -12,6 +12,7 @@
 #include <linux/sizes.h>
 #include <linux/time64.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index a602c16..45403ea 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -393,10 +393,11 @@ static void submit_cleanup(struct kref *kref)
        wake_up_all(&submit->gpu->fence_event);
 
        if (submit->out_fence) {
-               /* first remove from IDR, so fence can not be found anymore */
-               mutex_lock(&submit->gpu->idr_lock);
-               idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-               mutex_unlock(&submit->gpu->idr_lock);
+               /*
+                * Remove from user fence array before dropping the reference,
+                * so fence can not be found in lookup anymore.
+                */
+               xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
                dma_fence_put(submit->out_fence);
        }
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 7bb193b..5f14eff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1244,7 +1244,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
         * pretends we didn't find a fence in that case.
         */
        rcu_read_lock();
-       fence = idr_find(&gpu->fence_idr, id);
+       fence = xa_load(&gpu->user_fences, id);
        if (fence)
                fence = dma_fence_get_rcu(fence);
        rcu_read_unlock();
@@ -1744,7 +1744,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 
        gpu->drm = drm;
        gpu->fence_context = dma_fence_context_alloc(1);
-       idr_init(&gpu->fence_idr);
+       xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
        spin_lock_init(&gpu->fence_spinlock);
 
        INIT_WORK(&gpu->sync_point_work, sync_point_worker);
@@ -1798,7 +1798,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
        }
 
        gpu->drm = NULL;
-       idr_destroy(&gpu->fence_idr);
+       xa_destroy(&gpu->user_fences);
 
        if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
                thermal_cooling_device_unregister(gpu->cooling);
@@ -1831,7 +1831,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
        gpu->dev = &pdev->dev;
        mutex_init(&gpu->lock);
        mutex_init(&gpu->sched_lock);
-       mutex_init(&gpu->idr_lock);
 
        /* Map registers: */
        gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index b868654..98c6f9c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -121,8 +121,8 @@ struct etnaviv_gpu {
        u32 idle_mask;
 
        /* Fencing support */
-       struct mutex idr_lock;
-       struct idr fence_idr;
+       struct xarray user_fences;
+       u32 next_user_fence;
        u32 next_fence;
        u32 completed_fence;
        wait_queue_head_t fence_event;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 916e117..1ae87df 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -98,7 +98,7 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 {
        struct etnaviv_gpu *gpu = submit->gpu;
-       int ret = 0;
+       int ret;
 
        /*
         * Hold the sched lock across the whole operation to avoid jobs being
@@ -110,14 +110,11 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
        drm_sched_job_arm(&submit->sched_job);
 
        submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-       mutex_lock(&gpu->idr_lock);
-       submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr,
-                                               submit->out_fence, 0,
-                                               INT_MAX, GFP_KERNEL);
-       mutex_unlock(&gpu->idr_lock);
-       if (submit->out_fence_id < 0) {
+       ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
+                             submit->out_fence, xa_limit_32b,
+                             &gpu->next_user_fence, GFP_KERNEL);
+       if (ret < 0) {
                drm_sched_job_cleanup(&submit->sched_job);
-               ret = -ENOMEM;
                goto out_unlock;
        }