gpu: host1x: Add DMA fence implementation
author     Mikko Perttunen <mperttunen@nvidia.com>
           Thu, 10 Jun 2021 11:04:42 +0000 (14:04 +0300)
committer  Thierry Reding <treding@nvidia.com>
           Tue, 10 Aug 2021 12:39:50 +0000 (14:39 +0200)
Add an implementation of dma_fences based on syncpoints. Syncpoint
interrupts are used to signal fences. Additionally, after software
signaling has been enabled, a 30-second timeout is started. If the
syncpoint threshold is not reached within this period, the fence is
signalled with an -ETIMEDOUT error code. This allows fences that would
never reach their syncpoint threshold to be cleaned up. The timeout can
potentially be removed in the future once the job tracking code has
been refactored.
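
For illustration, a host1x client could consume the new API roughly as
follows. This is a minimal sketch, not part of this patch; 'sp' and
'threshold' stand for a syncpoint and threshold owned by the caller,
and the function name is made up:

  #include <linux/dma-fence.h>
  #include <linux/host1x.h>
  #include <linux/jiffies.h>

  static int example_wait_syncpt(struct host1x_syncpt *sp, u32 threshold)
  {
          struct dma_fence *fence;
          long remaining;

          fence = host1x_fence_create(sp, threshold);
          if (IS_ERR(fence))
                  return PTR_ERR(fence);

          /* Waiting enables signaling, which also arms the 30-second timeout. */
          remaining = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(5000));
          dma_fence_put(fence);

          if (remaining < 0)
                  return remaining;
          if (remaining == 0)
                  return -ETIMEDOUT;

          return 0;
  }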

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
drivers/gpu/host1x/Makefile
drivers/gpu/host1x/fence.c [new file with mode: 0644]
drivers/gpu/host1x/fence.h [new file with mode: 0644]
drivers/gpu/host1x/intr.c
drivers/gpu/host1x/intr.h
include/linux/host1x.h

index 096017b..d2b6f7d 100644 (file)
@@ -9,6 +9,7 @@ host1x-y = \
        job.o \
        debug.o \
        mipi.o \
+       fence.o \
        hw/host1x01.o \
        hw/host1x02.o \
        hw/host1x04.o \
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
new file mode 100644 (file)
index 0000000..6941add
--- /dev/null
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Syncpoint dma_fence implementation
+ *
+ * Copyright (c) 2020, NVIDIA Corporation.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include "fence.h"
+#include "intr.h"
+#include "syncpt.h"
+
+static DEFINE_SPINLOCK(lock);
+
+struct host1x_syncpt_fence {
+       struct dma_fence base;
+
+       atomic_t signaling;
+
+       struct host1x_syncpt *sp;
+       u32 threshold;
+
+       struct host1x_waitlist *waiter;
+       void *waiter_ref;
+
+       struct delayed_work timeout_work;
+};
+
+static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
+{
+       return "host1x";
+}
+
+static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
+{
+       return "syncpoint";
+}
+
+static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
+{
+       return container_of(f, struct host1x_syncpt_fence, base);
+}
+
+static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
+{
+       struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+       int err;
+
+       if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
+               return false;
+
+       dma_fence_get(f);
+
+       /*
+        * The dma_fence framework requires the fence driver to keep a
+        * reference to any fences for which 'enable_signaling' has been
+        * called (and that have not been signalled).
+        *
+        * We provide a userspace API to create arbitrary syncpoint fences,
+        * so we cannot normally guarantee that all fences get signalled.
+        * As such, set up a timeout so that long-lasting fences will get
+        * reaped eventually.
+        */
+       schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
+
+       err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
+                                    HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
+                                    sf->waiter, &sf->waiter_ref);
+       if (err) {
+               cancel_delayed_work_sync(&sf->timeout_work);
+               dma_fence_put(f);
+               return false;
+       }
+
+       /* intr framework takes ownership of waiter */
+       sf->waiter = NULL;
+
+       /*
+        * The fence may get signalled at any time after the
+        * host1x_intr_add_action() call above, so all state used by
+        * the signalling path must be initialized before that call.
+        */
+
+       return true;
+}
+
+static void host1x_syncpt_fence_release(struct dma_fence *f)
+{
+       struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+
+       kfree(sf->waiter);
+
+       dma_fence_free(f);
+}
+
+const struct dma_fence_ops host1x_syncpt_fence_ops = {
+       .get_driver_name = host1x_syncpt_fence_get_driver_name,
+       .get_timeline_name = host1x_syncpt_fence_get_timeline_name,
+       .enable_signaling = host1x_syncpt_fence_enable_signaling,
+       .release = host1x_syncpt_fence_release,
+};
+
+void host1x_fence_signal(struct host1x_syncpt_fence *f)
+{
+       if (atomic_xchg(&f->signaling, 1))
+               return;
+
+       /*
+        * Cancel pending timeout work - if the timeout handler is already
+        * running, it will fail to claim 'f->signaling' and return early.
+        */
+       cancel_delayed_work_sync(&f->timeout_work);
+
+       host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);
+
+       dma_fence_signal(&f->base);
+       dma_fence_put(&f->base);
+}
+
+static void do_fence_timeout(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct host1x_syncpt_fence *f =
+               container_of(dwork, struct host1x_syncpt_fence, timeout_work);
+
+       if (atomic_xchg(&f->signaling, 1))
+               return;
+
+       /*
+        * Remove the interrupt handler's reference - if it is already
+        * running, it will fail to claim 'f->signaling' and return early.
+        */
+       host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);
+
+       dma_fence_set_error(&f->base, -ETIMEDOUT);
+       dma_fence_signal(&f->base);
+       dma_fence_put(&f->base);
+}
+
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
+{
+       struct host1x_syncpt_fence *fence;
+
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (!fence)
+               return ERR_PTR(-ENOMEM);
+
+       fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
+       if (!fence->waiter) {
+               kfree(fence);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       fence->sp = sp;
+       fence->threshold = threshold;
+
+       dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
+                      dma_fence_context_alloc(1), 0);
+
+       INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);
+
+       return &fence->base;
+}
+EXPORT_SYMBOL(host1x_fence_create);
diff --git a/drivers/gpu/host1x/fence.h b/drivers/gpu/host1x/fence.h
new file mode 100644 (file)
index 0000000..70c91de
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA Corporation.
+ */
+
+#ifndef HOST1X_FENCE_H
+#define HOST1X_FENCE_H
+
+struct host1x_syncpt_fence;
+
+void host1x_fence_signal(struct host1x_syncpt_fence *fence);
+
+#endif
index 6d1f3c0..45b6be9 100644 (file)
@@ -13,6 +13,7 @@
 #include <trace/events/host1x.h>
 #include "channel.h"
 #include "dev.h"
+#include "fence.h"
 #include "intr.h"
 
 /* Wait list management */
@@ -121,12 +122,20 @@ static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
        wake_up_interruptible(wq);
 }
 
+static void action_signal_fence(struct host1x_waitlist *waiter)
+{
+       struct host1x_syncpt_fence *f = waiter->data;
+
+       host1x_fence_signal(f);
+}
+
 typedef void (*action_handler)(struct host1x_waitlist *waiter);
 
 static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_wakeup,
        action_wakeup_interruptible,
+       action_signal_fence,
 };
 
 static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
index 6ea55e6..e4c3460 100644 (file)
@@ -33,6 +33,8 @@ enum host1x_intr_action {
         */
        HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 
+       /*
+        * Signal a DMA fence; 'data' points to the fence to be signalled.
+        */
+       HOST1X_INTR_ACTION_SIGNAL_FENCE,
+
        HOST1X_INTR_ACTION_COUNT
 };
 
index 9b0487c..eb4cc8c 100644 (file)
@@ -170,6 +170,8 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
 void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
                                              u32 syncpt_id);
 
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
+
 /*
  * host1x channel
  */
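
For context, a hypothetical follow-up use of the new host1x_fence_create()
prototype above (not part of this patch; the function name is made up)
would be to hand such a fence to userspace as a sync_file:

  #include <linux/fcntl.h>
  #include <linux/file.h>
  #include <linux/host1x.h>
  #include <linux/sync_file.h>

  static int example_export_fence(struct host1x_syncpt *sp, u32 threshold)
  {
          struct dma_fence *fence;
          struct sync_file *file;
          int fd;

          fence = host1x_fence_create(sp, threshold);
          if (IS_ERR(fence))
                  return PTR_ERR(fence);

          fd = get_unused_fd_flags(O_CLOEXEC);
          if (fd < 0) {
                  dma_fence_put(fence);
                  return fd;
          }

          /* sync_file_create() takes its own reference to the fence. */
          file = sync_file_create(fence);
          dma_fence_put(fence);
          if (!file) {
                  put_unused_fd(fd);
                  return -ENOMEM;
          }

          fd_install(fd, file->file);
          return fd;
  }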