drm/exynos: fence: add dma fence support 06/89506/3
author Inki Dae <inki.dae@samsung.com>
Fri, 19 Aug 2016 11:44:54 +0000 (13:44 +0200)
committer Seung-Woo Kim <sw0312.kim@samsung.com>
Tue, 4 Oct 2016 07:04:29 +0000 (00:04 -0700)
This patch adds a DMA fence based DMABUF synchronization feature.
The original code for this feature comes from the repository below:
    https://chromium.googlesource.com/chromiumos/third_party/kernel
     chromeos-3.14
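
Each plane update publishes a new software fence on the framebuffer's
reservation object and defers the actual window commit until all fences
already attached to that buffer have signaled. The flow implemented by
exynos_plane_fence() below is roughly the following (a condensed sketch;
the drm_sw_fence_new() and drm_reservation_cb helpers are provided by
drm_sync_helper.h in this tree, not by mainline):

    resv = obj->base.dma_buf->resv;

    ww_mutex_lock(&resv->lock, NULL);
    reservation_object_reserve_shared(resv);

    /* publish a SW fence for this plane update */
    fence = drm_sw_fence_new(plane->fence_context,
                             atomic_add_return(1, &plane->fence_seqno));
    reservation_object_add_shared_fence(resv, fence);

    if (!reservation_object_test_signaled_rcu(resv, false)) {
        /* buffer busy: commit from exynos_plane_update_cb() once fences signal */
        drm_reservation_cb_init(&plane->rcb, exynos_plane_update_cb, plane);
        drm_reservation_cb_add(&plane->rcb, resv, false);
        drm_reservation_cb_done(&plane->rcb);
    } else {
        /* buffer already idle: commit immediately */
        exynos_plane_update_cb(&plane->rcb, plane);
    }
    ww_mutex_unlock(&resv->lock);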

Change-Id: I4bcefb1487d6a85530fcd5ea4dde5fba435bb827
Signed-off-by: Inki Dae <inki.dae@samsung.com>
[squashed with bugfixes and ported to v4.1 Tizen kernel]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fb.h
drivers/gpu/drm/exynos/exynos_drm_plane.c

index 00c62fdd00b98aa42f1b31d851575122bd8cb8b5..9046b0df90261e96b8e9d6292546dd76f3cc5ab5 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include <linux/component.h>
+#include <linux/fence.h>
 
 #include <drm/exynos_drm.h>
 
@@ -52,6 +53,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        dev_set_drvdata(dev->dev, dev);
        dev->dev_private = (void *)private;
 
+       private->gem_fence_context = fence_context_alloc(1);
+       atomic_set(&private->gem_fence_seqno, 0);
+
        /*
         * create mapping to manage iommu table and set a pointer to iommu
         * mapping structure to iommu_mapping of private data.
index e3377204f192d1b086332b70276ea8b41ec16124..1b94bd02199e5fec2e00993f39e1c9c403ec2ce6 100644 (file)
@@ -16,7 +16,9 @@
 #define _EXYNOS_DRM_DRV_H_
 
 #include <drm/drmP.h>
+#include <drm/drm_sync_helper.h>
 #include <linux/module.h>
+#include <linux/dma-buf.h>
 
 #define MAX_CRTC       3
 #define MAX_PLANE      5
@@ -110,6 +112,17 @@ struct exynos_drm_plane {
 
        bool enabled:1;
        bool resume:1;
+       bool update_pending:1;
+
+#ifdef CONFIG_DRM_DMA_SYNC
+       unsigned fence_context;
+       atomic_t fence_seqno;
+       struct fence *fence;
+       struct drm_reservation_cb rcb;
+
+       struct fence *pending_fence;
+       bool pending_needs_vblank;
+#endif
 };
 
 /*
@@ -262,6 +275,9 @@ struct exynos_drm_private {
        unsigned long da_space_size;
 
        unsigned int pipe;
+
+       unsigned gem_fence_context;
+       atomic_t gem_fence_seqno;
 };
 
 /*
index 142eb4e3f59ea5501805cc56bed7cbef4106ae7b..9e6ed44caae91e96df689fea2ab54fd65325ca72 100644 (file)
@@ -254,6 +254,22 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
        return buffer;
 }
 
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+                                                                int index)
+{
+       struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+       struct exynos_drm_gem_obj *obj;
+
+       if (index >= MAX_FB_BUFFER)
+               return ERR_PTR(-EINVAL);
+
+       obj = exynos_fb->exynos_gem_obj[index];
+       if (!obj)
+               return ERR_PTR(-EFAULT);
+
+       return obj;
+}
+
 static void exynos_drm_output_poll_changed(struct drm_device *dev)
 {
        struct exynos_drm_private *private = dev->dev_private;
index 517471b37566cd8b5224720954113865549cf12d..12bb1c40d8a94e5512bcddabfca9aa5865ce6144 100644 (file)
@@ -32,4 +32,8 @@ void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
 /* get a buffer count to drm framebuffer. */
 unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
 
+/* get a gem object corresponding to a given index. */
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+                                                                int index);
+
 #endif
index 3272ed60367ead42cba2885f7966fc464944255b..979108a52dcc9b1be1fa3548335977c488c71004 100644 (file)
@@ -138,6 +138,91 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
        plane->crtc = crtc;
 }
 
+#ifdef CONFIG_DRM_DMA_SYNC
+static void exynos_plane_update_cb(struct drm_reservation_cb *rcb, void *params)
+{
+       struct exynos_drm_plane *exynos_plane = params;
+       struct exynos_drm_crtc *exynos_crtc =
+                                       to_exynos_crtc(exynos_plane->base.crtc);
+
+       if (exynos_crtc->ops->win_commit)
+               exynos_crtc->ops->win_commit(exynos_crtc,
+                                            exynos_plane->zpos);
+
+       exynos_plane->update_pending = false;
+
+       if (exynos_plane->pending_fence) {
+               drm_fence_signal_and_put(&exynos_plane->fence);
+               exynos_plane->fence = exynos_plane->pending_fence;
+               exynos_plane->pending_fence = NULL;
+       }
+
+       /* TODO */
+}
+#endif
+
+#ifdef CONFIG_DRM_DMA_SYNC
+static int exynos_plane_fence(struct exynos_drm_plane *plane,
+                             struct exynos_drm_gem_obj *obj)
+{
+       struct drm_crtc *crtc = plane->base.crtc;
+       struct exynos_drm_crtc *exynos_crtc;
+       struct reservation_object *resv;
+       struct fence *fence;
+       int ret;
+
+       exynos_crtc = to_exynos_crtc(crtc);
+       resv = obj->base.dma_buf->resv;
+
+       ww_mutex_lock(&resv->lock, NULL);
+
+       ret = reservation_object_reserve_shared(resv);
+       if (ret < 0) {
+               DRM_ERROR("Reserving space for shared fence failed: %d\n", ret);
+               goto err_mutex;
+       }
+
+       fence = drm_sw_fence_new(plane->fence_context,
+                                atomic_add_return(1, &plane->fence_seqno));
+       if (IS_ERR(fence)) {
+               ret = PTR_ERR(fence);
+               DRM_ERROR("Failed to create fence: %d\n", ret);
+               goto err_mutex;
+       }
+
+       plane->update_pending = true;
+       plane->pending_fence = fence;
+
+       reservation_object_add_shared_fence(resv, plane->pending_fence);
+
+       if (!reservation_object_test_signaled_rcu(resv, false)) {
+               drm_reservation_cb_init(&plane->rcb, exynos_plane_update_cb, plane);
+               ret = drm_reservation_cb_add(&plane->rcb, resv, false);
+               if (ret < 0) {
+                       DRM_ERROR("Adding reservation to callback failed: %d\n", ret);
+                       goto err_fence;
+               }
+
+               drm_reservation_cb_done(&plane->rcb);
+       } else {
+               exynos_plane_update_cb(&plane->rcb, plane);
+       }
+
+       ww_mutex_unlock(&resv->lock);
+
+       return 0;
+
+err_fence:
+       fence_put(plane->pending_fence);
+       plane->pending_fence = NULL;
+       plane->update_pending = false;
+err_mutex:
+       ww_mutex_unlock(&resv->lock);
+
+       return ret;
+}
+#endif
+
 int
 exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -148,8 +233,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
        struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+       struct exynos_drm_gem_obj *obj;
        int ret;
 
+       if (exynos_plane->update_pending)
+               return -EBUSY;
+
        ret = exynos_check_plane(plane, fb);
        if (ret < 0)
                return ret;
@@ -158,10 +247,28 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                              crtc_w, crtc_h, src_x >> 16, src_y >> 16,
                              src_w >> 16, src_h >> 16);
 
+       obj = exynos_drm_fb_gem_obj(fb, 0);
+       if (IS_ERR(obj)) {
+               /* this should never happen */
+               WARN_ON(1);
+               return PTR_ERR(obj);
+       }
+
+#ifdef CONFIG_DRM_DMA_SYNC
+       if (!obj->base.dma_buf || !obj->base.dma_buf->resv) {
+               if (exynos_crtc->ops->win_commit)
+                       exynos_crtc->ops->win_commit(exynos_crtc,
+                                                    exynos_plane->zpos);
+               return 0;
+       }
+
+       return exynos_plane_fence(exynos_plane, obj);
+#else
        if (exynos_crtc->ops->win_commit)
                exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
 
        return 0;
+#endif
 }
 
 static int exynos_disable_plane(struct drm_plane *plane)
@@ -169,6 +276,8 @@ static int exynos_disable_plane(struct drm_plane *plane)
        struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
 
+       exynos_plane->update_pending = false;
+
        if (exynos_crtc && exynos_crtc->ops->win_disable)
                exynos_crtc->ops->win_disable(exynos_crtc,
                                              exynos_plane->zpos);
@@ -222,5 +331,10 @@ int exynos_plane_init(struct drm_device *dev,
 
        exynos_plane_attach_zpos_property(&exynos_plane->base, zpos);
 
+#ifdef CONFIG_DRM_DMA_SYNC
+       exynos_plane->fence_context = fence_context_alloc(1);
+       atomic_set(&exynos_plane->fence_seqno, 0);
+#endif
+
        return 0;
 }