From 747e44655e990a95cc9d5ee87638b5a4d4de4e57 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Fri, 19 Aug 2016 13:44:54 +0200 Subject: [PATCH] drm/exynos: fence: add dma fence support This patch adds a DMA fence based DMABUF synchronization feature. The original code of this feature comes from the repository below: https://chromium.googlesource.com/chromiumos/third_party/kernel chromeos-3.14 Change-Id: I4bcefb1487d6a85530fcd5ea4dde5fba435bb827 Signed-off-by: Inki Dae [squashed with bugfixes and ported to v4.1 Tizen kernel] Signed-off-by: Marek Szyprowski --- drivers/gpu/drm/exynos/exynos_drm_drv.c | 4 ++ drivers/gpu/drm/exynos/exynos_drm_drv.h | 16 +++++ drivers/gpu/drm/exynos/exynos_drm_fb.c | 16 +++++ drivers/gpu/drm/exynos/exynos_drm_fb.h | 4 ++ drivers/gpu/drm/exynos/exynos_drm_plane.c | 114 ++++++++++++++++++++++++++++++ 5 files changed, 154 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 00c62fd..9046b0d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -17,6 +17,7 @@ #include #include +#include #include @@ -52,6 +53,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) dev_set_drvdata(dev->dev, dev); dev->dev_private = (void *)private; + private->gem_fence_context = fence_context_alloc(1); + atomic_set(&private->gem_fence_seqno, 0); + /* * create mapping to manage iommu table and set a pointer to iommu * mapping structure to iommu_mapping of private data. 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index e337720..1b94bd0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -16,7 +16,9 @@ #define _EXYNOS_DRM_DRV_H_ #include +#include #include +#include #define MAX_CRTC 3 #define MAX_PLANE 5 @@ -110,6 +112,17 @@ struct exynos_drm_plane { bool enabled:1; bool resume:1; + bool update_pending:1; + +#ifdef CONFIG_DRM_DMA_SYNC + unsigned fence_context; + atomic_t fence_seqno; + struct fence *fence; + struct drm_reservation_cb rcb; + + struct fence *pending_fence; + bool pending_needs_vblank; +#endif }; /* @@ -262,6 +275,9 @@ struct exynos_drm_private { unsigned long da_space_size; unsigned int pipe; + + unsigned gem_fence_context; + atomic_t gem_fence_seqno; }; /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 142eb4e..9e6ed44 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -254,6 +254,22 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, return buffer; } +struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, + int index) +{ + struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); + struct exynos_drm_gem_obj *obj; + + if (index >= MAX_FB_BUFFER) + return ERR_PTR(-EINVAL); + + obj = exynos_fb->exynos_gem_obj[index]; + if (!obj) + return ERR_PTR(-EFAULT); + + return obj; +} + static void exynos_drm_output_poll_changed(struct drm_device *dev) { struct exynos_drm_private *private = dev->dev_private; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 517471b..12bb1c4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -32,4 +32,8 @@ void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb, /* get a buffer count to drm framebuffer. 
*/ unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb); +/* get a gem object corresponding to a given index. */ +struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, + int index); + #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 3272ed6..979108a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -138,6 +138,91 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, plane->crtc = crtc; } +#ifdef CONFIG_DRM_DMA_SYNC +static void exynos_plane_update_cb(struct drm_reservation_cb *rcb, void *params) +{ + struct exynos_drm_plane *exynos_plane = params; + struct exynos_drm_crtc *exynos_crtc = + to_exynos_crtc(exynos_plane->base.crtc); + + if (exynos_crtc->ops->win_commit) + exynos_crtc->ops->win_commit(exynos_crtc, + exynos_plane->zpos); + + exynos_plane->update_pending = false; + + if (exynos_plane->pending_fence) { + drm_fence_signal_and_put(&exynos_plane->fence); + exynos_plane->fence = exynos_plane->pending_fence; + exynos_plane->pending_fence = NULL; + } + + /* TODO */ +} +#endif + +#ifdef CONFIG_DRM_DMA_SYNC +static int exynos_plane_fence(struct exynos_drm_plane *plane, + struct exynos_drm_gem_obj *obj) +{ + struct drm_crtc *crtc = plane->base.crtc; + struct exynos_drm_crtc *exynos_crtc; + struct reservation_object *resv; + struct fence *fence; + int ret; + + exynos_crtc = to_exynos_crtc(crtc); + resv = obj->base.dma_buf->resv; + + ww_mutex_lock(&resv->lock, NULL); + + ret = reservation_object_reserve_shared(resv); + if (ret < 0) { + DRM_ERROR("Reserving space for shared fence failed: %d\n", ret); + goto err_mutex; + } + + fence = drm_sw_fence_new(plane->fence_context, + atomic_add_return(1, &plane->fence_seqno)); + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + DRM_ERROR("Failed to create fence: %d\n", ret); + goto err_mutex; + } + + plane->update_pending = true; + plane->pending_fence = 
fence; + + reservation_object_add_shared_fence(resv, plane->pending_fence); + + if (!reservation_object_test_signaled_rcu(resv, false)) { + drm_reservation_cb_init(&plane->rcb, exynos_plane_update_cb, plane); + ret = drm_reservation_cb_add(&plane->rcb, resv, false); + if (ret < 0) { + DRM_ERROR("Adding reservation to callback failed: %d\n", ret); + goto err_fence; + } + + drm_reservation_cb_done(&plane->rcb); + } else { + exynos_plane_update_cb(&plane->rcb, plane); + } + + ww_mutex_unlock(&resv->lock); + + return 0; + +err_fence: + fence_put(plane->pending_fence); + plane->pending_fence = NULL; + plane->update_pending = false; +err_mutex: + ww_mutex_unlock(&resv->lock); + + return ret; +} +#endif + int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, @@ -148,8 +233,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_gem_obj *obj; int ret; + if (exynos_plane->update_pending) + return -EBUSY; + ret = exynos_check_plane(plane, fb); if (ret < 0) return ret; @@ -158,10 +247,28 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, crtc_w, crtc_h, src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16); + obj = exynos_drm_fb_gem_obj(fb, 0); + if (IS_ERR(obj)) { + /* there should be no such case. 
*/ + WARN_ON(1); + return PTR_ERR(obj); + } + +#ifdef CONFIG_DRM_DMA_SYNC + if (!obj->base.dma_buf || !obj->base.dma_buf->resv) { + if (exynos_crtc->ops->win_commit) + exynos_crtc->ops->win_commit(exynos_crtc, + exynos_plane->zpos); + return 0; + } + + return exynos_plane_fence(exynos_plane, obj); +#else if (exynos_crtc->ops->win_commit) exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos); return 0; +#endif } static int exynos_disable_plane(struct drm_plane *plane) @@ -169,6 +276,8 @@ static int exynos_disable_plane(struct drm_plane *plane) struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); + exynos_plane->update_pending = false; + if (exynos_crtc && exynos_crtc->ops->win_disable) exynos_crtc->ops->win_disable(exynos_crtc, exynos_plane->zpos); @@ -222,5 +331,10 @@ int exynos_plane_init(struct drm_device *dev, exynos_plane_attach_zpos_property(&exynos_plane->base, zpos); +#ifdef CONFIG_DRM_DMA_SYNC + exynos_plane->fence_context = fence_context_alloc(1); + atomic_set(&exynos_plane->fence_seqno, 0); +#endif + return 0; } -- 2.7.4