plane->crtc = crtc;
}
+#ifdef CONFIG_DRM_DMA_SYNC
+/*
+ * exynos_plane_update_cb - reservation callback run once the fences a plane
+ * update was waiting on have signaled.
+ * @rcb: reservation callback embedded in the plane (unused here)
+ * @params: opaque pointer, set to the struct exynos_drm_plane by
+ *	exynos_plane_fence()
+ *
+ * Commits the pending window update to hardware, then retires the previous
+ * frame's fence: the old plane->fence is signaled and dropped, and the
+ * pending fence becomes the current one (it in turn is signaled when the
+ * next update's callback runs).
+ */
+static void exynos_plane_update_cb(struct drm_reservation_cb *rcb, void *params)
+{
+ struct exynos_drm_plane *exynos_plane = params;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(exynos_plane->base.crtc);
+
+ /* Push the new buffer to the hardware window, if the CRTC supports it. */
+ if (exynos_crtc->ops->win_commit)
+ exynos_crtc->ops->win_commit(exynos_crtc,
+ exynos_plane->zpos);
+
+ /* Let exynos_update_plane() accept a new update for this plane again. */
+ exynos_plane->update_pending = false;
+
+ /*
+  * Retire the fence of the previous frame and promote the pending one;
+  * NOTE(review): ideally the new fence would be signaled at vblank/
+  * scan-out completion rather than on the next update — presumably what
+  * the TODO below refers to; confirm with the patch author.
+  */
+ if (exynos_plane->pending_fence) {
+ drm_fence_signal_and_put(&exynos_plane->fence);
+ exynos_plane->fence = exynos_plane->pending_fence;
+ exynos_plane->pending_fence = NULL;
+ }
+
+ /* TODO */
+}
+#endif
+
+#ifdef CONFIG_DRM_DMA_SYNC
+/*
+ * exynos_plane_fence - fence-synchronized window commit
+ * @plane: plane being updated
+ * @obj: GEM object backing the new framebuffer; the caller guarantees
+ *	obj->base.dma_buf and its reservation object exist
+ *
+ * Creates a software fence for this update, publishes it as a shared fence
+ * on the buffer's reservation object, and defers the actual win_commit to
+ * exynos_plane_update_cb() until the reservation object's exclusive fence
+ * has signaled.  If it has already signaled, the commit runs synchronously.
+ *
+ * Returns 0 on success or a negative error code.
+ *
+ * NOTE(review): on the err_fence path the fence has already been added to
+ * the reservation object, which takes its own reference — dropping only the
+ * plane's reference leaves a never-signaled fence in @resv that will stall
+ * later users of the buffer.  Consider publishing the fence only after
+ * callback setup succeeds; confirm against drm_reservation_cb semantics.
+ */
+static int exynos_plane_fence(struct exynos_drm_plane *plane,
+ struct exynos_drm_gem_obj *obj)
+{
+ struct drm_crtc *crtc = plane->base.crtc;
+ struct exynos_drm_crtc *exynos_crtc;
+ struct reservation_object *resv;
+ struct fence *fence;
+ int ret;
+
+ exynos_crtc = to_exynos_crtc(crtc);
+ resv = obj->base.dma_buf->resv;
+
+ /*
+  * Serialize against other users of the buffer's reservation object.
+  * Return value deliberately ignored — presumably safe because no
+  * ww_acquire_ctx is used; TODO confirm against ww_mutex documentation.
+  */
+ ww_mutex_lock(&resv->lock, NULL);
+
+ /* Make room for one more shared-fence slot before creating our fence. */
+ ret = reservation_object_reserve_shared(resv);
+ if (ret < 0) {
+ DRM_ERROR("Reserving space for shared fence failed: %d\n", ret);
+ goto err_mutex;
+ }
+
+ /* Software fence for this update; seqno is a simple per-plane counter. */
+ fence = drm_sw_fence_new(plane->fence_context,
+ atomic_add_return(1, &plane->fence_seqno));
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ DRM_ERROR("Failed to create fence: %d\n", ret);
+ goto err_mutex;
+ }
+
+ /* Block further updates until the callback commits this one. */
+ plane->update_pending = true;
+ plane->pending_fence = fence;
+
+ reservation_object_add_shared_fence(resv, plane->pending_fence);
+
+ /*
+  * Only the exclusive fence is tested (test_all == false).  If it is
+  * still pending, defer the commit to the reservation callback.
+  */
+ if (!reservation_object_test_signaled_rcu(resv, false)) {
+ drm_reservation_cb_init(&plane->rcb, exynos_plane_update_cb, plane);
+ ret = drm_reservation_cb_add(&plane->rcb, resv, false);
+ if (ret < 0) {
+ DRM_ERROR("Adding reservation to callback failed: %d\n", ret);
+ goto err_fence;
+ }
+
+ drm_reservation_cb_done(&plane->rcb);
+ } else {
+ /* Nothing to wait for: commit immediately. */
+ exynos_plane_update_cb(&plane->rcb, plane);
+ }
+
+ ww_mutex_unlock(&resv->lock);
+
+ return 0;
+
+err_fence:
+ /* See NOTE(review) above: the resv still holds a reference here. */
+ fence_put(plane->pending_fence);
+ plane->pending_fence = NULL;
+ plane->update_pending = false;
+err_mutex:
+ ww_mutex_unlock(&resv->lock);
+
+ return ret;
+}
+#endif
+
int
exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_gem_obj *obj;
int ret;
+ if (exynos_plane->update_pending)
+ return -EBUSY;
+
ret = exynos_check_plane(plane, fb);
if (ret < 0)
return ret;
crtc_w, crtc_h, src_x >> 16, src_y >> 16,
src_w >> 16, src_h >> 16);
+ obj = exynos_drm_fb_gem_obj(fb, 0);
+ if (IS_ERR(obj)) {
+ /* there should be no such case. */
+ WARN_ON(1);
+ return PTR_ERR(obj);
+ }
+
+#ifdef CONFIG_DRM_DMA_SYNC
+ if (!obj->base.dma_buf || !obj->base.dma_buf->resv) {
+ if (exynos_crtc->ops->win_commit)
+ exynos_crtc->ops->win_commit(exynos_crtc,
+ exynos_plane->zpos);
+ return 0;
+ }
+
+ return exynos_plane_fence(exynos_plane, obj);
+#else
if (exynos_crtc->ops->win_commit)
exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
return 0;
+#endif
}
static int exynos_disable_plane(struct drm_plane *plane)
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
+ exynos_plane->update_pending = false;
+
if (exynos_crtc && exynos_crtc->ops->win_disable)
exynos_crtc->ops->win_disable(exynos_crtc,
exynos_plane->zpos);
exynos_plane_attach_zpos_property(&exynos_plane->base, zpos);
+#ifdef CONFIG_DRM_DMA_SYNC
+ exynos_plane->fence_context = fence_context_alloc(1);
+ atomic_set(&exynos_plane->fence_seqno, 0);
+#endif
+
return 0;
}