From: jonggab.park Date: Fri, 28 Apr 2017 08:02:34 +0000 (+0900) Subject: drm/tdm: implement PP interface. X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9bf084dedb124ec8f589cd679d20bd9830e1cb54;p=profile%2Fwearable%2Fplatform%2Fkernel%2Flinux-3.18-exynos7270.git drm/tdm: implement PP interface. PP interface is included. This patch is from product kernel. Change-Id: I44d06ab017e2023440010ad64a4265e24954f0c2 Signed-off-by: Jin-young Jeon Signed-off-by: Inki Dae --- diff --git a/arch/arm64/boot/dts/tgm-drm.dtsi b/arch/arm64/boot/dts/tgm-drm.dtsi index 550dd04..582035a 100644 --- a/arch/arm64/boot/dts/tgm-drm.dtsi +++ b/arch/arm64/boot/dts/tgm-drm.dtsi @@ -22,4 +22,8 @@ tgm,drv_patchlevel = <0>; tgm,num_crtcs = <1>; }; + + tdm: tdm_pp { + compatible = "tdm,tdm_pp"; + }; }; diff --git a/drivers/gpu/drm/tgm/Kconfig b/drivers/gpu/drm/tgm/Kconfig index 3faa3d4..9f62c17 100644 --- a/drivers/gpu/drm/tgm/Kconfig +++ b/drivers/gpu/drm/tgm/Kconfig @@ -30,6 +30,12 @@ config DRM_TDM_DPMS_CTRL help Choose this option if you want to use dpms ioctl. +config DRM_TDM_PP + bool "Supports PP feature" + depends on DRM_TDM + help + Supports post-processing feature. + config DRM_TBM bool "TBM allocates graphics buffer" depends on DRM_TGM diff --git a/drivers/gpu/drm/tgm/Makefile b/drivers/gpu/drm/tgm/Makefile index db99670..8cb08a9 100644 --- a/drivers/gpu/drm/tgm/Makefile +++ b/drivers/gpu/drm/tgm/Makefile @@ -8,6 +8,7 @@ tgm-y := tgm_drv.o tgm-$(CONFIG_DRM_TDM) += tdm.o tgm-$(CONFIG_DRM_TDM_IRQ) += tdm_irq.o tgm-$(CONFIG_DRM_TDM_IRQ_EXYNOS) += tdm_irq_exynos.o +tgm-$(CONFIG_DRM_TDM_PP) += tdm_pp.o tgm-$(CONFIG_DRM_TBM) += tbm.o tgm-$(CONFIG_DRM_TBM_GEM) += tbm_gem.o tgm-$(CONFIG_DRM_TBM_GEM_DMA_MAPPING) += tbm_gem_dma_mapping.o diff --git a/drivers/gpu/drm/tgm/tdm.h b/drivers/gpu/drm/tgm/tdm.h index fbbf19f..0ac27f2 100644 --- a/drivers/gpu/drm/tgm/tdm.h +++ b/drivers/gpu/drm/tgm/tdm.h @@ -16,7 +16,9 @@ #include #include - +#ifdef CONFIG_DRM_TDM_PP +#include +#endif #ifdef CONFIG_DRM_DMA_SYNC #include #endif diff --git a/drivers/gpu/drm/tgm/tdm_pp.c b/drivers/gpu/drm/tgm/tdm_pp.c new file mode 100644 index 0000000..5cdd7ef78 --- /dev/null +++ b/drivers/gpu/drm/tgm/tdm_pp.c @@ -0,0 +1,1840 @@ +/* + * Copyright (C) 2016 Samsung Electronics Co.Ltd + * Authors: + * Eunchul Kim + * Jinyoung Jeon + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include +#include +#include +#include +#include + +#include +#include +#include "tgm_drv.h" +#include "tbm_gem.h" +#include "tdm_pp.h" + +/* + * TODO + * 1. expand command control id. + * 2. integrate property and config. + * 3. removed send_event id check routine. + * 4. compare send_event id if needed. + * 5. free subdrv_remove notifier callback list if needed. + * 6. need to check subdrv_open about multi-open. + * 7. need to power_on implement power and sysmmu ctrl. + */ +#define PP_STR_LEN 16 +#define get_pp_context(dev) platform_get_drvdata(to_platform_device(dev)) +#define pp_is_m2m_cmd(c) (c == PP_CMD_M2M) + +/* + * A structure of event. + * + * @base: base of event. + * @event: pp event. + */ +struct tdm_pp_send_event { + struct drm_pending_event base; + struct tdm_pp_event event; +}; + +/* + * A structure of memory node. + * + * @list: list head to memory queue information. + * @ops_id: id of operations. + * @prop_id: id of property. 
+ * @buf_id: id of buffer. + * @buf_info: gem objects and dma address, size. + * @filp: a pointer to drm_file. + */ +struct tdm_pp_mem_node { + struct list_head list; + enum tdm_ops_id ops_id; + u32 prop_id; + u32 buf_id; + struct tdm_pp_buf_info buf_info; +}; + +/* + * A structure of pp context. + * + * @subdrv: prepare initialization using subdrv. + * @pp_lock: lock for synchronization of access to pp_idr. + * @prop_lock: lock for synchronization of access to prop_idr. + * @pp_idr: pp driver idr. + * @prop_idr: property idr. + * @event_workq: event work queue. + * @cmd_workq: command work queue. + */ +struct pp_context { + struct tgm_subdrv subdrv; + struct mutex pp_lock; + struct mutex prop_lock; + struct idr pp_idr; + struct idr prop_idr; + struct workqueue_struct *event_workq; + struct workqueue_struct *cmd_workq; +}; + +static LIST_HEAD(tdm_ppdrv_list); +static DEFINE_MUTEX(tdm_ppdrv_lock); +static BLOCKING_NOTIFIER_HEAD(tdm_ppnb_list); + +int tdm_ppdrv_register(struct tdm_ppdrv *ppdrv) +{ + mutex_lock(&tdm_ppdrv_lock); + list_add_tail(&ppdrv->drv_list, &tdm_ppdrv_list); + mutex_unlock(&tdm_ppdrv_lock); + + return 0; +} + +int tdm_ppdrv_unregister(struct tdm_ppdrv *ppdrv) +{ + mutex_lock(&tdm_ppdrv_lock); + list_del(&ppdrv->drv_list); + mutex_unlock(&tdm_ppdrv_lock); + + return 0; +} + +static int pp_create_id(struct idr *id_idr, struct mutex *lock, void *obj) +{ + int ret; + + mutex_lock(lock); + ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); + mutex_unlock(lock); + + return ret; +} + +static void pp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id) +{ + mutex_lock(lock); + idr_remove(id_idr, id); + mutex_unlock(lock); +} + +static void *pp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) +{ + void *obj; + + mutex_lock(lock); + obj = idr_find(id_idr, id); + mutex_unlock(lock); + + return obj; +} + +static int pp_check_driver(struct tdm_ppdrv *ppdrv, + struct tdm_pp_property *property) +{ + if (ppdrv->dedicated || (!pp_is_m2m_cmd(property->cmd) && + !pm_runtime_suspended(ppdrv->dev))) + return -EBUSY; + + if (ppdrv->check_property && + ppdrv->check_property(ppdrv->dev, property)) + return -EINVAL; + + return 0; +} + +static struct tdm_ppdrv *pp_find_driver(struct pp_context *ctx, + struct tdm_pp_property *property) +{ + struct tdm_ppdrv *ppdrv; + u32 pp_id = property->pp_id; + int ret; + + if (pp_id) { + ppdrv = pp_find_obj(&ctx->pp_idr, &ctx->pp_lock, pp_id); + if (!ppdrv) { + DRM_DEBUG("pp%d driver not found\n", pp_id); + return ERR_PTR(-ENODEV); + } + + ret = pp_check_driver(ppdrv, property); + if (ret < 0) { + DRM_DEBUG("pp%d driver check error %d\n", pp_id, ret); + return ERR_PTR(ret); + } else + return ppdrv; + + } else { + list_for_each_entry(ppdrv, &tdm_ppdrv_list, drv_list) { + ret = pp_check_driver(ppdrv, property); + if (ret == 0) + return ppdrv; + } + + DRM_DEBUG("cannot find driver suitable for given property.\n"); + } + + return ERR_PTR(-ENODEV); +} + +static struct tdm_ppdrv *pp_find_drv_by_handle(u32 prop_id) +{ + struct tdm_ppdrv *ppdrv; + struct tdm_pp_cmd_node *c_node; + int count = 0; + + DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); + + /* + * This case is search pp driver by prop_id handle. + * sometimes, pp subsystem find driver by prop_id. + * e.g PAUSE state, queue buf, command control. 
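+ *
+ * A minimal sketch of the caller pattern, as used by
+ * pp_queue_buf_with_run() below:
+ *
+ *	ppdrv = pp_find_drv_by_handle(qbuf->prop_id);
+ *	if (IS_ERR(ppdrv))
+ *		return PTR_ERR(ppdrv);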
+ */ + list_for_each_entry(ppdrv, &tdm_ppdrv_list, drv_list) { + DRM_DEBUG_KMS("count[%d]ppdrv[%p]\n", count++, ppdrv); + + mutex_lock(&ppdrv->cmd_lock); + list_for_each_entry(c_node, &ppdrv->cmd_list, list) { + if (c_node->property.prop_id == prop_id) { + mutex_unlock(&ppdrv->cmd_lock); + return ppdrv; + } + } + mutex_unlock(&ppdrv->cmd_lock); + } + + return ERR_PTR(-ENODEV); +} + +int tdm_pp_get_property(struct drm_device *drm_dev, void *data, + struct drm_file *file) +{ + struct tgm_drv_file_private *file_priv = file->driver_priv; + struct device *dev = file_priv->pp_dev; + struct pp_context *ctx = get_pp_context(dev); + struct tdm_pp_prop_list *prop_list = data; + struct tdm_ppdrv *ppdrv; + int count = 0; + + if (!ctx) { + DRM_ERROR("invalid context.\n"); + return -EINVAL; + } + + if (!prop_list) { + DRM_ERROR("invalid property parameter.\n"); + return -EINVAL; + } + + DRM_DEBUG_KMS("pp_id[%d]\n", prop_list->pp_id); + + if (!prop_list->pp_id) { + list_for_each_entry(ppdrv, &tdm_ppdrv_list, drv_list) + count++; + + /* + * Supports ppdrv list count for user application. + * First step user application getting ppdrv count. + * and second step getting ppdrv capability using pp_id. + */ + prop_list->count = count; + } else { + /* + * Getting ppdrv capability by pp_id. + * some device not supported wb, output interface. + * so, user application detect correct pp driver + * using this ioctl. + */ + ppdrv = pp_find_obj(&ctx->pp_idr, &ctx->pp_lock, + prop_list->pp_id); + if (!ppdrv) { + DRM_ERROR("not found pp%d driver.\n", + prop_list->pp_id); + return -ENODEV; + } + + *prop_list = ppdrv->prop_list; + } + + return 0; +} + +static void pp_print_property(struct tdm_pp_property *property, + int idx) +{ + struct tdm_pp_config *config = &property->config[idx]; + struct tdm_pos *pos = &config->pos; + struct tdm_sz *sz = &config->sz; + + DRM_INFO("pp:prop_id[%d]ops[%s]fmt[0x%x]\n", + property->prop_id, idx ? "dst" : "src", config->fmt); + + DRM_INFO("pp:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", + pos->x, pos->y, pos->w, pos->h, + sz->hsize, sz->vsize, config->flip, config->degree); +} + +static struct tdm_pp_cmd_work *pp_create_cmd_work(void) +{ + struct tdm_pp_cmd_work *cmd_work; + + cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); + if (!cmd_work) + return ERR_PTR(-ENOMEM); + + INIT_WORK((struct work_struct *)cmd_work, pp_sched_cmd); + + return cmd_work; +} + +static struct tdm_pp_event_work *pp_create_event_work(void) +{ + struct tdm_pp_event_work *event_work; + + event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); + if (!event_work) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&event_work->work, pp_sched_event); + + return event_work; +} + +int tdm_pp_set_property(struct drm_device *drm_dev, void *data, + struct drm_file *file) +{ + struct tgm_drv_file_private *file_priv = file->driver_priv; + struct device *dev = file_priv->pp_dev; + struct pp_context *ctx = get_pp_context(dev); + struct tdm_pp_property *property = data; + struct tdm_ppdrv *ppdrv; + struct tdm_pp_cmd_node *c_node; + u32 prop_id; + int ret, i; + + if (!ctx) { + DRM_ERROR("invalid context.\n"); + return -EINVAL; + } + + if (!property) { + DRM_ERROR("invalid property parameter.\n"); + return -EINVAL; + } + + prop_id = property->prop_id; + + /* + * This is log print for user application property. + * user application set various property. + */ + for_each_pp_ops(i) + pp_print_property(property, i); + + /* + * In case prop_id is not zero try to set existing property. 
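+ *
+ * Note that only a stopped node can be updated: PP_CTRL_PAUSE
+ * leaves the node in PP_STATE_STOP, whereas PP_CTRL_STOP destroys
+ * the command node altogether.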
+ */ + if (prop_id) { + c_node = pp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id); + + if (!c_node || c_node->filp != file) { + DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id); + return -EINVAL; + } + + if (c_node->state != PP_STATE_STOP) { + DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id); + return -EINVAL; + } + + c_node->property = *property; + + return 0; + } + + /* find pp driver using pp id */ + ppdrv = pp_find_driver(ctx, property); + if (IS_ERR(ppdrv)) { + DRM_ERROR("failed to get pp driver.\n"); + return -EINVAL; + } + + /* allocate command node */ + c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); + if (!c_node) + return -ENOMEM; + + ret = pp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node); + if (ret < 0) { + DRM_ERROR("failed to create id.\n"); + goto err_clear; + } + property->prop_id = ret; + + DRM_INFO("%s:prop_id[%d]cmd[%d]ppdrv[%p]type[%d]\n", __func__, + property->prop_id, property->cmd, ppdrv, property->type); + + /* stored property information and ppdrv in private data */ + c_node->property = *property; + c_node->state = PP_STATE_IDLE; + c_node->filp = file; + + if (property->type & PP_EVENT_DRIVEN) { + /* + * create single thread for pp command and event. + * PP supports command thread for user process. + * user process make command node using set property ioctl. + * and make start_work and send this work to command thread. + * and then this command thread start property. + */ + c_node->start_work = pp_create_cmd_work(); + if (IS_ERR_OR_NULL(c_node->start_work)) { + DRM_ERROR("failed to create start work.\n"); + goto err_remove_id; + } + + c_node->stop_work = pp_create_cmd_work(); + if (IS_ERR_OR_NULL(c_node->stop_work)) { + DRM_ERROR("failed to create stop work.\n"); + goto err_free_start; + } + + init_completion(&c_node->stop_complete); + } + + c_node->event_work = pp_create_event_work(); + if (IS_ERR(c_node->event_work)) { + DRM_ERROR("failed to create event work.\n"); + goto err_free_stop; + } + + mutex_init(&c_node->lock); + mutex_init(&c_node->mem_lock); + mutex_init(&c_node->event_lock); + init_completion(&c_node->start_complete); + + for_each_pp_ops(i) + INIT_LIST_HEAD(&c_node->mem_list[i]); + + INIT_LIST_HEAD(&c_node->event_list); + mutex_lock(&ppdrv->cmd_lock); + list_add_tail(&c_node->list, &ppdrv->cmd_list); + mutex_unlock(&ppdrv->cmd_lock); + + /* make dedicated state without m2m */ + if (!pp_is_m2m_cmd(property->cmd)) + ppdrv->dedicated = true; + + return 0; + +err_free_stop: + if (property->type & PP_EVENT_DRIVEN) + kfree(c_node->stop_work); +err_free_start: + if (property->type & PP_EVENT_DRIVEN) + kfree(c_node->start_work); +err_remove_id: + pp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id); +err_clear: + kfree(c_node); + return ret; +} + +static int pp_put_mem_node(struct drm_device *drm_dev, + struct tdm_pp_cmd_node *c_node, + struct tdm_pp_mem_node *m_node) +{ + struct tdm_ppdrv *ppdrv; + int i; + + DRM_DEBUG_KMS("node[%p]\n", m_node); + + if (!m_node) { + DRM_ERROR("invalid dequeue node.\n"); + return -EFAULT; + } + + ppdrv = pp_find_drv_by_handle(m_node->prop_id); + if (IS_ERR(ppdrv)) { + DRM_ERROR("failed to get pp driver.\n"); + return -EFAULT; + } + + DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); + + /* put gem buffer */ + for_each_pp_planar(i) { + unsigned long handle = m_node->buf_info.handles[i]; + + if (handle) + tbm_gem_put_dma_addr(drm_dev, ppdrv->dev, + handle, c_node->filp); + } + + list_del(&m_node->list); + kfree(m_node); + + return 0; +} + +static struct tdm_pp_mem_node + *pp_get_mem_node(struct drm_device 
*drm_dev, + struct tdm_pp_cmd_node *c_node, + struct tdm_pp_queue_buf *qbuf) +{ + struct tdm_pp_mem_node *m_node; + struct tdm_pp_buf_info *buf_info; + struct tdm_ppdrv *ppdrv; + int i; + + ppdrv = pp_find_drv_by_handle(qbuf->prop_id); + if (IS_ERR(ppdrv)) { + DRM_ERROR("failed to get pp driver.\n"); + return ERR_PTR(-EFAULT); + } + + m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); + if (!m_node) + return ERR_PTR(-ENOMEM); + + buf_info = &m_node->buf_info; + + /* operations, buffer id */ + m_node->ops_id = qbuf->ops_id; + m_node->prop_id = qbuf->prop_id; + m_node->buf_id = qbuf->buf_id; + INIT_LIST_HEAD(&m_node->list); + + DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); + DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); + + for_each_pp_planar(i) { + DRM_DEBUG_KMS("i[%d]handle[%d]\n", i, qbuf->handle[i]); + + /* get dma address by handle */ + if (qbuf->handle[i]) { + dma_addr_t *addr; + + addr = tbm_gem_get_dma_addr(drm_dev, ppdrv->dev, + qbuf->handle[i], c_node->filp); + if (IS_ERR(addr)) { + DRM_ERROR("failed to get addr.\n"); + pp_put_mem_node(drm_dev, c_node, m_node); + return ERR_PTR(-EFAULT); + } + + buf_info->handles[i] = qbuf->handle[i]; + buf_info->base[i] = *addr; + DRM_DEBUG_KMS("i[%d]base[0x%x]hdl[%ld]\n", i, + (int)buf_info->base[i], + buf_info->handles[i]); + } + } + + mutex_lock(&c_node->mem_lock); + list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); + mutex_unlock(&c_node->mem_lock); + + return m_node; +} + +static void pp_clean_mem_nodes(struct drm_device *drm_dev, + struct tdm_pp_cmd_node *c_node, int ops) +{ + struct tdm_pp_mem_node *m_node, *tm_node; + struct list_head *head = &c_node->mem_list[ops]; + + mutex_lock(&c_node->mem_lock); + + list_for_each_entry_safe(m_node, tm_node, head, list) { + int ret; + + ret = pp_put_mem_node(drm_dev, c_node, m_node); + if (ret) + DRM_ERROR("failed to put m_node.\n"); + } + + mutex_unlock(&c_node->mem_lock); +} + +static void pp_free_event(struct drm_pending_event *event) +{ + kfree(event); +} + +static int pp_get_event(struct drm_device *drm_dev, + struct tdm_pp_cmd_node *c_node, + struct tdm_pp_queue_buf *qbuf) +{ + struct tdm_pp_send_event *e; + unsigned long flags; + + DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + spin_lock_irqsave(&drm_dev->event_lock, flags); + c_node->filp->event_space += sizeof(e->event); + spin_unlock_irqrestore(&drm_dev->event_lock, flags); + return -ENOMEM; + } + + /* make event */ + e->event.base.type = TDM_PP_EVENT; + e->event.base.length = sizeof(e->event); + e->event.user_data = qbuf->user_data; + e->event.prop_id = qbuf->prop_id; + e->event.buf_id[TDM_OPS_DST] = qbuf->buf_id; + e->base.event = &e->event.base; + e->base.file_priv = c_node->filp; + e->base.destroy = pp_free_event; + mutex_lock(&c_node->event_lock); + list_add_tail(&e->base.link, &c_node->event_list); + mutex_unlock(&c_node->event_lock); + + return 0; +} + +static void pp_put_event(struct tdm_pp_cmd_node *c_node, + struct tdm_pp_queue_buf *qbuf) +{ + struct tdm_pp_send_event *e, *te; + int count = 0; + + mutex_lock(&c_node->event_lock); + list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { + DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); + + /* + * qbuf == NULL condition means all event deletion. + * stop operations want to delete all event list. + * another case delete only same buf id. 
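+ *
+ * Only TDM_OPS_DST buffer ids are compared here, because events
+ * are created per destination buffer in pp_get_event().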
+ */ + if (!qbuf) { + /* delete list */ + list_del(&e->base.link); + kfree(e); + } + + /* compare buffer id */ + if (qbuf && (qbuf->buf_id == + e->event.buf_id[TDM_OPS_DST])) { + /* delete list */ + list_del(&e->base.link); + kfree(e); + goto out_unlock; + } + } + +out_unlock: + mutex_unlock(&c_node->event_lock); +} + +static void pp_clean_cmd_node(struct pp_context *ctx, + struct tdm_pp_cmd_node *c_node) +{ + int i; + + /* cancel works */ + cancel_work_sync(&c_node->start_work->work); + cancel_work_sync(&c_node->stop_work->work); + cancel_work_sync(&c_node->event_work->work); + + /* put event */ + pp_put_event(c_node, NULL); + + for_each_pp_ops(i) + pp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i); + + /* delete list */ + list_del(&c_node->list); + + pp_remove_id(&ctx->prop_idr, &ctx->prop_lock, + c_node->property.prop_id); + + /* destroy mutex */ + mutex_destroy(&c_node->lock); + mutex_destroy(&c_node->mem_lock); + mutex_destroy(&c_node->event_lock); + + /* free command node */ + if (c_node->property.type & PP_EVENT_DRIVEN) { + /* free command node */ + kfree(c_node->start_work); + kfree(c_node->stop_work); + } + + kfree(c_node->event_work); + kfree(c_node); +} + +static bool pp_check_mem_list(struct tdm_pp_cmd_node *c_node) +{ + switch (c_node->property.cmd) { + case PP_CMD_WB: + return !list_empty(&c_node->mem_list[TDM_OPS_DST]); + case PP_CMD_OUTPUT: + return !list_empty(&c_node->mem_list[TDM_OPS_SRC]); + case PP_CMD_M2M: + default: + return !list_empty(&c_node->mem_list[TDM_OPS_SRC]) && + !list_empty(&c_node->mem_list[TDM_OPS_DST]); + } +} + +static struct tdm_pp_mem_node + *pp_find_mem_node(struct tdm_pp_cmd_node *c_node, + struct tdm_pp_queue_buf *qbuf) +{ + struct tdm_pp_mem_node *m_node; + struct list_head *head; + int count = 0; + + DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id); + + /* source/destination memory list */ + head = &c_node->mem_list[qbuf->ops_id]; + + /* find memory node from memory list */ + list_for_each_entry(m_node, head, list) { + DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); + + /* compare buffer id */ + if (m_node->buf_id == qbuf->buf_id) + return m_node; + } + + return NULL; +} + +static int pp_set_mem_node(struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node, + struct tdm_pp_mem_node *m_node) +{ + struct tdm_pp_ops *ops = NULL; + int ret = 0; + + DRM_DEBUG_KMS("node[%p]\n", m_node); + + if (!m_node) { + DRM_ERROR("invalid queue node.\n"); + return -EFAULT; + } + + DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); + + /* get operations callback */ + ops = ppdrv->ops[m_node->ops_id]; + if (!ops) { + DRM_ERROR("not support ops.\n"); + return -EFAULT; + } + + /* set address and enable irq */ + if (ops->set_addr) { + ret = ops->set_addr(ppdrv->dev, &m_node->buf_info, + m_node->buf_id, PP_BUF_ENQUEUE); + if (ret) { + DRM_ERROR("failed to set addr.\n"); + return ret; + } + } + + return ret; +} + +static void pp_handle_cmd_work(struct device *dev, + struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_work *cmd_work, + struct tdm_pp_cmd_node *c_node) +{ + struct pp_context *ctx = get_pp_context(dev); + + cmd_work->ppdrv = ppdrv; + cmd_work->c_node = c_node; + queue_work(ctx->cmd_workq, &cmd_work->work); +} + +static int pp_queue_buf_with_run(struct device *dev, + struct tdm_pp_cmd_node *c_node, + struct tdm_pp_mem_node *m_node, + struct tdm_pp_queue_buf *qbuf) +{ + struct tdm_ppdrv *ppdrv; + struct tdm_pp_property *property; + struct tdm_pp_ops *ops; + int ret; + + ppdrv = pp_find_drv_by_handle(qbuf->prop_id); + if (IS_ERR(ppdrv)) { + DRM_ERROR("failed to get 
pp driver.\n"); + return -EFAULT; + } + + ops = ppdrv->ops[qbuf->ops_id]; + if (!ops) { + DRM_ERROR("failed to get ops.\n"); + return -EFAULT; + } + + property = &c_node->property; + + if (c_node->state != PP_STATE_START) { + DRM_DEBUG_KMS("bypass for invalid state.\n"); + return 0; + } + + mutex_lock(&c_node->mem_lock); + if (!pp_check_mem_list(c_node)) { + mutex_unlock(&c_node->mem_lock); + DRM_DEBUG_KMS("empty memory.\n"); + return 0; + } + + /* + * If set destination buffer and enabled clock, + * then m2m operations need start operations at queue_buf + */ + if (pp_is_m2m_cmd(property->cmd)) { + if (property->type & PP_EVENT_DRIVEN) { + struct tdm_pp_cmd_work *cmd_work = c_node->start_work; + + cmd_work->ctrl = PP_CTRL_PLAY; + pp_handle_cmd_work(dev, ppdrv, cmd_work, c_node); + } else { + mutex_lock(&ppdrv->drv_lock); + + reinit_completion(&c_node->start_complete); + + ret = pp_start_property(ppdrv, c_node); + if (ret) { + DRM_INFO("%s:failed to start property:id[%d]\n" + , __func__, c_node->property.prop_id); + pp_stop_property(ppdrv->drm_dev, ppdrv, c_node); + } + + mutex_unlock(&ppdrv->drv_lock); + } + } else { + ret = pp_set_mem_node(ppdrv, c_node, m_node); + if (ret) { + mutex_unlock(&c_node->mem_lock); + DRM_ERROR("failed to set m node.\n"); + return ret; + } + } + mutex_unlock(&c_node->mem_lock); + + return 0; +} + +static void pp_clean_queue_buf(struct drm_device *drm_dev, + struct tdm_pp_cmd_node *c_node, + struct tdm_pp_queue_buf *qbuf) +{ + struct tdm_pp_mem_node *m_node, *tm_node; + + /* delete list */ + mutex_lock(&c_node->mem_lock); + list_for_each_entry_safe(m_node, tm_node, + &c_node->mem_list[qbuf->ops_id], list) { + if (m_node->buf_id == qbuf->buf_id && + m_node->ops_id == qbuf->ops_id) + pp_put_mem_node(drm_dev, c_node, m_node); + } + mutex_unlock(&c_node->mem_lock); +} + +int tdm_pp_queue_buf(struct drm_device *drm_dev, void *data, + struct drm_file *file) +{ + struct tgm_drv_file_private *file_priv = file->driver_priv; + struct device *dev = file_priv->pp_dev; + struct pp_context *ctx = get_pp_context(dev); + struct tdm_pp_queue_buf *qbuf = data; + struct tdm_pp_cmd_node *c_node; + struct tdm_pp_mem_node *m_node; + int ret; + + if (!qbuf) { + DRM_ERROR("invalid buf parameter.\n"); + return -EINVAL; + } + + if (qbuf->ops_id >= TDM_OPS_MAX) { + DRM_ERROR("invalid ops parameter.\n"); + return -EINVAL; + } + + DRM_INFO("%s:prop_id[%d]ops_id[%s]buf_id[%d][%s]\n", + __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src", + qbuf->buf_id, qbuf->buf_type ? "dq" : "eq"); + + /* find command node */ + c_node = pp_find_obj(&ctx->prop_idr, &ctx->prop_lock, + qbuf->prop_id); + if (!c_node || c_node->filp != file) { + DRM_ERROR("failed to get command node.\n"); + return -ENODEV; + } + + /* buffer control */ + switch (qbuf->buf_type) { + case PP_BUF_ENQUEUE: + /* get memory node */ + m_node = pp_get_mem_node(drm_dev, c_node, qbuf); + if (IS_ERR(m_node)) { + DRM_ERROR("failed to get m_node.\n"); + return PTR_ERR(m_node); + } + + /* + * first step get event for destination buffer. + * and second step when M2M case run with destination buffer + * if needed. + */ + if (qbuf->ops_id == TDM_OPS_DST) { + /* get event for destination buffer */ + ret = pp_get_event(drm_dev, c_node, qbuf); + if (ret) { + DRM_ERROR("failed to get event.\n"); + goto err_clean_node; + } + + /* + * M2M case run play control for streaming feature. + * other case set address and waiting. 
+ */ + ret = pp_queue_buf_with_run(dev, c_node, m_node, qbuf); + if (ret) { + DRM_ERROR("failed to run command.\n"); + goto err_clean_node; + } + } + break; + case PP_BUF_DEQUEUE: + mutex_lock(&c_node->lock); + + /* put event for destination buffer */ + if (qbuf->ops_id == TDM_OPS_DST) + pp_put_event(c_node, qbuf); + + pp_clean_queue_buf(drm_dev, c_node, qbuf); + + mutex_unlock(&c_node->lock); + break; + default: + DRM_ERROR("invalid buffer control.\n"); + return -EINVAL; + } + + return 0; + +err_clean_node: + DRM_ERROR("clean memory nodes.\n"); + + pp_clean_queue_buf(drm_dev, c_node, qbuf); + return ret; +} + +static bool tdm_pp_check_valid(struct device *dev, + enum tdm_pp_ctrl ctrl, enum tdm_pp_state state) +{ + if (ctrl != PP_CTRL_PLAY) { + if (pm_runtime_suspended(dev)) { + DRM_ERROR("pm:runtime_suspended.\n"); + goto err_status; + } + } + + switch (ctrl) { + case PP_CTRL_PLAY: + if (state != PP_STATE_IDLE) + goto err_status; + break; + case PP_CTRL_STOP: + if (state == PP_STATE_STOP) + goto err_status; + break; + case PP_CTRL_PAUSE: + if (state != PP_STATE_START) + goto err_status; + break; + case PP_CTRL_RESUME: + if (state != PP_STATE_STOP) + goto err_status; + break; + default: + DRM_ERROR("invalid state.\n"); + goto err_status; + } + + return true; + +err_status: + DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state); + return false; +} + +int tdm_pp_cmd_ctrl(struct drm_device *drm_dev, void *data, + struct drm_file *file) +{ + struct tgm_drv_file_private *file_priv = file->driver_priv; + struct tdm_ppdrv *ppdrv = NULL; + struct device *dev = file_priv->pp_dev; + struct pp_context *ctx = get_pp_context(dev); + struct tdm_pp_cmd_ctrl *cmd_ctrl = data; + struct tdm_pp_cmd_work *cmd_work; + struct tdm_pp_cmd_node *c_node; + struct tdm_pp_property *property; + int ret = 0; + + if (!ctx) { + DRM_ERROR("invalid context.\n"); + return -EINVAL; + } + + if (!cmd_ctrl) { + DRM_ERROR("invalid control parameter.\n"); + return -EINVAL; + } + + DRM_INFO("%s:ctrl[%d]prop_id[%d]\n", __func__, + cmd_ctrl->ctrl, cmd_ctrl->prop_id); + + ppdrv = pp_find_drv_by_handle(cmd_ctrl->prop_id); + if (IS_ERR(ppdrv)) { + DRM_ERROR("failed to get pp driver.\n"); + return PTR_ERR(ppdrv); + } + + c_node = pp_find_obj(&ctx->prop_idr, &ctx->prop_lock, + cmd_ctrl->prop_id); + if (!c_node || c_node->filp != file) { + DRM_ERROR("invalid command node list.\n"); + return -ENODEV; + } + + property = &c_node->property; + + if (!tdm_pp_check_valid(ppdrv->dev, cmd_ctrl->ctrl, + c_node->state)) { + DRM_ERROR("invalid state.\n"); + return -EINVAL; + } + + switch (cmd_ctrl->ctrl) { + case PP_CTRL_PLAY: + if (pm_runtime_suspended(ppdrv->dev)) + pm_runtime_get_sync(ppdrv->dev); + + c_node->state = PP_STATE_START; + + if (property->type & PP_EVENT_DRIVEN) { + cmd_work = c_node->start_work; + cmd_work->ctrl = cmd_ctrl->ctrl; + pp_handle_cmd_work(dev, ppdrv, cmd_work, c_node); + } else { + mutex_lock(&ppdrv->drv_lock); + + reinit_completion(&c_node->start_complete); + + ret = pp_start_property(ppdrv, c_node); + if (ret) { + DRM_INFO("%s:failed to start property:id[%d]\n" + , __func__, property->prop_id); + mutex_unlock(&ppdrv->drv_lock); + goto err; + } + mutex_unlock(&ppdrv->drv_lock); + } + break; + case PP_CTRL_STOP: + if (property->type & PP_EVENT_DRIVEN) { + cmd_work = c_node->stop_work; + cmd_work->ctrl = cmd_ctrl->ctrl; + pp_handle_cmd_work(dev, ppdrv, cmd_work, c_node); + + if (!wait_for_completion_timeout(&c_node->stop_complete, + msecs_to_jiffies(300))) { + DRM_ERROR("timeout stop:prop_id[%d]\n", + 
c_node->property.prop_id); + } + } else { + ret = pp_stop_property(ppdrv->drm_dev, ppdrv, + c_node); + if (ret) { + DRM_ERROR("failed to stop property.\n"); + goto err; + } + } + + c_node->state = PP_STATE_STOP; + ppdrv->dedicated = false; + mutex_lock(&ppdrv->cmd_lock); + pp_clean_cmd_node(ctx, c_node); + + if (list_empty(&ppdrv->cmd_list)) + pm_runtime_put_sync(ppdrv->dev); + mutex_unlock(&ppdrv->cmd_lock); + break; + case PP_CTRL_PAUSE: + if (property->type & PP_EVENT_DRIVEN) { + cmd_work = c_node->stop_work; + cmd_work->ctrl = cmd_ctrl->ctrl; + pp_handle_cmd_work(dev, ppdrv, cmd_work, c_node); + + if (!wait_for_completion_timeout(&c_node->stop_complete, + msecs_to_jiffies(300))) { + DRM_ERROR("timeout stop:prop_id[%d]\n", + c_node->property.prop_id); + } + } else { + ret = pp_stop_property(ppdrv->drm_dev, ppdrv, + c_node); + if (ret) { + DRM_ERROR("failed to stop property.\n"); + goto err; + } + } + + c_node->state = PP_STATE_STOP; + break; + case PP_CTRL_RESUME: + c_node->state = PP_STATE_START; + + if (property->type & PP_EVENT_DRIVEN) { + cmd_work = c_node->start_work; + cmd_work->ctrl = cmd_ctrl->ctrl; + pp_handle_cmd_work(dev, ppdrv, cmd_work, c_node); + } else { + mutex_lock(&ppdrv->drv_lock); + + reinit_completion(&c_node->start_complete); + + ret = pp_start_property(ppdrv, c_node); + if (ret) { + DRM_INFO("%s:failed to start property:id[%d]\n" + , __func__, property->prop_id); + mutex_unlock(&ppdrv->drv_lock); + goto err; + } + mutex_unlock(&ppdrv->drv_lock); + } + break; + default: + DRM_ERROR("could not support this state currently.\n"); + return -EINVAL; + } + + DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n", + cmd_ctrl->ctrl, cmd_ctrl->prop_id); +err: + return ret; +} + +int tdm_ppnb_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register( + &tdm_ppnb_list, nb); +} + +int tdm_ppnb_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister( + &tdm_ppnb_list, nb); +} + +int tdm_ppnb_send_event(unsigned long val, void *v) +{ + return blocking_notifier_call_chain( + &tdm_ppnb_list, val, v); +} + +static int pp_set_property(struct tdm_ppdrv *ppdrv, + struct tdm_pp_property *property) +{ + struct tdm_pp_ops *ops = NULL; + bool swap = false; + int ret, i; + + if (!property) { + DRM_ERROR("invalid property parameter.\n"); + return -EINVAL; + } + + DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); + + /* reset h/w block */ + if (ppdrv->reset && + ppdrv->reset(ppdrv->dev)) { + return -EINVAL; + } + + /* set source,destination operations */ + for_each_pp_ops(i) { + struct tdm_pp_config *config = + &property->config[i]; + + ops = ppdrv->ops[i]; + if (!ops || !config) { + DRM_ERROR("not support ops and config.\n"); + return -EINVAL; + } + + /* set format */ + if (ops->set_fmt) { + ret = ops->set_fmt(ppdrv->dev, config->fmt); + if (ret) + return ret; + } + + /* set transform for rotation, flip */ + if (ops->set_transf) { + ret = ops->set_transf(ppdrv->dev, config->degree, + config->flip, &swap); + if (ret) + return ret; + } + + /* set size */ + if (ops->set_size) { + ret = ops->set_size(ppdrv->dev, swap, &config->pos, + &config->sz); + if (ret) + return ret; + } + } + + return 0; +} + +int pp_start_property(struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node) +{ + struct tdm_pp_mem_node *m_node; + struct tdm_pp_property *property = &c_node->property; + struct list_head *head; + int ret, i; + + DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); + + /* store command info in ppdrv */ + ppdrv->c_node = c_node; + + 
mutex_lock(&c_node->mem_lock); + if (!pp_check_mem_list(c_node)) { + DRM_DEBUG_KMS("empty memory.\n"); + ret = -ENOMEM; + goto err_unlock; + } + + /* set current property in ppdrv */ + ret = pp_set_property(ppdrv, property); + if (ret) { + DRM_ERROR("failed to set property.\n"); + ppdrv->c_node = NULL; + goto err_unlock; + } + + /* check command */ + switch (property->cmd) { + case PP_CMD_M2M: + for_each_pp_ops(i) { + /* source/destination memory list */ + head = &c_node->mem_list[i]; + + m_node = list_first_entry(head, + struct tdm_pp_mem_node, list); + + DRM_DEBUG_KMS("m_node[%p]\n", m_node); + + ret = pp_set_mem_node(ppdrv, c_node, m_node); + if (ret) { + DRM_ERROR("failed to set m node.\n"); + goto err_unlock; + } + } + break; + case PP_CMD_WB: + /* destination memory list */ + head = &c_node->mem_list[TDM_OPS_DST]; + + list_for_each_entry(m_node, head, list) { + ret = pp_set_mem_node(ppdrv, c_node, m_node); + if (ret) { + DRM_ERROR("failed to set m node.\n"); + goto err_unlock; + } + } + break; + case PP_CMD_OUTPUT: + /* source memory list */ + head = &c_node->mem_list[TDM_OPS_SRC]; + + list_for_each_entry(m_node, head, list) { + ret = pp_set_mem_node(ppdrv, c_node, m_node); + if (ret) { + DRM_ERROR("failed to set m node.\n"); + goto err_unlock; + } + } + break; + default: + DRM_ERROR("invalid operations.\n"); + ret = -EINVAL; + goto err_unlock; + } + mutex_unlock(&c_node->mem_lock); + + DRM_DEBUG_KMS("cmd[%d]\n", property->cmd); + + /* start operations */ + if (ppdrv->start) { + ret = ppdrv->start(ppdrv->dev, property->cmd); + if (ret) { + DRM_ERROR("failed to start ops.\n"); + ppdrv->c_node = NULL; + return ret; + } + } + + return 0; + +err_unlock: + mutex_unlock(&c_node->mem_lock); + ppdrv->c_node = NULL; + return ret; +} + +int pp_stop_property(struct drm_device *drm_dev, + struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node) +{ + struct tdm_pp_property *property = &c_node->property; + int i; + + DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); + + /* stop operations */ + if (ppdrv->stop) + ppdrv->stop(ppdrv->dev, property->cmd); + + /* check command */ + switch (property->cmd) { + case PP_CMD_M2M: + for_each_pp_ops(i) + pp_clean_mem_nodes(drm_dev, c_node, i); + break; + case PP_CMD_WB: + pp_clean_mem_nodes(drm_dev, c_node, TDM_OPS_DST); + break; + case PP_CMD_OUTPUT: + pp_clean_mem_nodes(drm_dev, c_node, TDM_OPS_SRC); + break; + default: + DRM_ERROR("invalid operations.\n"); + return -EINVAL; + } + + return 0; +} + +void pp_sched_cmd(struct work_struct *work) +{ + struct tdm_pp_cmd_work *cmd_work = + container_of(work, struct tdm_pp_cmd_work, work); + struct tdm_ppdrv *ppdrv; + struct tdm_pp_cmd_node *c_node; + struct tdm_pp_property *property; + int ret; + + ppdrv = cmd_work->ppdrv; + if (!ppdrv) { + DRM_ERROR("invalid ppdrv list.\n"); + return; + } + + mutex_lock(&ppdrv->drv_lock); + + c_node = cmd_work->c_node; + if (!c_node) { + DRM_ERROR("invalid command node list.\n"); + mutex_unlock(&ppdrv->drv_lock); + return; + } + + mutex_lock(&c_node->lock); + + property = &c_node->property; + + switch (cmd_work->ctrl) { + case PP_CTRL_PLAY: + case PP_CTRL_RESUME: + ret = pp_start_property(ppdrv, c_node); + if (ret) { + DRM_ERROR("failed to start property:prop_id[%d]\n", + c_node->property.prop_id); + goto err_unlock; + } + + /* + * M2M case supports wait_completion of transfer. + * because M2M case supports single unit operation + * with multiple queue. + * M2M need to wait completion of data transfer. 
+ */ + if (pp_is_m2m_cmd(property->cmd)) { + if (!wait_for_completion_timeout + (&c_node->start_complete, msecs_to_jiffies(200))) { + DRM_ERROR("timeout event:prop_id[%d]\n", + c_node->property.prop_id); + goto err_unlock; + } + } + break; + case PP_CTRL_STOP: + case PP_CTRL_PAUSE: + ret = pp_stop_property(ppdrv->drm_dev, ppdrv, + c_node); + if (ret) { + DRM_ERROR("failed to stop property.\n"); + goto err_unlock; + } + + complete(&c_node->stop_complete); + break; + default: + DRM_ERROR("unknown control type\n"); + break; + } + + DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl); + +err_unlock: + mutex_unlock(&c_node->lock); + mutex_unlock(&ppdrv->drv_lock); +} + +static int pp_send_event(struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node, int *buf_id) +{ + struct drm_device *drm_dev = ppdrv->drm_dev; + struct tdm_pp_property *property = &c_node->property; + struct tdm_pp_mem_node *m_node; + struct tdm_pp_queue_buf qbuf; + struct tdm_pp_send_event *e; + struct list_head *head; + struct timeval now; + unsigned long flags; + u32 tbuf_id[TDM_OPS_MAX] = {0, }; + int ret, i; + + for_each_pp_ops(i) + DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]); + + if (!drm_dev) { + DRM_ERROR("failed to get drm_dev.\n"); + return -EINVAL; + } + + if (!property) { + DRM_ERROR("failed to get property.\n"); + return -EINVAL; + } + + mutex_lock(&c_node->event_lock); + if (list_empty(&c_node->event_list)) { + DRM_DEBUG_KMS("event list is empty.\n"); + ret = 0; + goto err_event_unlock; + } + + mutex_lock(&c_node->mem_lock); + if (!pp_check_mem_list(c_node)) { + DRM_DEBUG_KMS("empty memory.\n"); + ret = 0; + goto err_mem_unlock; + } + + /* check command */ + switch (property->cmd) { + case PP_CMD_M2M: + for_each_pp_ops(i) { + /* source/destination memory list */ + head = &c_node->mem_list[i]; + + m_node = list_first_entry(head, + struct tdm_pp_mem_node, list); + + tbuf_id[i] = m_node->buf_id; + DRM_DEBUG_KMS("%s buf_id[%d]\n", + i ? "dst" : "src", tbuf_id[i]); + + ret = pp_put_mem_node(drm_dev, c_node, m_node); + if (ret) + DRM_ERROR("failed to put m_node.\n"); + } + break; + case PP_CMD_WB: + /* clear buf for finding */ + memset(&qbuf, 0x0, sizeof(qbuf)); + qbuf.ops_id = TDM_OPS_DST; + qbuf.buf_id = buf_id[TDM_OPS_DST]; + + /* get memory node entry */ + m_node = pp_find_mem_node(c_node, &qbuf); + if (!m_node) { + DRM_ERROR("empty memory node.\n"); + ret = -ENOMEM; + goto err_mem_unlock; + } + + tbuf_id[TDM_OPS_DST] = m_node->buf_id; + + ret = pp_put_mem_node(drm_dev, c_node, m_node); + if (ret) + DRM_ERROR("failed to put m_node.\n"); + break; + case PP_CMD_OUTPUT: + /* source memory list */ + head = &c_node->mem_list[TDM_OPS_SRC]; + + m_node = list_first_entry(head, + struct tdm_pp_mem_node, list); + + tbuf_id[TDM_OPS_SRC] = m_node->buf_id; + + ret = pp_put_mem_node(drm_dev, c_node, m_node); + if (ret) + DRM_ERROR("failed to put m_node.\n"); + break; + default: + DRM_ERROR("invalid operations.\n"); + ret = -EINVAL; + goto err_mem_unlock; + } + mutex_unlock(&c_node->mem_lock); + + if (tbuf_id[TDM_OPS_DST] != buf_id[TDM_OPS_DST]) + DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", + tbuf_id[1], buf_id[1], property->prop_id); + + /* + * command node have event list of destination buffer + * If destination buffer enqueue to mem list, + * then we make event and link to event list tail. + * so, we get first event for first enqueued buffer. 
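+ *
+ * event_lock is held at this point, so the lookup stays
+ * consistent with concurrent pp_get_event()/pp_put_event() calls.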
+ */ + e = list_first_entry(&c_node->event_list, + struct tdm_pp_send_event, base.link); + + do_gettimeofday(&now); + DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + e->event.prop_id = property->prop_id; + + /* set buffer id about source destination */ + for_each_pp_ops(i) + e->event.buf_id[i] = tbuf_id[i]; + + spin_lock_irqsave(&drm_dev->event_lock, flags); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + spin_unlock_irqrestore(&drm_dev->event_lock, flags); + mutex_unlock(&c_node->event_lock); + + DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", + property->cmd, property->prop_id, tbuf_id[TDM_OPS_DST]); + + return 0; + +err_mem_unlock: + mutex_unlock(&c_node->mem_lock); +err_event_unlock: + mutex_unlock(&c_node->event_lock); + return ret; +} + +void pp_sched_event(struct work_struct *work) +{ + struct tdm_pp_event_work *event_work = + container_of(work, struct tdm_pp_event_work, work); + struct tdm_ppdrv *ppdrv; + struct tdm_pp_cmd_node *c_node; + int ret; + + if (!event_work) { + DRM_ERROR("failed to get event_work.\n"); + return; + } + + DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[TDM_OPS_DST]); + + ppdrv = event_work->ppdrv; + if (!ppdrv) { + DRM_ERROR("failed to get pp driver.\n"); + return; + } + + c_node = ppdrv->c_node; + if (!c_node) { + DRM_ERROR("failed to get command node.\n"); + return; + } + + /* + * PP supports command thread, event thread synchronization. + * If PP close immediately from user land, then PP make + * synchronization with command thread, so make complete event. + * or going out operations. + */ + if (c_node->state != PP_STATE_START) { + DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n", + c_node->state, c_node->property.prop_id); + goto err_completion; + } + + ret = pp_send_event(ppdrv, c_node, event_work->buf_id); + if (ret) { + DRM_ERROR("failed to send event.\n"); + goto err_completion; + } + +err_completion: + if (pp_is_m2m_cmd(c_node->property.cmd)) + complete(&c_node->start_complete); +} + +static int pp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) +{ + struct pp_context *ctx = get_pp_context(dev); + struct tdm_ppdrv *ppdrv; + int ret, count = 0; + + /* get pp driver entry */ + list_for_each_entry(ppdrv, &tdm_ppdrv_list, drv_list) { + ppdrv->drm_dev = drm_dev; + + ret = pp_create_id(&ctx->pp_idr, &ctx->pp_lock, ppdrv); + if (ret < 0) { + DRM_ERROR("failed to create id.\n"); + goto err; + } + ppdrv->prop_list.pp_id = ret; + + DRM_DEBUG_KMS("count[%d]ppdrv[%p]pp_id[%d]\n", + count++, ppdrv, ret); + + /* store parent device for node */ + ppdrv->parent_dev = dev; + + /* store event work queue and handler */ + ppdrv->event_workq = ctx->event_workq; + ppdrv->sched_event = pp_sched_event; + INIT_LIST_HEAD(&ppdrv->cmd_list); + mutex_init(&ppdrv->cmd_lock); + mutex_init(&ppdrv->drv_lock); + /*ToDo: need to check drm_iommu_attach_device */ + } + + return 0; + +err: + /* get pp driver entry */ + list_for_each_entry_continue_reverse(ppdrv, &tdm_ppdrv_list, + drv_list) { + pp_remove_id(&ctx->pp_idr, &ctx->pp_lock, + ppdrv->prop_list.pp_id); + } + + return ret; +} + +static void pp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) +{ + struct tdm_ppdrv *ppdrv, *t; + struct pp_context *ctx = get_pp_context(dev); + + /* get pp driver entry */ + list_for_each_entry_safe(ppdrv, t, &tdm_ppdrv_list, drv_list) { + pp_remove_id(&ctx->pp_idr, &ctx->pp_lock, + ppdrv->prop_list.pp_id); 
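+		/*
+		 * detach from the drm device before dropping the
+		 * driver from the global ppdrv list.
+		 */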
+ + ppdrv->drm_dev = NULL; + tdm_ppdrv_unregister(ppdrv); + } +} + +static int pp_subdrv_open(struct drm_device *drm_dev, struct device *dev, + struct drm_file *file) +{ + struct tgm_drv_file_private *file_priv = file->driver_priv; + + file_priv->pp_dev = dev; + + DRM_DEBUG_KMS("done priv[%p]\n", dev); + + return 0; +} + +static void pp_subdrv_close(struct drm_device *drm_dev, struct device *dev, + struct drm_file *file) +{ + struct tdm_ppdrv *ppdrv = NULL; + struct pp_context *ctx = get_pp_context(dev); + struct tdm_pp_cmd_node *c_node, *tc_node; + int count = 0; + + list_for_each_entry(ppdrv, &tdm_ppdrv_list, drv_list) { + mutex_lock(&ppdrv->cmd_lock); + list_for_each_entry_safe(c_node, tc_node, + &ppdrv->cmd_list, list) { + DRM_DEBUG_KMS("count[%d]ppdrv[%p]\n", + count++, ppdrv); + + if (c_node->filp == file) { + /* + * userland goto unnormal state. process killed. + * and close the file. + * so, PP didn't called stop cmd ctrl. + * so, we are make stop operation in this state. + */ + if (c_node->state == PP_STATE_START) { + pp_stop_property(drm_dev, ppdrv, + c_node); + c_node->state = PP_STATE_STOP; + } + + ppdrv->dedicated = false; + pp_clean_cmd_node(ctx, c_node); + if (list_empty(&ppdrv->cmd_list)) + pm_runtime_put_sync(ppdrv->dev); + } + } + mutex_unlock(&ppdrv->cmd_lock); + } +} + +static int pp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pp_context *ctx; + struct tgm_subdrv *subdrv; + int ret; + + dev_info(dev, "%s\n", __func__); + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + mutex_init(&ctx->pp_lock); + mutex_init(&ctx->prop_lock); + + idr_init(&ctx->pp_idr); + idr_init(&ctx->prop_idr); + + /* + * create single thread for pp event + * PP supports event thread for PP drivers. + * PP driver send event_work to this thread. + * and PP event thread send event to user process. + */ + ctx->event_workq = create_singlethread_workqueue("pp_event"); + if (!ctx->event_workq) { + dev_err(dev, "failed to create event workqueue\n"); + return -EINVAL; + } + + /* + * create single thread for pp command + * PP supports command thread for user process. + * user process make command node using set property ioctl. + * and make start_work and send this work to command thread. + * and then this command thread start property. 
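+ *
+ * The command thread is only used for properties created with
+ * the PP_EVENT_DRIVEN type; other properties run synchronously
+ * in the ioctl context.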
+ */ + ctx->cmd_workq = create_singlethread_workqueue("pp_cmd"); + if (!ctx->cmd_workq) { + dev_err(dev, "failed to create cmd workqueue\n"); + ret = -EINVAL; + goto err_event_workq; + } + + /* set sub driver information */ + subdrv = &ctx->subdrv; + subdrv->dev = dev; + subdrv->probe = pp_subdrv_probe; + subdrv->remove = pp_subdrv_remove; + subdrv->open = pp_subdrv_open; + subdrv->close = pp_subdrv_close; + + platform_set_drvdata(pdev, ctx); + + ret = tgm_subdrv_register(subdrv); + if (ret < 0) { + DRM_ERROR("failed to register drm pp device.\n"); + goto err_cmd_workq; + } + + dev_info(dev, "drm pp registered successfully.\n"); + + return 0; + +err_cmd_workq: + destroy_workqueue(ctx->cmd_workq); +err_event_workq: + destroy_workqueue(ctx->event_workq); + return ret; +} + +static int pp_remove(struct platform_device *pdev) +{ + struct pp_context *ctx = platform_get_drvdata(pdev); + + /* unregister sub driver */ + tgm_subdrv_unregister(&ctx->subdrv); + + /* remove,destroy pp idr */ + idr_destroy(&ctx->pp_idr); + idr_destroy(&ctx->prop_idr); + + mutex_destroy(&ctx->pp_lock); + mutex_destroy(&ctx->prop_lock); + + /* destroy command, event work queue */ + destroy_workqueue(ctx->cmd_workq); + destroy_workqueue(ctx->event_workq); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id tdm_pp_dt_match[] = { + {.compatible = "tdm,tdm_pp",}, + {} +}; +MODULE_DEVICE_TABLE(of, tdm_pp_dt_match); +#endif + +struct platform_driver pp_driver = { + .probe = pp_probe, + .remove = pp_remove, + .driver = { + .name = "tdm-pp", + .owner = THIS_MODULE, +#ifdef CONFIG_OF + .of_match_table = tdm_pp_dt_match, +#endif + }, +}; + diff --git a/drivers/gpu/drm/tgm/tdm_pp.h b/drivers/gpu/drm/tgm/tdm_pp.h new file mode 100644 index 0000000..c06d3c3 --- /dev/null +++ b/drivers/gpu/drm/tgm/tdm_pp.h @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2016 Samsung Electronics Co., Ltd. + * + * Authors: + * Eunchul Kim + * Jinyoung Jeon + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _TDM_PP_H_ +#define _TDM_PP_H_ + +#define for_each_pp_ops(pos) \ + for (pos = 0; pos < TDM_OPS_MAX; pos++) +#define for_each_pp_planar(pos) \ + for (pos = 0; pos < TDM_PLANAR_MAX; pos++) + +#define PP_GET_LCD_WIDTH _IOR('F', 302, int) +#define PP_GET_LCD_HEIGHT _IOR('F', 303, int) +#define PP_SET_WRITEBACK _IOW('F', 304, u32) + +/* definition of state */ +enum tdm_pp_state { + PP_STATE_IDLE, + PP_STATE_START, + PP_STATE_STOP, +}; + +/* + * A structure of command work information. + * @work: work structure. + * @ppdrv: current work ppdrv. + * @c_node: command node information. + * @ctrl: command control. + */ +struct tdm_pp_cmd_work { + struct work_struct work; + struct tdm_ppdrv *ppdrv; + struct tdm_pp_cmd_node *c_node; + enum tdm_pp_ctrl ctrl; +}; + +/* + * A structure of command node. + * + * @list: list head to command queue information. + * @event_list: list head of event. + * @mem_list: list head to source,destination memory queue information. + * @lock: lock for synchronization of access to ioctl. + * @mem_lock: lock for synchronization of access to memory nodes. + * @event_lock: lock for synchronization of access to scheduled event. + * @start_complete: completion of start of command. + * @stop_complete: completion of stop of command. + * @property: property information. 
+ * @start_work: start command work structure. + * @stop_work: stop command work structure. + * @event_work: event work structure. + * @state: state of command node. + * @filp: associated file pointer. + */ +struct tdm_pp_cmd_node { + struct list_head list; + struct list_head event_list; + struct list_head mem_list[TDM_OPS_MAX]; + struct mutex lock; + struct mutex mem_lock; + struct mutex event_lock; + struct completion start_complete; + struct completion stop_complete; + struct tdm_pp_property property; + struct tdm_pp_cmd_work *start_work; + struct tdm_pp_cmd_work *stop_work; + struct tdm_pp_event_work *event_work; + enum tdm_pp_state state; + struct drm_file *filp; +}; + +/* + * A structure of buffer information. + * + * @handles: Y, Cb, Cr each gem object handle. + * @base: Y, Cb, Cr each planar address. + */ +struct tdm_pp_buf_info { + unsigned long handles[TDM_PLANAR_MAX]; + dma_addr_t base[TDM_PLANAR_MAX]; +}; + +/* + * A structure of wb setting information. + * + * @enable: enable flag for wb. + * @refresh: HZ of the refresh rate. + */ +struct tdm_pp_set_wb { + __u32 enable; + __u32 refresh; +}; + +/* + * A structure of event work information. + * + * @work: work structure. + * @ppdrv: current work ppdrv. + * @buf_id: id of src, dst buffer. + */ +struct tdm_pp_event_work { + struct work_struct work; + struct tdm_ppdrv *ppdrv; + u32 buf_id[TDM_OPS_MAX]; +}; + +/* + * A structure of source,destination operations. + * + * @set_fmt: set format of image. + * @set_transf: set transform(rotations, flip). + * @set_size: set size of region. + * @set_addr: set address for dma. + */ +struct tdm_pp_ops { + int (*set_fmt)(struct device *dev, u32 fmt); + int (*set_transf)(struct device *dev, + enum tdm_degree degree, + enum tdm_flip flip, bool *swap); + int (*set_size)(struct device *dev, int swap, + struct tdm_pos *pos, struct tdm_sz *sz); + int (*set_addr)(struct device *dev, + struct tdm_pp_buf_info *buf_info, u32 buf_id, + enum tdm_pp_buf_type buf_type); +}; + +/* + * A structure of pp driver. + * + * @drv_list: list head for registed sub driver information. + * @parent_dev: parent device information. + * @dev: platform device. + * @drm_dev: drm device. + * @dedicated: dedicated pp device. + * @ops: source, destination operations. + * @event_workq: event work queue. + * @c_node: current command information. + * @cmd_list: list head for command information. + * @cmd_lock: lock for synchronization of access to cmd_list. + * @drv_lock: lock for synchronization of access to start operation. + * @prop_list: property information of current pp driver. + * @check_property: check property about format, size, buffer. + * @reset: reset pp block. + * @start: pp each device start. + * @stop: pp each device stop. + * @sched_event: work schedule handler. 
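+ *
+ * A hedged registration sketch for a backend driver; the my_pp_*
+ * names are hypothetical and not part of this patch:
+ *
+ *	static struct tdm_ppdrv my_ppdrv = {
+ *		.ops[TDM_OPS_SRC] = &my_src_ops,
+ *		.ops[TDM_OPS_DST] = &my_dst_ops,
+ *		.check_property = my_pp_check_property,
+ *		.reset = my_pp_reset,
+ *		.start = my_pp_start,
+ *		.stop = my_pp_stop,
+ *	};
+ *	...
+ *	my_ppdrv.dev = &pdev->dev;
+ *	tdm_ppdrv_register(&my_ppdrv);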
+ */ +struct tdm_ppdrv { + struct list_head drv_list; + struct device *parent_dev; + struct device *dev; + struct drm_device *drm_dev; + bool dedicated; + struct tdm_pp_ops *ops[TDM_OPS_MAX]; + struct workqueue_struct *event_workq; + struct tdm_pp_cmd_node *c_node; + struct list_head cmd_list; + struct mutex cmd_lock; + struct mutex drv_lock; + struct tdm_pp_prop_list prop_list; + + int (*check_property)(struct device *dev, + struct tdm_pp_property *property); + int (*reset)(struct device *dev); + int (*start)(struct device *dev, enum tdm_pp_cmd cmd); + void (*stop)(struct device *dev, enum tdm_pp_cmd cmd); + void (*sched_event)(struct work_struct *work); +}; + +#ifdef CONFIG_DRM_TDM_PP +extern int tdm_ppdrv_register(struct tdm_ppdrv *ppdrv); +extern int tdm_ppdrv_unregister(struct tdm_ppdrv *ppdrv); +extern int tdm_pp_get_property(struct drm_device *drm_dev, void *data, + struct drm_file *file); +extern int tdm_pp_set_property(struct drm_device *drm_dev, void *data, + struct drm_file *file); +extern int tdm_pp_queue_buf(struct drm_device *drm_dev, void *data, + struct drm_file *file); +extern int tdm_pp_cmd_ctrl(struct drm_device *drm_dev, void *data, + struct drm_file *file); +extern int tdm_ppnb_register(struct notifier_block *nb); +extern int tdm_ppnb_unregister(struct notifier_block *nb); +extern int tdm_ppnb_send_event(unsigned long val, void *v); +extern void pp_sched_cmd(struct work_struct *work); +extern void pp_sched_event(struct work_struct *work); +extern int pp_start_property(struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node); +extern int pp_stop_property(struct drm_device *drm_dev, + struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node); +#else +static inline int tdm_ppdrv_register(struct tdm_ppdrv *ppdrv) +{ + return -ENODEV; +} + +static inline int tdm_ppdrv_unregister(struct tdm_ppdrv *ppdrv) +{ + return -ENODEV; +} + +static inline int tdm_pp_get_property(struct drm_device *drm_dev, + void *data, + struct drm_file *file_priv) +{ + return -ENOTTY; +} + +static inline int tdm_pp_set_property(struct drm_device *drm_dev, + void *data, + struct drm_file *file_priv) +{ + return -ENOTTY; +} + +static inline int tdm_pp_queue_buf(struct drm_device *drm_dev, + void *data, + struct drm_file *file) +{ + return -ENOTTY; +} + +static inline int tdm_pp_cmd_ctrl(struct drm_device *drm_dev, + void *data, + struct drm_file *file) +{ + return -ENOTTY; +} + +static inline int tdm_ppnb_register(struct notifier_block *nb) +{ + return -ENODEV; +} + +static inline int tdm_ppnb_unregister(struct notifier_block *nb) +{ + return -ENODEV; +} + +static inline int tdm_ppnb_send_event(unsigned long val, void *v) +{ + return -ENOTTY; +} + +static inline int pp_start_property(struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node) +{ + return -ENOTTY; +} +static inline int pp_stop_property(struct drm_device *drm_dev, + struct tdm_ppdrv *ppdrv, + struct tdm_pp_cmd_node *c_node) +{ + return -ENOTTY; +} +#endif + +#endif /* _TDM_PP_H_ */ + + diff --git a/drivers/gpu/drm/tgm/tgm_drv.c b/drivers/gpu/drm/tgm/tgm_drv.c index bd85700..ab9de26 100644 --- a/drivers/gpu/drm/tgm/tgm_drv.c +++ b/drivers/gpu/drm/tgm/tgm_drv.c @@ -171,6 +171,12 @@ static int tgm_drv_load(struct drm_device *drm_dev, unsigned long flags) return -EINVAL; } + ret = tgm_device_subdrv_probe(drm_dev); + if (ret) { + DRM_ERROR("failed to probe subdrv.\n"); + return -EINVAL; + } + ret = drm_debugfs_create_files(tgm_debugfs_list, TGM_DEBUGFS_ENTRIES, minor->debugfs_root, minor); @@ -182,6 +188,7 @@ static int 
tgm_drv_unload(struct drm_device *drm_dev) { DRM_INFO("%s\n", __func__); + tgm_device_subdrv_remove(drm_dev); component_unbind_all(drm_dev->dev, drm_dev); drm_debugfs_remove_files(tgm_debugfs_list, TGM_DEBUGFS_ENTRIES, drm_dev->primary); @@ -201,13 +208,15 @@ static int tgm_drv_open(struct drm_device *dev, struct drm_file *file) file->driver_priv = file_priv; - return 0; + return tgm_subdrv_open(dev, file); } static void tgm_drv_preclose(struct drm_device *dev, struct drm_file *file) { DRM_DEBUG("%s\n", __func__); + + tgm_subdrv_close(dev, file); } static void tgm_drv_postclose(struct drm_device *dev, struct drm_file *file) @@ -237,6 +246,16 @@ static struct drm_ioctl_desc tgm_ioctls[] = { DRM_UNLOCKED | DRM_AUTH), DRM_IOCTL_DEF_DRV(TBM_GEM_CPU_FINI, tbm_gem_cpu_fini_ioctl, DRM_UNLOCKED | DRM_AUTH), +#ifdef CONFIG_DRM_TDM_PP + DRM_IOCTL_DEF_DRV(TDM_PP_GET_PROPERTY, + tdm_pp_get_property, DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(TDM_PP_SET_PROPERTY, + tdm_pp_set_property, DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(TDM_PP_QUEUE_BUF, + tdm_pp_queue_buf, DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(TDM_PP_CMD_CTRL, + tdm_pp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH), +#endif #ifdef CONFIG_DRM_TDM_DPMS_CTRL DRM_IOCTL_DEF_DRV(TDM_DPMS_CONTROL, tdm_dpms_ioctl, DRM_MASTER), @@ -585,6 +604,12 @@ static int __init tgm_drv_init(void) DRM_DEBUG("%s\n", __func__); +#ifdef CONFIG_DRM_TDM_PP + ret = platform_driver_register(&pp_driver); + if (ret < 0) + goto out_pp_driver; +#endif + ret = platform_driver_register(&tgm_driver); if (ret) goto out_tgm_drv; @@ -592,7 +617,10 @@ static int __init tgm_drv_init(void) return 0; out_tgm_drv: - +#ifdef CONFIG_DRM_TDM_PP + platform_driver_unregister(&pp_driver); +out_pp_driver: +#endif return ret; } @@ -601,8 +629,13 @@ static void __exit tgm_drv_exit(void) DRM_INFO("%s\n", __func__); platform_driver_unregister(&tgm_driver); + +#ifdef CONFIG_DRM_TDM_PP + platform_driver_unregister(&pp_driver); +#endif } + late_initcall(tgm_drv_init); module_exit(tgm_drv_exit); diff --git a/drivers/gpu/drm/tgm/tgm_drv.h b/drivers/gpu/drm/tgm/tgm_drv.h index a0f0ce0..03beed2 100644 --- a/drivers/gpu/drm/tgm/tgm_drv.h +++ b/drivers/gpu/drm/tgm/tgm_drv.h @@ -31,6 +31,7 @@ struct tgm_drv_private { struct tgm_drv_file_private { pid_t pid; pid_t tgid; + struct device *pp_dev; }; struct tgm_subdrv { @@ -58,4 +59,5 @@ int tgm_device_subdrv_remove(struct drm_device *dev); int tgm_subdrv_open(struct drm_device *dev, struct drm_file *file); void tgm_subdrv_close(struct drm_device *dev, struct drm_file *file); +extern struct platform_driver pp_driver; #endif /* _TGM_DRV_H_ */
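
For reference, the userland flow implied by the new TDM_PP ioctls is
roughly the following. This is a hedged sketch only: the
DRM_IOCTL_TDM_PP_* request macros and the exact uapi struct layouts
live in headers outside this diff, so those names are assumptions
rather than part of this patch.

	#include <sys/ioctl.h>
	/* assumed uapi header with the tdm_pp_* structs and ioctl
	 * request macros used below */
	#include <drm/tdm_drm.h>

	static int run_m2m_once(int drm_fd, __u32 src_gem, __u32 dst_gem)
	{
		struct tdm_pp_property prop = {
			.cmd = PP_CMD_M2M,
			/* fill .config[TDM_OPS_SRC/TDM_OPS_DST] with
			 * fmt, pos, sz, flip and degree */
		};
		struct tdm_pp_queue_buf qbuf = {0};
		struct tdm_pp_cmd_ctrl ctrl = {0};

		/* create a command node; the driver writes prop.prop_id */
		if (ioctl(drm_fd, DRM_IOCTL_TDM_PP_SET_PROPERTY, &prop))
			return -1;

		/* enqueue the source buffer first ... */
		qbuf.prop_id = prop.prop_id;
		qbuf.ops_id = TDM_OPS_SRC;
		qbuf.buf_type = PP_BUF_ENQUEUE;
		qbuf.handle[0] = src_gem;
		if (ioctl(drm_fd, DRM_IOCTL_TDM_PP_QUEUE_BUF, &qbuf))
			return -1;

		/* ... then the destination, which creates the completion
		 * event and, for M2M, may start the transfer */
		qbuf.ops_id = TDM_OPS_DST;
		qbuf.handle[0] = dst_gem;
		if (ioctl(drm_fd, DRM_IOCTL_TDM_PP_QUEUE_BUF, &qbuf))
			return -1;

		/* start; a TDM_PP_EVENT then arrives on drm_fd when the
		 * destination buffer completes */
		ctrl.prop_id = prop.prop_id;
		ctrl.ctrl = PP_CTRL_PLAY;
		return ioctl(drm_fd, DRM_IOCTL_TDM_PP_CMD_CTRL, &ctrl);
	}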