/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"

/*
 * IPP stands for Image Post Processing and supports image scaler/rotator
 * and input/output DMA operations using FIMC, GSC, Rotator, and so on.
 * IPP is an integrated device driver for hardware blocks that share
 * these attributes.
 */
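
/* Fetch the ipp_context stored as platform driver data on the ipp device. */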
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	spinlock_t	ipp_lock;
	spinlock_t	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
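
/*
 * Each h/w driver (FIMC, GSC, Rotator, ...) registers its ippdrv on this
 * list, typically at probe time; the list is walked whenever a property
 * has to be bound to a driver.
 */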
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);

	return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_register);

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	list_del(&ippdrv->drv_list);

	return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_unregister);
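
/*
 * Allocate a new id above 0 for @obj in @id_idr and store it in @idp.
 * This uses the old two-step idr API (idr_pre_get + idr_get_new_above),
 * retrying when another allocator raced with us in between.
 */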
static int ipp_create_id(struct idr *id_idr, spinlock_t *lock, void *obj,
		u32 *idp)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(id_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(lock);
	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
	spin_unlock(lock);
	if (ret == -EAGAIN)
		goto again;

	return ret;
}

static void *ipp_find_id(struct idr *id_idr, spinlock_t *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);

	spin_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		spin_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	spin_unlock(lock);

	return obj;
}
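
/*
 * Select an ipp driver for @property. A non-zero ipp_id requests one
 * specific driver; ipp_id 0 means "pick any idle driver that accepts
 * this property".
 */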
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

	if (ipp_id) {
		/* find ipp driver */
		ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n", ipp_id);
			return ippdrv;
		}

		/* check dedicated state */
		if (ippdrv->dedicated) {
			DRM_ERROR("device already in dedicated use.\n");
			return ERR_PTR(-EBUSY);
		}

		if (property->cmd != IPP_CMD_M2M
		    && !pm_runtime_suspended(ippdrv->dev)) {
			DRM_ERROR("can't run dedicatedly.\n");
			return ERR_PTR(-EBUSY);
		}

		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("property not supported.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/* get ipp driver entry */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list,
			drv_list) {
			/* check dedicated state */
			if (ippdrv->dedicated)
				continue;

			if (property->cmd != IPP_CMD_M2M
			    && !pm_runtime_suspended(ippdrv->dev)) {
				DRM_INFO("%s:can't run dedicatedly.\n",
					__func__);
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_INFO("%s:property not supported.\n",
					__func__);
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports the requested operations.\n");
	}

	return ERR_PTR(-ENODEV);
}
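
/* Find the ipp driver whose command list holds the node with @prop_id. */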
static struct exynos_drm_ippdrv *ipp_find_drv_node(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n",
			__func__);
		return ERR_PTR(-ENODEV);
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
			__func__, count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
			}
		}
	}

	return ERR_PTR(-ENODEV);
}
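
/*
 * DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY handler: with ipp_id 0 it reports
 * how many ipp drivers are registered, otherwise it returns the chosen
 * driver's capability list to userspace.
 */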
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);

	if (prop_list->ipp_id == 0) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		prop_list->count = count;
	} else {
		ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
			prop_list->ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n",
				prop_list->ipp_id);
			return -EINVAL;
		}

		/*
		 * Copy the driver's capability list into the ioctl data
		 * so it reaches userspace; merely reassigning the local
		 * pointer would be lost on return.
		 */
		*prop_list = *ippdrv->prop_list;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_ipp_get_property);
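
/*
 * DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY handler: validates the requested
 * source/destination configuration, binds it to an ipp driver, and
 * creates (or updates) the command node that later queue/ctrl ioctls
 * refer to by prop_id.
 */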
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_config *config;
	struct drm_exynos_pos *pos;
	struct drm_exynos_sz *sz;
	int ret, i;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	for_each_ipp_ops(i) {
		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		DRM_INFO("%s:prop_id[%d]ops[%s]fmt[%.4s]\n",
			__func__, property->prop_id,
			i ? "dst" : "src", (char *)&config->fmt);

		DRM_INFO("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
			__func__, pos->x, pos->y, pos->w, pos->h,
			sz->hsize, sz->vsize, config->flip, config->degree);
	}

	if (property->prop_id) {
		/* reuse an existing, stopped command node */
		ippdrv = ipp_find_drv_node(property->prop_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("failed to get ipp driver.\n");
			return -EINVAL;
		}

		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if ((c_node->property.prop_id ==
				property->prop_id) &&
				(c_node->state == IPP_STATE_STOP)) {
				DRM_INFO("%s:found cmd[%d]ippdrv[0x%x]\n",
					__func__, property->cmd, (int)ippdrv);

				c_node->property = *property;
				return 0;
			}
		}

		DRM_ERROR("failed to search property.\n");
		return -EINVAL;
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node) {
		DRM_ERROR("failed to allocate command node.\n");
		return -ENOMEM;
	}

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_INFO("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		__func__, property->prop_id, property->cmd, (int)ippdrv);

	/* stored property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = kzalloc(sizeof(*c_node->start_work),
		GFP_KERNEL);
	if (!c_node->start_work) {
		DRM_ERROR("failed to alloc start_work.\n");
		ret = -ENOMEM;
		goto err_clear;
	}

	INIT_WORK((struct work_struct *)c_node->start_work,
		ipp_sched_cmd);

	c_node->stop_work = kzalloc(sizeof(*c_node->stop_work),
		GFP_KERNEL);
	if (!c_node->stop_work) {
		DRM_ERROR("failed to alloc stop_work.\n");
		ret = -ENOMEM;
		goto err_free_start;
	}

	INIT_WORK((struct work_struct *)c_node->stop_work,
		ipp_sched_cmd);

	c_node->event_work = kzalloc(sizeof(*c_node->event_work),
		GFP_KERNEL);
	if (!c_node->event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		ret = -ENOMEM;
		goto err_free_stop;
	}

	INIT_WORK((struct work_struct *)c_node->event_work,
		ipp_sched_event);

	/* init ioctl lock */
	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);
	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/*
	 * Previously, in the case of WB and direct output, the ipp driver
	 * did not become dedicated. At Inki Dae's suggestion, every ipp
	 * driver in use is now made dedicated, even for M2M, to avoid
	 * concurrency problems in the TLB. It is unknown whether any use
	 * case runs more than two ipp drivers at the same time; if not,
	 * this change solves the problem cleanly without side effects.
	 */
	/* make dedicated state without m2m */
	/* if (property->cmd != IPP_CMD_M2M) */
	ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}
EXPORT_SYMBOL_GPL(exynos_drm_ipp_set_property);
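
/*
 * Illustrative userspace flow (a sketch, not part of this driver; the
 * ioctl and struct names below come from the exynos_drm uapi header):
 *
 *	struct drm_exynos_ipp_property prop = { ... src/dst config ... };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id,
 *		.ops_id = EXYNOS_DRM_OPS_SRC,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		... GEM handles ...
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id,
 *		.ctrl = IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */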

static struct drm_exynos_ipp_mem_node
	*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node entry */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
			__func__, count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
				i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M needs a source/destination buffer pair, so the usable node
	 * count is the pair count (min). WB and OUTPUT use only one side,
	 * so take whichever side is populated (max).
	 */
	if (property->cmd == IPP_CMD_M2M)
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	/* delete list */
	mutex_lock(&c_node->cmd_lock);
	list_del(&c_node->list);
	mutex_unlock(&c_node->cmd_lock);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}
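
/*
 * Translate the GEM handles in @qbuf into dma addresses and sizes and
 * queue the result as a memory node on the matching src or dst list.
 */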
static struct drm_exynos_ipp_mem_node
	*ipp_get_mem_node(struct drm_device *drm_dev,
	struct drm_file *file,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	unsigned long size;
	int i;

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
		(int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
		qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		unsigned int gem_obj;

		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
			i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i] != 0) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file, &gem_obj);
			if (IS_ERR_OR_NULL(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			size = exynos_drm_gem_get_size(drm_dev,
						qbuf->handle[i], file);
			if (!size) {
				DRM_ERROR("failed to get size.\n");
				goto err_clear;
			}

			buf_info.gem_objs[i] = (void *)gem_obj;
			buf_info.base[i] = *(dma_addr_t *) addr;
			buf_info.size[i] = (uint64_t) size;
			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]size[0x%x]\n",
				__func__, i, buf_info.base[i],
				(int)buf_info.size[i]);
		}
	}

	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);

	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i, ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	mutex_lock(&c_node->mem_lock);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		void *gem_obj = m_node->buf_info.gem_objs[i];
		if (gem_obj)
			exynos_drm_gem_put_dma_addr(drm_dev, gem_obj);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

err_unlock:
	mutex_unlock(&c_node->mem_lock);

	return ret;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		/* sizeof() is compile-time, so the NULL e is never dereferenced */
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
			__func__, count++, (int)e);

		/*
		 * A NULL qbuf means "delete all events" (used on stop);
		 * otherwise delete only the event for the matching buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		} else if (e->event.buf_id[EXYNOS_DRM_OPS_DST]
			== qbuf->buf_id) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}
	}
}
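
/* Hand a start/stop request off to the single-threaded command workqueue. */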
void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
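
/*
 * DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF handler: enqueue translates GEM
 * handles into a memory node (plus a completion event on the dst side)
 * and, for a running M2M node, kicks the command workqueue; dequeue
 * drops the matching memory node and event again.
 */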
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	ippdrv = ipp_find_drv_node(qbuf->prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR_OR_NULL(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return -EFAULT;
	}

	/* get property */
	property = &c_node->property;
	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR_OR_NULL(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return -ENOMEM;
		}

		/*
		 * A completion event is generated only for the destination
		 * side, so attach it when the dst buffer is queued.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			if (c_node->state != IPP_STATE_START) {
				DRM_DEBUG_KMS("%s:bypass for invalid state.\n"
					, __func__);
				break;
			}

			if (!ipp_check_mem_list(c_node)) {
				DRM_DEBUG_KMS("%s:empty memory.\n"
					, __func__);
				break;
			}

			/*
			 * Once both source and destination buffers are set
			 * and pm is enabled, m2m operations need to start
			 * operations in the queue.
			 */
			if (property->cmd == IPP_CMD_M2M) {
				struct drm_exynos_ipp_cmd_work *cmd_work =
					c_node->start_work;

				cmd_work->ctrl = IPP_CTRL_PLAY;
				ipp_handle_cmd_work(dev, ippdrv, cmd_work,
					c_node);
			} else {
				ret = ops->set_addr(ippdrv->dev,
					&m_node->buf_info,
					m_node->buf_id, qbuf->buf_type);
				if (ret) {
					DRM_ERROR(
						"failed to set addr.\n");
					goto err_clean_node;
				}
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		/* put memory node */
		if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
			list_for_each_entry_safe(m_node, tm_node,
				&c_node->mem_list[qbuf->ops_id], list) {
				if (m_node->buf_id == qbuf->buf_id &&
				    m_node->ops_id == qbuf->ops_id) {
					/* put memory node */
					ipp_put_mem_node(drm_dev,
						c_node, m_node);
				}
			}
		}

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_drm_ipp_queue_buf);

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
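
/*
 * DRM_IOCTL_EXYNOS_IPP_CMD_CTRL handler: validates the requested state
 * transition, then schedules start/stop work on the command workqueue.
 * STOP and PAUSE wait up to 200 ms for the worker to complete.
 */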
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_node(cmd_ctrl->prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR_OR_NULL(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return -EINVAL;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		c_node->state = IPP_STATE_STOP;

		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		c_node->state = IPP_STATE_STOP;

		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		/* ToDo: expand ctrl operation */
		DRM_ERROR("unsupported ctrl operation.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_ipp_cmd_ctrl);

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}
EXPORT_SYMBOL_GPL(exynos_drm_ippnb_register);

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}
EXPORT_SYMBOL_GPL(exynos_drm_ippnb_unregister);

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
EXPORT_SYMBOL_GPL(exynos_drm_ippnb_send_event);
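
/*
 * Program one ipp driver with @property: reset the h/w block, then set
 * format, rotation/flip transform (which may swap width and height) and
 * size for both the source and destination side.
 */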
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret, i, swap = 0;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source,destination operations */
	for_each_ipp_ops(i) {
		/* ToDo: integrate property and config */
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("format not supported.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			swap = ops->set_transf(ippdrv->dev, config->degree,
				config->flip);
			if (swap < 0) {
				DRM_ERROR("transform not supported.\n");
				return swap;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("size not supported.\n");
				return ret;
			}
		}
	}

	return 0;
}
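
/*
 * Push the queued memory nodes into the h/w block and kick it off.
 * M2M consumes one src/dst pair, WB uses only destination buffers and
 * OUTPUT only source buffers.
 */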
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* store command info in ippdrv */
	ippdrv->cmd = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/* get first entry */
			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				return -EFAULT;
			}

			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
				__func__, (int)m_node);

			/* set memory node to ippdrv */
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		/* get list entry */
		list_for_each_entry(m_node, head, list) {
			/* set memory node to ippdrv */
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		/* get list entry */
		list_for_each_entry(m_node, head, list) {
			/* set memory node to ippdrv */
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
					__func__);
				continue;
			}

			/* get list entry */
			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				/* put memory node */
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		/* get list entry */
		list_for_each_entry_safe(m_node, tm_node, head, list) {
			/* put memory node */
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		/* get list entry */
		list_for_each_entry_safe(m_node, tm_node, head, list) {
			/* put memory node */
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		break;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
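
/*
 * Worker run on the command workqueue: actually starts or stops the h/w
 * block for a command node, serialized by cmd_lock. For M2M it waits up
 * to 200 ms for the transfer-complete event before finishing.
 */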
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	/* get property */
	property = &c_node->property;
	if (!property) {
		DRM_ERROR("failed to get property:prop_id[%d]\n",
			c_node->property.prop_id);
		goto err_unlock;
	}

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		/* start property */
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M runs one src/dst pair per kick, so wait for the
		 * transfer-complete event before releasing the lock.
		 */
		if (property->cmd == IPP_CMD_M2M) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				if (ippdrv->stop)
					ippdrv->stop(ippdrv->dev,
						property->cmd);
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}
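
/*
 * Complete one transfer: retire the consumed memory node(s) for the
 * current command, stamp the pending event with time, prop_id and
 * buffer ids, and move it onto the file's event list so a userspace
 * read() on the drm fd picks it up.
 */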
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
			i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EFAULT;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EFAULT;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/* get first entry */
			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
				i ? "dst" : "src", tbuf_id[i]);

			/* put memory node */
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		/* put memory node */
		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		/* get first entry */
		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		/* put memory node */
		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	/* ToDo: Fix buffer id */
	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/* get first event entry */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);
	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
		, __func__, now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];
	/* ToDo: compare index. If needed */

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}
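
/*
 * Worker run on the event workqueue from the h/w irq handler: sends the
 * completion event to userspace and, for M2M, completes start_complete
 * so the command worker can schedule the next buffer pair.
 */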
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
		event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->cmd;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/* take the lock before the bypass check so the unlock below is
	 * always balanced */
	mutex_lock(&c_node->event_lock);

	/*
	 * If userland stops the node while a transfer is in flight,
	 * bypass sending the event but still complete start_complete
	 * below so the command worker is not left waiting.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
			__func__, c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (c_node->property.cmd == IPP_CMD_M2M)
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
	}

	return 0;

err_idr:
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;

	DRM_DEBUG_KMS("%s\n", __func__);

	/*
	 * Use the _safe iterator: exynos_drm_ippdrv_unregister() unlinks
	 * the entry we are standing on.
	 */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list,
		drv_list) {
		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}

	/* ToDo: free notifier callback list if needed */
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* ToDo: multi device open */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);

	return 0;
}
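
/*
 * Called on drm file release: stop and tear down every command node
 * that this file created, dropping the runtime-pm reference once a
 * driver's command list drains.
 */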
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_INFO("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
}

static int __devinit ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	DRM_DEBUG_KMS("%s\n", __func__);

	spin_lock_init(&ctx->ipp_lock);
	spin_lock_init(&ctx->prop_lock);

	/* init ipp driver idr */
	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/* create single thread for ipp event */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		ret = -EINVAL;
		goto err_clear;
	}

	/* create single thread for ipp command */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	/* set driver data */
	platform_set_drvdata(pdev, ctx);

	/* register sub driver */
	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(&pdev->dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
err_clear:
	kfree(ctx);
	return ret;
}

static int __devexit ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	kfree(ctx);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	/* ToDo: Need to implement power and sysmmu ctrl. */
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= __devexit_p(ipp_remove),
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};