2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
19 #include <plat/map-base.h>
22 #include <drm/exynos_drm.h>
23 #include "exynos_drm_drv.h"
24 #include "exynos_drm_gem.h"
25 #include "exynos_drm_ipp.h"
/*
 * NOTE(review): every line of this file carries a pasted line-number prefix
 * and many original lines are missing (the numbering has gaps) — this is a
 * corrupted listing; restore from the complete source before compiling.
 */
28 * IPP is stand for Image Post Processing and
29 * supports image scaler/rotator and input/output DMA operations.
30 * using FIMC, GSC, Rotator, so on.
31 * IPP is integration device driver of same attribute h/w
/* Fetch the ipp_context stored as platform driver data on @dev. */
34 #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
/*
 * Event wrapper queued to userspace: DRM pending-event bookkeeping plus the
 * exynos IPP event payload. (Fragment — closing brace not visible here.)
 */
37 * A structure of event.
39 * @base: base of event.
42 struct drm_exynos_ipp_send_event {
43 struct drm_pending_event base;
44 struct drm_exynos_ipp_event event;
/*
 * Per-buffer memory node queued on a command node's src/dst mem_list.
 * (Fragment — the prop_id/buf_id field lines and closing brace are missing
 * from this listing; see the kernel-doc fields below.)
 */
48 * A structure of memory node.
50 * @list: list head to memory queue information.
51 * @ops_id: id of operations.
52 * @prop_id: id of property.
53 * @buf_id: id of buffer.
54 * @buf_info: gem objects and dma address, size.
56 struct drm_exynos_ipp_mem_node {
57 struct list_head list;
58 enum drm_exynos_ops_id ops_id;
61 struct drm_exynos_ipp_buf_info buf_info;
/*
 * Driver-wide IPP context. (Fragment — the "struct ipp_context {" opening
 * line and the lock/idr field lines described by the kernel-doc below are
 * missing from this listing.)
 */
65 * A structure of ipp context.
67 * @subdrv: prepare initialization using subdrv.
68 * @ipp_lock: lock for synchronization of access to ipp_idr.
69 * @prop_lock: lock for synchronization of access to prop_idr.
70 * @ipp_idr: ipp driver idr.
71 * @prop_idr: property idr.
72 * @event_workq: event work queue.
73 * @cmd_workq: command work queue.
76 struct exynos_drm_subdrv subdrv;
81 struct workqueue_struct *event_workq;
82 struct workqueue_struct *cmd_workq;
/* Global registry of IPP driver instances — presumably guarded by a mutex
 * declared on a line missing from this listing; TODO confirm. */
85 static LIST_HEAD(exynos_drm_ippdrv_list);
/* Notifier chain serviced by the exynos_drm_ippnb_* helpers below. */
86 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
/*
 * Add @ippdrv to the global driver list. (Fragment — braces, the argument
 * validity check and the return statement are missing from this listing.)
 */
88 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
90 DRM_DEBUG_KMS("%s\n", __func__);
95 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
99 EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_register);
/*
 * Remove @ippdrv from the global driver list. (Fragment — braces, the
 * validity check and the return statement are missing from this listing.)
 */
101 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
103 DRM_DEBUG_KMS("%s\n", __func__);
108 list_del(&ippdrv->drv_list);
112 EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_unregister);
/*
 * Allocate a new id (>= 1) for @obj in @id_idr and store it via the idp
 * out-parameter (declared on a missing line). Uses the pre-3.9 two-step
 * idr API: idr_pre_get() then idr_get_new_above() — the spin_lock/unlock
 * calls around the allocation are missing from this listing.
 */
114 static int ipp_create_id(struct idr *id_idr, spinlock_t *lock, void *obj,
119 DRM_DEBUG_KMS("%s\n", __func__);
122 /* ensure there is space available to allocate a handle */
123 if (idr_pre_get(id_idr, GFP_KERNEL) == 0)
126 /* do the allocation under our spinlock */
128 ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
/*
 * Look up the object registered under @id in @id_idr. (Fragment — the
 * locking around idr_find() and the NULL-check/return path are missing.)
 */
136 static void *ipp_find_id(struct idr *id_idr, spinlock_t *lock, u32 id)
140 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
144 /* find object using handle */
145 obj = idr_find(id_idr, id);
/*
 * Select an IPP driver for @property. Two paths are visible: when a
 * specific ipp_id is requested, look it up in ctx->ipp_idr and verify it
 * is not dedicated, is usable for non-M2M commands, and accepts the
 * property; otherwise scan the global driver list for the first driver
 * that passes the same checks. (Fragment — the ipp_id==0 branch test,
 * several returns and closing braces are missing from this listing.)
 */
156 static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
157 struct drm_exynos_ipp_property *property)
159 struct exynos_drm_ippdrv *ippdrv;
160 u32 ipp_id = property->ipp_id;
162 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
165 /* find ipp driver */
166 ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
169 DRM_ERROR("not found ipp%d driver.\n", ipp_id);
173 /* check dedicated state */
174 if (ippdrv->dedicated) {
175 DRM_ERROR("used choose device.\n");
179 if (property->cmd != IPP_CMD_M2M
180 && !pm_runtime_suspended(ippdrv->dev)) {
181 DRM_ERROR("can't run dedicatedly.\n");
186 if (ippdrv->check_property &&
187 ippdrv->check_property(ippdrv->dev, property)) {
188 DRM_ERROR("not support property.\n");
194 /* get ipp driver entry */
195 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
196 /* check dedicated state */
197 if (ippdrv->dedicated)
200 if (property->cmd != IPP_CMD_M2M
201 && !pm_runtime_suspended(ippdrv->dev)) {
202 DRM_INFO("%s:can't run dedicatedly.\n",
208 if (ippdrv->check_property &&
209 ippdrv->check_property(ippdrv->dev, property)) {
210 DRM_INFO("%s:not support property.\n",
218 DRM_ERROR("not support ipp driver operations.\n");
/*
 * Find the driver whose cmd_list contains a command node with @prop_id.
 * (Fragment — the "return ippdrv" on match and the not-found return are
 * missing from this listing.)
 */
225 static struct exynos_drm_ippdrv *ipp_find_drv_node(u32 prop_id)
227 struct exynos_drm_ippdrv *ippdrv;
228 struct drm_exynos_ipp_cmd_node *c_node;
231 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
233 if (list_empty(&exynos_drm_ippdrv_list)) {
234 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n",
239 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
240 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
241 __func__, count++, (int)ippdrv);
243 if (!list_empty(&ippdrv->cmd_list)) {
244 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
245 if (c_node->property.prop_id == prop_id)
/*
 * GET_PROPERTY ioctl: with ipp_id == 0 report the number of registered
 * drivers; otherwise copy the prop_list of the driver registered under
 * ipp_id. (Fragment — parameter validity tests, the count increment in the
 * list walk, and return statements are missing from this listing.)
 */
254 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
255 struct drm_file *file)
257 struct drm_exynos_file_private *file_priv = file->driver_priv;
258 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
259 struct device *dev = priv->dev;
260 struct ipp_context *ctx = get_ipp_context(dev);
261 struct drm_exynos_ipp_prop_list *prop_list = data;
262 struct exynos_drm_ippdrv *ippdrv;
265 DRM_DEBUG_KMS("%s\n", __func__);
268 DRM_ERROR("invalid context.\n");
273 DRM_ERROR("invalid property parameter.\n");
277 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
279 if (prop_list->ipp_id == 0) {
280 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
282 prop_list->count = count;
284 ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
288 DRM_ERROR("not found ipp%d driver.\n",
293 prop_list = ippdrv->prop_list;
298 EXPORT_SYMBOL_GPL(exynos_drm_ipp_get_property);
/*
 * SET_PROPERTY ioctl. With a non-zero prop_id, update an existing stopped
 * command node in place; otherwise pick a driver via ipp_find_driver(),
 * allocate a command node, assign it a new prop_id, allocate and init its
 * start/stop/event work items, locks, completions and lists, attach it to
 * the driver, and mark the driver dedicated for non-M2M commands.
 * (Fragment — error-path labels, several returns/closing braces and the
 * kzalloc GFP arguments are missing from this listing; the trailing kfree
 * lines are the tail of the goto-cleanup error path.)
 */
300 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
301 struct drm_file *file)
303 struct drm_exynos_file_private *file_priv = file->driver_priv;
304 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
305 struct device *dev = priv->dev;
306 struct ipp_context *ctx = get_ipp_context(dev);
307 struct drm_exynos_ipp_property *property = data;
308 struct exynos_drm_ippdrv *ippdrv;
309 struct drm_exynos_ipp_cmd_node *c_node;
310 struct drm_exynos_ipp_config *config;
311 struct drm_exynos_pos *pos;
312 struct drm_exynos_sz *sz;
315 DRM_DEBUG_KMS("%s\n", __func__);
318 DRM_ERROR("invalid context.\n");
323 DRM_ERROR("invalid property parameter.\n");
327 for_each_ipp_ops(i) {
328 config = &property->config[i];
332 DRM_INFO("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
333 __func__, property->prop_id,
334 i ? "dst" : "src", config->fmt);
336 DRM_INFO("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
337 __func__, pos->x, pos->y, pos->w, pos->h,
338 sz->hsize, sz->vsize, config->flip, config->degree);
341 if (property->prop_id) {
342 ippdrv = ipp_find_drv_node(property->prop_id);
344 DRM_ERROR("failed to get ipp driver.\n");
348 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
349 if ((c_node->property.prop_id ==
350 property->prop_id) &&
351 (c_node->state == IPP_STATE_STOP)) {
352 DRM_INFO("%s:found cmd[%d]ippdrv[0x%x]\n",
353 __func__, property->cmd, (int)ippdrv);
355 c_node->property = *property;
360 DRM_ERROR("failed to search property.\n");
364 /* find ipp driver using ipp id */
365 ippdrv = ipp_find_driver(ctx, property);
367 DRM_ERROR("failed to get ipp driver.\n");
371 /* allocate command node */
372 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
374 DRM_ERROR("failed to allocate map node.\n");
378 /* create property id */
379 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
382 DRM_ERROR("failed to create id.\n");
386 DRM_INFO("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
387 __func__, property->prop_id, property->cmd, (int)ippdrv);
389 /* stored property information and ippdrv in private data */
391 c_node->property = *property;
392 c_node->state = IPP_STATE_IDLE;
394 c_node->start_work = kzalloc(sizeof(*c_node->start_work),
396 if (!c_node->start_work) {
397 DRM_ERROR("failed to alloc start_work.\n");
402 INIT_WORK((struct work_struct *)c_node->start_work,
405 c_node->stop_work = kzalloc(sizeof(*c_node->stop_work),
407 if (!c_node->stop_work) {
408 DRM_ERROR("failed to alloc stop_work.\n");
413 INIT_WORK((struct work_struct *)c_node->stop_work,
416 c_node->event_work = kzalloc(sizeof(*c_node->event_work),
418 if (!c_node->event_work) {
419 DRM_ERROR("failed to alloc event_work.\n");
424 INIT_WORK((struct work_struct *)c_node->event_work,
427 /* init ioctl lock */
428 mutex_init(&c_node->cmd_lock);
429 mutex_init(&c_node->mem_lock);
430 mutex_init(&c_node->event_lock);
431 init_completion(&c_node->start_complete);
432 init_completion(&c_node->stop_complete);
435 INIT_LIST_HEAD(&c_node->mem_list[i]);
437 INIT_LIST_HEAD(&c_node->event_list);
438 list_splice_init(&priv->event_list, &c_node->event_list);
439 list_add_tail(&c_node->list, &ippdrv->cmd_list);
441 /* make dedicated state without m2m */
442 if (property->cmd != IPP_CMD_M2M)
443 ippdrv->dedicated = true;
448 kfree(c_node->stop_work);
450 kfree(c_node->start_work);
455 EXPORT_SYMBOL_GPL(exynos_drm_ipp_set_property);
/*
 * Scan the src/dst memory list selected by qbuf->ops_id for a node whose
 * buf_id matches. (Fragment — the "return m_node" on match and the
 * not-found return are missing from this listing.)
 */
457 static struct drm_exynos_ipp_mem_node
458 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
459 struct drm_exynos_ipp_queue_buf *qbuf)
461 struct drm_exynos_ipp_mem_node *m_node;
462 struct list_head *head;
465 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
467 /* source/destination memory list */
468 head = &c_node->mem_list[qbuf->ops_id];
470 /* find memory node entry */
471 list_for_each_entry(m_node, head, list) {
472 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
473 __func__, count++, (int)m_node);
475 /* compare buffer id */
476 if (m_node->buf_id == qbuf->buf_id)
/*
 * Count queued buffers per ops direction under mem_lock and return
 * min(src,dst) for M2M commands, max(src,dst) otherwise — callers use a
 * zero result as "empty memory". (Fragment — the count[i]++ inside the
 * list walk and the final return are missing from this listing.)
 */
483 static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
485 struct drm_exynos_ipp_property *property = &c_node->property;
486 struct drm_exynos_ipp_mem_node *m_node;
487 struct list_head *head;
488 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
490 DRM_DEBUG_KMS("%s\n", __func__);
492 mutex_lock(&c_node->mem_lock);
494 for_each_ipp_ops(i) {
495 /* source/destination memory list */
496 head = &c_node->mem_list[i];
498 if (list_empty(head)) {
499 DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
504 /* find memory node entry */
505 list_for_each_entry(m_node, head, list) {
506 DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
507 i ? "dst" : "src", count[i], (int)m_node);
512 DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
513 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
514 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
517 if (property->cmd == IPP_CMD_M2M)
518 ret = min(count[EXYNOS_DRM_OPS_SRC],
519 count[EXYNOS_DRM_OPS_DST]);
521 ret = max(count[EXYNOS_DRM_OPS_SRC],
522 count[EXYNOS_DRM_OPS_DST]);
524 mutex_unlock(&c_node->mem_lock);
/*
 * Tear down a command node: unlink it from its driver's cmd_list under
 * cmd_lock, destroy its mutexes, and free the three work items.
 * (Fragment — the final kfree(c_node) is presumably on a missing line;
 * TODO confirm against the full source.)
 */
529 static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
531 DRM_DEBUG_KMS("%s\n", __func__);
533 mutex_lock(&c_node->cmd_lock);
535 list_del(&c_node->list);
536 mutex_unlock(&c_node->cmd_lock);
539 mutex_destroy(&c_node->cmd_lock);
540 mutex_destroy(&c_node->mem_lock);
541 mutex_destroy(&c_node->event_lock);
543 /* free command node */
544 kfree(c_node->start_work);
545 kfree(c_node->stop_work);
546 kfree(c_node->event_work);
/*
 * Program one memory node into the hardware: pick the ops table for the
 * node's direction and call its set_addr() with IPP_BUF_ENQUEUE, all under
 * mem_lock. (Fragment — NULL checks, the error-exit label and the return
 * are missing from this listing.)
 */
550 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
551 struct drm_exynos_ipp_cmd_node *c_node,
552 struct drm_exynos_ipp_mem_node *m_node)
554 struct exynos_drm_ipp_ops *ops = NULL;
557 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
560 DRM_ERROR("invalid queue node.\n");
564 mutex_lock(&c_node->mem_lock);
566 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
568 /* get operations callback */
569 ops = ippdrv->ops[m_node->ops_id];
571 DRM_ERROR("not support ops.\n");
576 /* set address and enable irq */
578 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
579 m_node->buf_id, IPP_BUF_ENQUEUE);
581 DRM_ERROR("failed to set addr.\n");
587 mutex_unlock(&c_node->mem_lock);
/*
 * Build a memory node for an enqueued buffer: allocate it, resolve each
 * non-zero GEM handle in qbuf to a DMA address and size, record them in
 * buf_info, and append the node to the src/dst mem_list — all under
 * mem_lock. (Fragment — error-handling labels/returns and the planar-loop
 * closing brace are missing; the second mutex_unlock is the tail of the
 * error path.)
 */
591 static struct drm_exynos_ipp_mem_node
592 *ipp_get_mem_node(struct drm_device *drm_dev,
593 struct drm_file *file,
594 struct drm_exynos_ipp_cmd_node *c_node,
595 struct drm_exynos_ipp_queue_buf *qbuf)
597 struct drm_exynos_ipp_mem_node *m_node;
598 struct drm_exynos_ipp_buf_info buf_info;
603 mutex_lock(&c_node->mem_lock);
605 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
607 DRM_ERROR("failed to allocate queue node.\n");
611 /* clear base address for error handling */
612 memset(&buf_info, 0x0, sizeof(buf_info));
614 /* operations, buffer id */
615 m_node->ops_id = qbuf->ops_id;
616 m_node->prop_id = qbuf->prop_id;
617 m_node->buf_id = qbuf->buf_id;
619 DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
620 (int)m_node, qbuf->ops_id);
621 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
622 qbuf->prop_id, m_node->buf_id);
624 for_each_ipp_planar(i) {
625 unsigned int gem_obj;
627 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
630 /* get dma address by handle */
631 if (qbuf->handle[i] != 0) {
632 addr = exynos_drm_gem_get_dma_addr(drm_dev,
633 qbuf->handle[i], file, &gem_obj);
635 DRM_ERROR("failed to get addr.\n");
639 size = exynos_drm_gem_get_size(drm_dev,
640 qbuf->handle[i], file);
642 DRM_ERROR("failed to get size.\n");
646 buf_info.gem_objs[i] = (void *)gem_obj;
647 buf_info.base[i] = *(dma_addr_t *) addr;
648 buf_info.size[i] = (uint64_t) size;
649 DRM_DEBUG_KMS("%s:i[%d]base[0x%x]size[0x%x]\n",
650 __func__, i, buf_info.base[i],
651 (int)buf_info.size[i]);
655 m_node->buf_info = buf_info;
656 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
658 mutex_unlock(&c_node->mem_lock);
665 mutex_unlock(&c_node->mem_lock);
/*
 * Release a memory node: drop the DMA-address references on each planar
 * GEM object, unlink the node from its queue, all under mem_lock.
 * (Fragment — NULL/empty checks' bodies, the kfree of the node and the
 * return statement are missing from this listing.)
 */
670 static int ipp_put_mem_node(struct drm_device *drm_dev,
671 struct drm_exynos_ipp_cmd_node *c_node,
672 struct drm_exynos_ipp_mem_node *m_node)
676 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
678 mutex_lock(&c_node->mem_lock);
681 DRM_ERROR("invalid dequeue node.\n");
686 if (list_empty(&m_node->list)) {
687 DRM_ERROR("empty memory node.\n");
692 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
695 for_each_ipp_planar(i) {
696 void *gem_obj = m_node->buf_info.gem_objs[i];
698 exynos_drm_gem_put_dma_addr(drm_dev, gem_obj);
701 /* delete list in queue */
702 list_del(&m_node->list);
706 mutex_unlock(&c_node->mem_lock);
/* drm_pending_event destroy callback — body missing from this listing
 * (presumably kfree of the containing send_event; TODO confirm). */
711 static void ipp_free_event(struct drm_pending_event *event)
/*
 * Allocate and queue a completion event for a DST enqueue: reserve
 * event_space under the DRM event_lock, fill in the exynos IPP event
 * header and payload from @qbuf, and append it to the command node's
 * event_list. (Fragment — the allocation-failure branch body and the
 * event_space accounting direction around the visible "+=" are
 * incompletely shown; verify against the full source.)
 */
716 static int ipp_get_event(struct drm_device *drm_dev,
717 struct drm_file *file,
718 struct drm_exynos_ipp_cmd_node *c_node,
719 struct drm_exynos_ipp_queue_buf *qbuf)
721 struct drm_exynos_ipp_send_event *e;
724 DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
725 qbuf->ops_id, qbuf->buf_id);
727 e = kzalloc(sizeof(*e), GFP_KERNEL);
730 DRM_ERROR("failed to allocate event.\n");
731 spin_lock_irqsave(&drm_dev->event_lock, flags);
732 file->event_space += sizeof(e->event);
733 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
738 e->event.base.type = DRM_EXYNOS_IPP_EVENT;
739 e->event.base.length = sizeof(e->event);
740 e->event.user_data = qbuf->user_data;
741 e->event.prop_id = qbuf->prop_id;
742 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
743 e->base.event = &e->event.base;
744 e->base.file_priv = file;
745 e->base.destroy = ipp_free_event;
746 list_add_tail(&e->base.link, &c_node->event_list);
/*
 * Remove pending events from the command node's event_list — either all of
 * them (qbuf == NULL, used on stop) or the one matching qbuf's DST buf_id.
 * (Fragment — the qbuf NULL test and the event destroy calls after each
 * list_del are missing from this listing.)
 */
751 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
752 struct drm_exynos_ipp_queue_buf *qbuf)
754 struct drm_exynos_ipp_send_event *e, *te;
757 if (list_empty(&c_node->event_list)) {
758 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
762 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
763 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
764 __func__, count++, (int)e);
768 list_del(&e->base.link);
770 } else if (e->event.buf_id[EXYNOS_DRM_OPS_DST]
773 list_del(&e->base.link);
/*
 * Bind @ippdrv and @c_node into @cmd_work and queue it on the context's
 * command workqueue (serviced by ipp_sched_cmd).
 */
782 void ipp_handle_cmd_work(struct device *dev,
783 struct exynos_drm_ippdrv *ippdrv,
784 struct drm_exynos_ipp_cmd_work *cmd_work,
785 struct drm_exynos_ipp_cmd_node *c_node)
787 struct ipp_context *ctx = get_ipp_context(dev);
789 cmd_work->ippdrv = ippdrv;
790 cmd_work->c_node = c_node;
791 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
/*
 * QUEUE_BUF ioctl. ENQUEUE: build a memory node from the GEM handles; for
 * DST buffers also allocate a completion event; if the node is already
 * started and both directions have memory, either kick an M2M run through
 * the command workqueue or program the address directly via ops->set_addr.
 * DEQUEUE: under cmd_lock, drop the matching event (DST only) and release
 * the matching memory node. The trailing walk is the error path that
 * cleans up any memory nodes created for this buf_id. (Fragment — many
 * returns, closing braces and goto labels are missing from this listing.)
 */
794 int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
795 struct drm_file *file)
797 struct drm_exynos_file_private *file_priv = file->driver_priv;
798 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
799 struct device *dev = priv->dev;
800 struct ipp_context *ctx = get_ipp_context(dev);
801 struct drm_exynos_ipp_queue_buf *qbuf = data;
802 struct exynos_drm_ippdrv *ippdrv;
803 struct drm_exynos_ipp_property *property;
804 struct exynos_drm_ipp_ops *ops;
805 struct drm_exynos_ipp_cmd_node *c_node;
806 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
809 DRM_DEBUG_KMS("%s\n", __func__);
812 DRM_ERROR("invalid buf parameter.\n");
816 ippdrv = ipp_find_drv_node(qbuf->prop_id);
819 DRM_ERROR("failed to get ipp driver.\n");
823 if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
824 DRM_ERROR("invalid ops parameter.\n");
828 ops = ippdrv->ops[qbuf->ops_id];
830 DRM_ERROR("failed to get ops.\n");
834 DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
835 __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
836 qbuf->buf_id, qbuf->buf_type);
838 /* find command node */
839 c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
842 DRM_ERROR("failed to get command node.\n");
847 property = &c_node->property;
849 DRM_ERROR("invalid property parameter.\n");
854 switch (qbuf->buf_type) {
855 case IPP_BUF_ENQUEUE:
856 /* get memory node */
857 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
859 DRM_ERROR("failed to get m_node.\n");
863 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
865 ret = ipp_get_event(drm_dev, file, c_node, qbuf);
867 DRM_ERROR("failed to get event.\n");
871 if (c_node->state != IPP_STATE_START) {
872 DRM_DEBUG_KMS("%s:bypass for invalid state.\n"
877 if (!ipp_check_mem_list(c_node)) {
878 DRM_DEBUG_KMS("%s:empty memory.\n"
884 * If set source, destination buffer and enable pm
885 * m2m operations need start operations in queue
887 if (property->cmd == IPP_CMD_M2M) {
888 struct drm_exynos_ipp_cmd_work *cmd_work =
891 cmd_work->ctrl = IPP_CTRL_PLAY;
892 ipp_handle_cmd_work(dev, ippdrv, cmd_work,
896 ret = ops->set_addr(ippdrv->dev,
898 m_node->buf_id, qbuf->buf_type);
901 "failed to set addr.\n");
908 case IPP_BUF_DEQUEUE:
909 mutex_lock(&c_node->cmd_lock);
911 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
912 ipp_put_event(c_node, qbuf);
914 /* put memory node */
915 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
916 list_for_each_entry_safe(m_node, tm_node,
917 &c_node->mem_list[qbuf->ops_id], list) {
918 if (m_node->buf_id == qbuf->buf_id &&
919 m_node->ops_id == qbuf->ops_id) {
920 /* put memory node */
921 ipp_put_mem_node(drm_dev,
926 mutex_unlock(&c_node->cmd_lock);
929 DRM_ERROR("invalid buffer control.\n");
936 DRM_ERROR("clean memory nodes.\n");
938 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
940 list_for_each_entry_safe(m_node, tm_node,
941 &c_node->mem_list[qbuf->ops_id], list) {
942 if (m_node->buf_id == qbuf->buf_id &&
943 m_node->ops_id == qbuf->ops_id)
944 ipp_put_mem_node(drm_dev, c_node, m_node);
950 EXPORT_SYMBOL_GPL(exynos_drm_ipp_queue_buf);
/*
 * Validate a ctrl/state transition: non-PLAY controls require the device
 * to be runtime-active; the visible fragments then check per-ctrl state
 * preconditions (PLAY from IDLE, STOP not from STOP, PAUSE from START,
 * RESUME from STOP — partially inferred, the switch header lines are
 * missing from this listing). Returns true/false (returns not visible).
 */
952 static bool exynos_drm_ipp_check_valid(struct device *dev,
953 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
955 DRM_DEBUG_KMS("%s\n", __func__);
957 if (ctrl != IPP_CTRL_PLAY) {
958 if (pm_runtime_suspended(dev)) {
959 DRM_ERROR("pm:runtime_suspended.\n");
966 if (state != IPP_STATE_IDLE)
970 if (state == IPP_STATE_STOP)
974 if (state != IPP_STATE_START)
977 case IPP_CTRL_RESUME:
978 if (state != IPP_STATE_STOP)
982 DRM_ERROR("invalid state.\n");
990 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
/*
 * CMD_CTRL ioctl: drive the PLAY/STOP/PAUSE/RESUME state machine for the
 * command node identified by prop_id. PLAY resumes runtime PM and queues
 * start_work; STOP queues stop_work, waits up to 200ms for completion,
 * clears the driver's dedicated flag, frees the node and drops the PM
 * reference when the driver's cmd_list empties; PAUSE queues stop_work and
 * waits; RESUME queues start_work. (Fragment — switch/case header lines,
 * break statements and returns are missing from this listing.)
 */
994 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
995 struct drm_file *file)
997 struct drm_exynos_file_private *file_priv = file->driver_priv;
998 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
999 struct exynos_drm_ippdrv *ippdrv = NULL;
1000 struct device *dev = priv->dev;
1001 struct ipp_context *ctx = get_ipp_context(dev);
1002 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1003 struct drm_exynos_ipp_cmd_work *cmd_work;
1004 struct drm_exynos_ipp_cmd_node *c_node;
1007 DRM_DEBUG_KMS("%s\n", __func__);
1010 DRM_ERROR("invalid context.\n");
1015 DRM_ERROR("invalid control parameter.\n");
1019 DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
1020 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1022 ippdrv = ipp_find_drv_node(cmd_ctrl->prop_id);
1025 DRM_ERROR("failed to get ipp driver.\n");
1029 c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
1033 DRM_ERROR("invalid command node list.\n");
1037 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1039 DRM_ERROR("invalid state.\n");
1043 switch (cmd_ctrl->ctrl) {
1045 if (pm_runtime_suspended(ippdrv->dev))
1046 pm_runtime_get_sync(ippdrv->dev);
1047 c_node->state = IPP_STATE_START;
1049 cmd_work = c_node->start_work;
1050 cmd_work->ctrl = cmd_ctrl->ctrl;
1051 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1054 c_node->state = IPP_STATE_STOP;
1056 cmd_work = c_node->stop_work;
1057 cmd_work->ctrl = cmd_ctrl->ctrl;
1058 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1060 if (!wait_for_completion_timeout(&c_node->stop_complete,
1061 msecs_to_jiffies(200))) {
1062 DRM_ERROR("timeout stop:prop_id[%d]\n",
1063 c_node->property.prop_id);
1066 ippdrv->dedicated = false;
1067 ipp_clean_cmd_node(c_node);
1069 if (list_empty(&ippdrv->cmd_list))
1070 pm_runtime_put_sync(ippdrv->dev);
1072 case IPP_CTRL_PAUSE:
1073 c_node->state = IPP_STATE_STOP;
1075 cmd_work = c_node->stop_work;
1076 cmd_work->ctrl = cmd_ctrl->ctrl;
1077 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1079 if (!wait_for_completion_timeout(&c_node->stop_complete,
1080 msecs_to_jiffies(200))) {
1081 DRM_ERROR("timeout stop:prop_id[%d]\n",
1082 c_node->property.prop_id);
1085 case IPP_CTRL_RESUME:
1086 c_node->state = IPP_STATE_START;
1088 cmd_work = c_node->start_work;
1089 cmd_work->ctrl = cmd_ctrl->ctrl;
1090 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1093 /* ToDo: expand ctrl operation */
1094 DRM_ERROR("could not support this state currently.\n");
1098 DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
1099 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1106 EXPORT_SYMBOL_GPL(exynos_drm_ipp_cmd_ctrl);
/* Register @nb on the IPP blocking notifier chain. */
1108 int exynos_drm_ippnb_register(struct notifier_block *nb)
1110 return blocking_notifier_chain_register(
1111 &exynos_drm_ippnb_list, nb);
1113 EXPORT_SYMBOL_GPL(exynos_drm_ippnb_register);
/* Remove @nb from the IPP blocking notifier chain. */
1115 int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1117 return blocking_notifier_chain_unregister(
1118 &exynos_drm_ippnb_list, nb);
1120 EXPORT_SYMBOL_GPL(exynos_drm_ippnb_unregister);
/* Broadcast event @val with payload @v to all registered IPP notifiers. */
1122 int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1124 return blocking_notifier_call_chain(
1125 &exynos_drm_ippnb_list, val, v);
1127 EXPORT_SYMBOL_GPL(exynos_drm_ippnb_send_event);
/*
 * Program @property into the hardware: reset the block, then for each ops
 * direction set format, transform (rotation/flip, which may report a
 * swapped orientation via the return value) and size/position through the
 * driver's ops callbacks. (Fragment — error returns, the set_transf flip
 * argument and several closing braces are missing from this listing.)
 */
1129 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1130 struct drm_exynos_ipp_property *property)
1132 struct exynos_drm_ipp_ops *ops = NULL;
1133 int ret, i, swap = 0;
1136 DRM_ERROR("invalid property parameter.\n");
1140 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1142 /* reset h/w block */
1143 if (ippdrv->reset &&
1144 ippdrv->reset(ippdrv->dev)) {
1145 DRM_ERROR("failed to reset.\n");
1149 /* set source,destination operations */
1150 for_each_ipp_ops(i) {
1151 /* ToDo: integrate property and config */
1152 struct drm_exynos_ipp_config *config =
1153 &property->config[i];
1155 ops = ippdrv->ops[i];
1156 if (!ops || !config) {
1157 DRM_ERROR("not support ops and config.\n");
1163 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1165 DRM_ERROR("not support format.\n");
1170 /* set transform for rotation, flip */
1171 if (ops->set_transf) {
1172 swap = ops->set_transf(ippdrv->dev, config->degree,
1175 DRM_ERROR("not support tranf.\n");
1181 if (ops->set_size) {
1182 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1185 DRM_ERROR("not support size.\n");
/*
 * Start a command: record @c_node as the driver's current command, verify
 * both memory lists are populated, program the property, then feed memory
 * nodes to the hardware — the first src+dst pair for M2M, every dst node
 * for writeback (case label missing), every src node for output — and
 * finally call the driver's start() hook. (Fragment — case labels, error
 * returns and closing braces are missing from this listing.)
 */
1194 static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1195 struct drm_exynos_ipp_cmd_node *c_node)
1197 struct drm_exynos_ipp_mem_node *m_node;
1198 struct drm_exynos_ipp_property *property = &c_node->property;
1199 struct list_head *head;
1202 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1204 /* store command info in ippdrv */
1205 ippdrv->cmd = c_node;
1207 if (!ipp_check_mem_list(c_node)) {
1208 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1212 /* set current property in ippdrv */
1213 ret = ipp_set_property(ippdrv, property);
1215 DRM_ERROR("failed to set property.\n");
1221 switch (property->cmd) {
1223 for_each_ipp_ops(i) {
1224 /* source/destination memory list */
1225 head = &c_node->mem_list[i];
1227 /* get first entry */
1228 m_node = list_first_entry(head,
1229 struct drm_exynos_ipp_mem_node, list);
1231 DRM_ERROR("failed to get node.\n");
1236 DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
1237 __func__, (int)m_node);
1239 /* set memory node to ippdrv */
1240 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1242 DRM_ERROR("failed to set m node.\n");
1248 /* destination memory list */
1249 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1251 /* get list entry */
1252 list_for_each_entry(m_node, head, list) {
1253 /* set memory node to ippdrv */
1254 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1256 DRM_ERROR("failed to set m node.\n");
1261 case IPP_CMD_OUTPUT:
1262 /* source memory list */
1263 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1265 /* get list entry */
1266 list_for_each_entry(m_node, head, list) {
1267 /* set memory node to ippdrv */
1268 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1270 DRM_ERROR("failed to set m node.\n");
1276 DRM_ERROR("invalid operations.\n");
1280 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
1282 /* start operations */
1283 if (ippdrv->start) {
1284 ret = ippdrv->start(ippdrv->dev, property->cmd);
1286 DRM_ERROR("failed to start ops.\n");
/*
 * Stop a command: flush all pending events (ipp_put_event with NULL drops
 * everything), release every queued memory node for the directions the
 * command type uses (both for M2M, dst for writeback — case label missing,
 * src for output), then call the driver's stop() hook if present.
 * (Fragment — case labels, returns and closing braces are missing from
 * this listing.)
 */
1294 static int ipp_stop_property(struct drm_device *drm_dev,
1295 struct exynos_drm_ippdrv *ippdrv,
1296 struct drm_exynos_ipp_cmd_node *c_node)
1298 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1299 struct drm_exynos_ipp_property *property = &c_node->property;
1300 struct list_head *head;
1303 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1306 ipp_put_event(c_node, NULL);
1309 switch (property->cmd) {
1311 for_each_ipp_ops(i) {
1312 /* source/destination memory list */
1313 head = &c_node->mem_list[i];
1315 if (list_empty(head)) {
1316 DRM_DEBUG_KMS("%s:mem_list is empty.\n",
1321 /* get list entry */
1322 list_for_each_entry_safe(m_node, tm_node,
1324 /* put memory node */
1325 ret = ipp_put_mem_node(drm_dev, c_node,
1328 DRM_ERROR("failed to put m_node.\n");
1335 /* destination memory list */
1336 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1338 if (list_empty(head)) {
1339 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1343 /* get list entry */
1344 list_for_each_entry_safe(m_node, tm_node, head, list) {
1345 /* put memory node */
1346 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1348 DRM_ERROR("failed to put m_node.\n");
1353 case IPP_CMD_OUTPUT:
1354 /* source memory list */
1355 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1357 if (list_empty(head)) {
1358 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1362 /* get list entry */
1363 list_for_each_entry_safe(m_node, tm_node, head, list) {
1364 /* put memory node */
1365 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1367 DRM_ERROR("failed to put m_node.\n");
1373 DRM_ERROR("invalid operations.\n");
1379 /* stop operations */
1381 ippdrv->stop(ippdrv->dev, property->cmd);
/*
 * Command workqueue handler: under cmd_lock, dispatch on the queued ctrl —
 * PLAY/RESUME starts the property (and for M2M waits up to 200ms on
 * start_complete, forcing ippdrv->stop on timeout), STOP/PAUSE stops the
 * property and signals stop_complete. (Fragment — case labels, validity
 * checks' conditions and early-exit paths are missing from this listing.)
 */
1386 void ipp_sched_cmd(struct work_struct *work)
1388 struct drm_exynos_ipp_cmd_work *cmd_work =
1389 (struct drm_exynos_ipp_cmd_work *)work;
1390 struct exynos_drm_ippdrv *ippdrv;
1391 struct drm_exynos_ipp_cmd_node *c_node;
1392 struct drm_exynos_ipp_property *property;
1395 DRM_DEBUG_KMS("%s\n", __func__);
1397 ippdrv = cmd_work->ippdrv;
1399 DRM_ERROR("invalid ippdrv list.\n");
1403 c_node = cmd_work->c_node;
1405 DRM_ERROR("invalid command node list.\n");
1409 mutex_lock(&c_node->cmd_lock);
1412 property = &c_node->property;
1414 DRM_ERROR("failed to get property:prop_id[%d]\n",
1415 c_node->property.prop_id);
1419 switch (cmd_work->ctrl) {
1421 case IPP_CTRL_RESUME:
1422 /* start property */
1423 ret = ipp_start_property(ippdrv, c_node);
1425 DRM_ERROR("failed to start property:prop_id[%d]\n",
1426 c_node->property.prop_id);
1430 if (property->cmd == IPP_CMD_M2M) {
1431 if (!wait_for_completion_timeout
1432 (&c_node->start_complete, msecs_to_jiffies(200))) {
1433 DRM_ERROR("timeout event:prop_id[%d]\n",
1434 c_node->property.prop_id);
1436 ippdrv->stop(ippdrv->dev,
1443 case IPP_CTRL_PAUSE:
1444 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1447 DRM_ERROR("failed to stop property.\n");
1451 complete(&c_node->stop_complete);
1454 DRM_ERROR("unknown control type\n");
1458 DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
1461 mutex_unlock(&c_node->cmd_lock);
/*
 * Deliver a buffer-done event to userspace: per command type, pick and
 * release the completed memory node(s) (first src+dst pair for M2M, the
 * dst node matching buf_id for writeback — case label missing, first src
 * node for output), stamp the first pending event with the current time,
 * prop_id and buf ids, then move it onto the file's event_list and wake
 * the waiter under the DRM event_lock. (Fragment — case labels, error
 * returns and the for_each loop header before the buf_id copy are missing
 * from this listing.)
 */
1464 static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1465 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1467 struct drm_device *drm_dev = ippdrv->drm_dev;
1468 struct drm_exynos_ipp_property *property = &c_node->property;
1469 struct drm_exynos_ipp_mem_node *m_node;
1470 struct drm_exynos_ipp_queue_buf qbuf;
1471 struct drm_exynos_ipp_send_event *e;
1472 struct list_head *head;
1474 unsigned long flags;
1475 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1479 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1480 i ? "dst" : "src", buf_id[i]);
1483 DRM_ERROR("failed to get drm_dev.\n");
1488 DRM_ERROR("failed to get property.\n");
1492 if (list_empty(&c_node->event_list)) {
1493 DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
1497 if (!ipp_check_mem_list(c_node)) {
1498 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1503 switch (property->cmd) {
1505 for_each_ipp_ops(i) {
1506 /* source/destination memory list */
1507 head = &c_node->mem_list[i];
1509 /* get first entry */
1510 m_node = list_first_entry(head,
1511 struct drm_exynos_ipp_mem_node, list);
1513 DRM_ERROR("empty memory node.\n");
1517 tbuf_id[i] = m_node->buf_id;
1518 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1519 i ? "dst" : "src", tbuf_id[i]);
1521 /* put memory node */
1522 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1524 DRM_ERROR("failed to put m_node.\n");
1528 /* clear buf for finding */
1529 memset(&qbuf, 0x0, sizeof(qbuf));
1530 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1531 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1533 /* get memory node entry */
1534 m_node = ipp_find_mem_node(c_node, &qbuf);
1536 DRM_ERROR("empty memory node.\n");
1540 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1542 /* put memory node */
1543 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1545 DRM_ERROR("failed to put m_node.\n");
1547 case IPP_CMD_OUTPUT:
1548 /* source memory list */
1549 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1551 /* get first entry */
1552 m_node = list_first_entry(head,
1553 struct drm_exynos_ipp_mem_node, list);
1555 DRM_ERROR("empty memory node.\n");
1559 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1561 /* put memory node */
1562 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1564 DRM_ERROR("failed to put m_node.\n");
1567 DRM_ERROR("invalid operations.\n");
1571 /* ToDo: Fix buffer id */
1572 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1573 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1574 tbuf_id[1], buf_id[1], property->prop_id);
1576 /* get first event entry */
1577 e = list_first_entry(&c_node->event_list,
1578 struct drm_exynos_ipp_send_event, base.link);
1581 DRM_ERROR("empty event.\n");
1585 do_gettimeofday(&now);
1586 DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
1587 , __func__, now.tv_sec, now.tv_usec);
1588 e->event.tv_sec = now.tv_sec;
1589 e->event.tv_usec = now.tv_usec;
1590 e->event.prop_id = property->prop_id;
1592 /* set buffer id about source destination */
1594 e->event.buf_id[i] = tbuf_id[i];
1595 /* ToDo: compare index. If needed */
1597 spin_lock_irqsave(&drm_dev->event_lock, flags);
1598 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1599 wake_up_interruptible(&e->base.file_priv->event_wait);
1600 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1602 DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
1603 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
/*
 * ipp_sched_event - workqueue handler that reports a completed buffer.
 *
 * Runs on the per-driver event workqueue (ippdrv->event_workq, installed
 * by ipp_subdrv_probe()).  Recovers the event work item, locates the
 * active command node and forwards the finished buffer ids to userspace
 * via ipp_send_event().  For memory-to-memory commands it also wakes any
 * waiter blocked on the start completion.
 *
 * NOTE(review): this listing is an elided excerpt; the conditions that
 * guard the DRM_ERROR() paths below (NULL checks, ret checks) and the
 * function's braces/labels are not visible here — confirm against the
 * full source before relying on the exact control flow.
 */
1608 void ipp_sched_event(struct work_struct *work)
/* the work_struct is embedded in drm_exynos_ipp_event_work, so a cast recovers it */
1610 	struct drm_exynos_ipp_event_work *event_work =
1611 		(struct drm_exynos_ipp_event_work *)work;
1612 	struct exynos_drm_ippdrv *ippdrv;
1613 	struct drm_exynos_ipp_cmd_node *c_node;
/* bail out if the cast did not yield a valid work item (guard elided here) */
1617 		DRM_ERROR("failed to get event_work.\n");
1621 	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1622 		event_work->buf_id[EXYNOS_DRM_OPS_DST]);
/* driver that queued this event; required for the send below */
1624 	ippdrv = event_work->ippdrv;
1626 		DRM_ERROR("failed to get ipp driver.\n");
/* command node currently bound to the driver */
1630 	c_node = ippdrv->cmd;
1632 		DRM_ERROR("failed to get command node.\n");
/*
 * Only deliver events while the command is running; a stopped or
 * paused node means userspace no longer expects this buffer.
 */
1636 	if (c_node->state != IPP_STATE_START) {
1637 		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1638 			__func__, c_node->state, c_node->property.prop_id);
1639 		goto err_completion;
/* serialize event delivery for this command node */
1642 	mutex_lock(&c_node->event_lock);
1644 	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1646 		DRM_ERROR("failed to send event.\n");
1647 		goto err_completion;
/* M2M commands block on start_complete; signal completion of this run */
1651 	if (c_node->property.cmd == IPP_CMD_M2M)
1652 		complete(&c_node->start_complete);
1654 	mutex_unlock(&c_node->event_lock);
/*
 * ipp_subdrv_probe - bind every registered ippdrv to the DRM device.
 *
 * Called by the exynos subdrv framework once the DRM device exists.
 * For each driver on the global exynos_drm_ippdrv_list it allocates an
 * ipp_id from ctx->ipp_idr, records the parent device, and wires up the
 * shared event workqueue and the ipp_sched_event handler.
 *
 * On failure the idrs are torn down (idr_remove_all/idr_destroy) before
 * returning; the error paths and loop closing braces are elided in this
 * excerpt.
 */
1657 static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1659 	struct ipp_context *ctx = get_ipp_context(dev);
1660 	struct exynos_drm_ippdrv *ippdrv;
1663 	DRM_DEBUG_KMS("%s\n", __func__);
1665 	/* get ipp driver entry */
1666 	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1667 		ippdrv->drm_dev = drm_dev;
/* allocate a unique ipp_id for this driver under ctx->ipp_lock */
1670 		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1673 			DRM_ERROR("failed to create id.\n");
1677 		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1678 			count++, (int)ippdrv, ippdrv->ipp_id);
/* id 0 is treated as invalid — idr allocation must start at 1 */
1680 		if (ippdrv->ipp_id == 0) {
1681 			DRM_ERROR("failed to get ipp_id[%d]\n",
1686 		/* store parent device for node */
1687 		ippdrv->parent_dev = dev;
1689 		/* store event work queue and handler */
1690 		ippdrv->event_workq = ctx->event_workq;
1691 		ippdrv->sched_event = ipp_sched_event;
1692 		INIT_LIST_HEAD(&ippdrv->cmd_list);
/* error unwind: drop all allocated ids and destroy both idrs */
1698 	idr_remove_all(&ctx->ipp_idr);
1699 	idr_remove_all(&ctx->prop_idr);
1700 	idr_destroy(&ctx->ipp_idr);
1701 	idr_destroy(&ctx->prop_idr);
/*
 * ipp_subdrv_remove - unbind all ipp drivers from the DRM device.
 *
 * Mirror of ipp_subdrv_probe(): walks the global driver list, clears the
 * drm_dev back-pointer and unregisters each driver from the framework.
 */
1706 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1708 	struct exynos_drm_ippdrv *ippdrv;
1710 	DRM_DEBUG_KMS("%s\n", __func__);
1712 	/* get ipp driver entry */
1713 	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1714 		ippdrv->drm_dev = NULL;
1715 		exynos_drm_ippdrv_unregister(ippdrv);
1718 	/* ToDo: free notifier callback list if needed */
/*
 * ipp_subdrv_open - per-file-descriptor open hook.
 *
 * Allocates the per-client ipp private data, stores it in the DRM file's
 * driver_priv and initializes the client's event list.  The matching
 * teardown is ipp_subdrv_close().  Returns -ENOMEM on allocation failure
 * (error return elided in this excerpt).
 */
1721 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1722 		struct drm_file *file)
1724 	struct drm_exynos_file_private *file_priv = file->driver_priv;
1725 	struct exynos_drm_ipp_private *priv;
1727 	DRM_DEBUG_KMS("%s\n", __func__);
1729 	/* ToDo: multi device open */
1731 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1733 		DRM_ERROR("failed to allocate priv.\n");
/* hang the ipp state off the DRM file so close can find it */
1737 	file_priv->ipp_priv = priv;
1739 	INIT_LIST_HEAD(&priv->event_list);
1741 	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
/*
 * ipp_subdrv_close - per-file-descriptor close hook.
 *
 * Reclaims every command node the closing client still owns: any node in
 * IPP_STATE_START is stopped first, then the node is cleaned up and the
 * driver's dedicated flag dropped.  When a driver's cmd_list becomes
 * empty its runtime-PM reference is released.
 *
 * NOTE(review): elided excerpt — the freeing of `priv` itself and
 * several closing braces are not visible here.
 */
1746 static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1747 		struct drm_file *file)
1749 	struct drm_exynos_file_private *file_priv = file->driver_priv;
1750 	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1751 	struct exynos_drm_ippdrv *ippdrv = NULL;
1752 	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1755 	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
/* nothing to clean if no ipp driver ever registered */
1757 	if (list_empty(&exynos_drm_ippdrv_list)) {
1758 		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
1762 	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1763 		if (list_empty(&ippdrv->cmd_list))
/* _safe variant: ipp_clean_cmd_node() removes entries while iterating */
1766 		list_for_each_entry_safe(c_node, tc_node,
1767 			&ippdrv->cmd_list, list) {
1768 			DRM_INFO("%s:count[%d]ippdrv[0x%x]\n",
1769 				__func__, count++, (int)ippdrv);
/* only touch command nodes that belong to the closing client */
1771 			if (c_node->priv == priv) {
/* a running command must be stopped before its node is freed */
1772 				if (c_node->state == IPP_STATE_START) {
1773 					ipp_stop_property(drm_dev, ippdrv,
1775 					c_node->state = IPP_STATE_STOP;
1778 				ippdrv->dedicated = false;
1779 				ipp_clean_cmd_node(c_node);
/* last command gone: balance the pm_runtime_get taken at start */
1780 				if (list_empty(&ippdrv->cmd_list))
1781 					pm_runtime_put_sync(ippdrv->dev);
/*
 * ipp_probe - platform driver probe for the exynos-drm-ipp device.
 *
 * Allocates the driver context, initializes the idr/lock pairs that map
 * ipp ids and property ids, creates two single-threaded workqueues (one
 * for completion events, one for commands), and registers the subdrv
 * callbacks with the exynos DRM core.
 *
 * Error unwinding (goto labels) destroys the workqueues in reverse order
 * of creation; some labels/frees are elided in this excerpt.
 */
1792 static int __devinit ipp_probe(struct platform_device *pdev)
1794 	struct device *dev = &pdev->dev;
1795 	struct ipp_context *ctx;
1796 	struct exynos_drm_subdrv *subdrv;
1799 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1803 	DRM_DEBUG_KMS("%s\n", __func__);
/* locks protecting the two idrs below */
1805 	spin_lock_init(&ctx->ipp_lock);
1806 	spin_lock_init(&ctx->prop_lock);
1807 	/* init ipp driver idr */
1808 	idr_init(&ctx->ipp_idr);
1809 	idr_init(&ctx->prop_idr);
1810 	/* create single thread for ipp event */
1811 	ctx->event_workq = create_singlethread_workqueue("ipp_event");
1812 	if (!ctx->event_workq) {
1813 		dev_err(dev, "failed to create event workqueue\n");
1818 	/* create single thread for ipp command */
1819 	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1820 	if (!ctx->cmd_workq) {
1821 		dev_err(dev, "failed to create cmd workqueue\n");
1823 		goto err_event_workq;
1826 	/* set sub driver informations */
1827 	subdrv = &ctx->subdrv;
1829 	subdrv->probe = ipp_subdrv_probe;
1830 	subdrv->remove = ipp_subdrv_remove;
1831 	subdrv->open = ipp_subdrv_open;
1832 	subdrv->close = ipp_subdrv_close;
1834 	/* set driver data */
1835 	platform_set_drvdata(pdev, ctx);
1837 	/* register sub driver */
1838 	ret = exynos_drm_subdrv_register(subdrv);
1840 		DRM_ERROR("failed to register drm ipp device.\n");
1844 	dev_info(&pdev->dev, "drm ipp registered successfully.\n");
/* error unwind: destroy workqueues in reverse order of creation */
1849 	destroy_workqueue(ctx->cmd_workq);
1851 	destroy_workqueue(ctx->event_workq);
/*
 * ipp_remove - platform driver remove; full teardown of ipp_probe().
 *
 * Unregisters the subdrv, empties and destroys both idrs, then destroys
 * the command and event workqueues (flushing any pending work).
 */
1858 static int __devexit ipp_remove(struct platform_device *pdev)
1860 	struct ipp_context *ctx = platform_get_drvdata(pdev);
1862 	DRM_DEBUG_KMS("%s\n", __func__);
1864 	/* unregister sub driver */
1865 	exynos_drm_subdrv_unregister(&ctx->subdrv);
1867 	/* remove,destroy ipp idr */
1868 	idr_remove_all(&ctx->ipp_idr);
1869 	idr_remove_all(&ctx->prop_idr);
1870 	idr_destroy(&ctx->ipp_idr);
1871 	idr_destroy(&ctx->prop_idr);
1873 	/* destroy command, event work queue */
1874 	destroy_workqueue(ctx->cmd_workq);
1875 	destroy_workqueue(ctx->event_workq);
/*
 * ipp_power_ctrl - common power on/off helper for all PM callbacks.
 * @enable: true to power up, false to power down.
 *
 * Currently a stub (see ToDo below); it only logs the request.  All four
 * suspend/resume paths funnel through here so the real implementation
 * lands in one place.
 */
1882 static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1884 	/* ToDo: Need to implement power and sysmmu ctrl. */
1885 	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
#ifdef CONFIG_PM_SLEEP
/*
 * ipp_suspend - system-sleep suspend callback.
 *
 * If the device is already runtime-suspended there is nothing to do
 * (early return elided in this excerpt); otherwise power the block down.
 */
1891 static int ipp_suspend(struct device *dev)
1893 	struct ipp_context *ctx = get_ipp_context(dev);
1895 	DRM_DEBUG_KMS("%s\n", __func__);
/* runtime PM already powered us off — skip the redundant power-down */
1897 	if (pm_runtime_suspended(dev))
1900 	return ipp_power_ctrl(ctx, false);
/*
 * ipp_resume - system-sleep resume callback.
 *
 * Powers the block back up only when it was not runtime-suspended before
 * the system sleep; a runtime-suspended device stays off until its next
 * runtime resume.
 */
1903 static int ipp_resume(struct device *dev)
1905 	struct ipp_context *ctx = get_ipp_context(dev);
1907 	DRM_DEBUG_KMS("%s\n", __func__);
1909 	if (!pm_runtime_suspended(dev))
1910 		return ipp_power_ctrl(ctx, true);
#ifdef CONFIG_PM_RUNTIME
/* ipp_runtime_suspend - runtime PM suspend: unconditionally power down. */
1917 static int ipp_runtime_suspend(struct device *dev)
1919 	struct ipp_context *ctx = get_ipp_context(dev);
1921 	DRM_DEBUG_KMS("%s\n", __func__);
1923 	return ipp_power_ctrl(ctx, false);
/* ipp_runtime_resume - runtime PM resume: unconditionally power up. */
1926 static int ipp_runtime_resume(struct device *dev)
1928 	struct ipp_context *ctx = get_ipp_context(dev);
1930 	DRM_DEBUG_KMS("%s\n", __func__);
1932 	return ipp_power_ctrl(ctx, true);
/*
 * PM operations table: system-sleep hooks compile in only under
 * CONFIG_PM_SLEEP, runtime hooks only under CONFIG_PM_RUNTIME — the
 * SET_*_PM_OPS macros expand to nothing otherwise.
 */
1936 static const struct dev_pm_ops ipp_pm_ops = {
1937 	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
1938 	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
1941 struct platform_driver ipp_driver = {
1943 .remove = __devexit_p(ipp_remove),
1945 .name = "exynos-drm-ipp",
1946 .owner = THIS_MODULE,