8f3707dac407bbe110233c835face6ce07d7bc06
[kernel/linux-3.0.git] / drivers / gpu / drm / exynos / exynos_drm_ipp.c
1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors:
4  *      Eunchul Kim <chulspro.kim@samsung.com>
5  *      Jinyoung Jeon <jy0.jeon@samsung.com>
6  *      Sangmin Lee <lsmin.lee@samsung.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
19 #include <plat/map-base.h>
20
21 #include <drm/drmP.h>
22 #include <drm/exynos_drm.h>
23 #include "exynos_drm_drv.h"
24 #include "exynos_drm_gem.h"
25 #include "exynos_drm_ipp.h"
26
 27 /*
 28  * IPP stands for Image Post Processing and supports image scaler/rotator
 29  * and input/output DMA operations using FIMC, GSC, Rotator, and so on.
 30  * IPP is an integration device driver for hardware blocks that share
 31  * these attributes.
 32  */
33
34 #define get_ipp_context(dev)    platform_get_drvdata(to_platform_device(dev))
35
36 /*
37  * A structure of event.
38  *
39  * @base: base of event.
40  * @event: ipp event.
41  */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;	/* must stay first: ipp_free_event() kfree()s via this pointer */
	struct drm_exynos_ipp_event	event;	/* payload delivered to user space */
};
46
47 /*
48  * A structure of memory node.
49  *
50  * @list: list head to memory queue information.
51  * @ops_id: id of operations.
52  * @prop_id: id of property.
53  * @buf_id: id of buffer.
54  * @buf_info: gem objects and dma address, size.
55  */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;	/* link into c_node->mem_list[ops_id] */
	enum drm_exynos_ops_id	ops_id;	/* which side: source or destination */
	u32	prop_id;		/* id of the owning property */
	u32	buf_id;			/* user-supplied buffer id */
	struct drm_exynos_ipp_buf_info	buf_info;	/* gem objects, dma addresses, sizes */
};
63
64 /*
65  * A structure of ipp context.
66  *
67  * @subdrv: prepare initialization using subdrv.
68  * @ipp_lock: lock for synchronization of access to ipp_idr.
69  * @prop_lock: lock for synchronization of access to prop_idr.
70  * @ipp_idr: ipp driver idr.
71  * @prop_idr: property idr.
72  * @event_workq: event work queue.
73  * @cmd_workq: command work queue.
74  */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;	/* registration with the exynos drm core */
	spinlock_t	ipp_lock;		/* protects ipp_idr */
	spinlock_t	prop_lock;		/* protects prop_idr */
	struct idr	ipp_idr;		/* ipp driver id -> struct exynos_drm_ippdrv */
	struct idr	prop_idr;		/* property id -> struct drm_exynos_ipp_cmd_node */
	struct workqueue_struct *event_workq;	/* runs ipp_sched_event work items */
	struct workqueue_struct *cmd_workq;	/* runs ipp_sched_cmd work items */
};
84
85 static LIST_HEAD(exynos_drm_ippdrv_list);
86 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
87
88 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
89 {
90         DRM_DEBUG_KMS("%s\n", __func__);
91
92         if (!ippdrv)
93                 return -EINVAL;
94
95         list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
96
97         return 0;
98 }
99 EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_register);
100
101 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
102 {
103         DRM_DEBUG_KMS("%s\n", __func__);
104
105         if (!ippdrv)
106                 return -EINVAL;
107
108         list_del(&ippdrv->drv_list);
109
110         return 0;
111 }
112 EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_unregister);
113
/*
 * Allocate a new id (>= 1, so 0 can mean "no id") for @obj in @id_idr
 * and return it through @idp.
 *
 * Uses the pre-3.9 idr API: idr_pre_get() preallocates outside the
 * lock, idr_get_new_above() consumes the preallocation under @lock and
 * returns -EAGAIN when a concurrent allocation stole it, in which case
 * the whole sequence is retried.
 *
 * Returns 0 on success, -ENOMEM when preallocation fails, or another
 * negative errno from idr_get_new_above().
 */
static int ipp_create_id(struct idr *id_idr, spinlock_t *lock, void *obj,
	u32 *idp)
{
	int ret = -EINVAL;

	DRM_DEBUG_KMS("%s\n", __func__);

again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(id_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(lock);
	/* NOTE(review): cast assumes u32 and int share representation — true on
	 * this architecture, but worth keeping in mind. */
	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
	spin_unlock(lock);
	if (ret == -EAGAIN)
		goto again;

	return ret;
}
135
136 static void *ipp_find_id(struct idr *id_idr, spinlock_t *lock, u32 id)
137 {
138         void *obj;
139
140         DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
141
142         spin_lock(lock);
143
144         /* find object using handle */
145         obj = idr_find(id_idr, id);
146         if (obj == NULL) {
147                 spin_unlock(lock);
148                 return NULL;
149         }
150
151         spin_unlock(lock);
152
153         return obj;
154 }
155
/*
 * Select an ipp driver capable of running @property.
 *
 * With a non-zero property->ipp_id the specific driver is looked up in
 * the context's idr and validated: it must not already be dedicated to
 * another client, must be runtime-suspended for non-M2M commands, and
 * must accept the property via its check_property callback.
 *
 * With ipp_id == 0 the global driver list is scanned and the first
 * driver passing the same three checks is chosen.
 *
 * Returns the selected driver, or NULL when none qualifies.
 */
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
	struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

	if (ipp_id) {
		/* find ipp driver */
		ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			goto err_null;
		}

		/* check dedicated state */
		if (ippdrv->dedicated) {
			DRM_ERROR("used choose device.\n");
			goto err_null;
		}

		/* non-M2M commands take exclusive ownership, so the h/w must
		 * currently be idle (runtime suspended) */
		if (property->cmd != IPP_CMD_M2M
			&& !pm_runtime_suspended(ippdrv->dev)) {
			DRM_ERROR("can't run dedicatedly.\n");
			goto err_null;
		}

		/* check property */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			goto err_null;
		}

		return ippdrv;
	} else {
		/* get ipp driver entry */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			/* check dedicated state */
			if (ippdrv->dedicated)
				continue;

			if (property->cmd != IPP_CMD_M2M
				&& !pm_runtime_suspended(ippdrv->dev)) {
				DRM_INFO("%s:can't run dedicatedly.\n",
					__func__);
				continue;
			}

			/* check property */
			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_INFO("%s:not support property.\n",
					__func__);
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

err_null:
	return NULL;
}
224
225 static struct exynos_drm_ippdrv *ipp_find_drv_node(u32 prop_id)
226 {
227         struct exynos_drm_ippdrv *ippdrv;
228         struct drm_exynos_ipp_cmd_node *c_node;
229         int count = 0;
230
231         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
232
233         if (list_empty(&exynos_drm_ippdrv_list)) {
234                 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n",
235                         __func__);
236                 return NULL;
237         }
238
239         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
240                 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
241                         __func__, count++, (int)ippdrv);
242
243                 if (!list_empty(&ippdrv->cmd_list)) {
244                         list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
245                                 if (c_node->property.prop_id == prop_id)
246                                         return ippdrv;
247                         }
248                 }
249         }
250
251         return NULL;
252 }
253
254 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
255                                         struct drm_file *file)
256 {
257         struct drm_exynos_file_private *file_priv = file->driver_priv;
258         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
259         struct device *dev = priv->dev;
260         struct ipp_context *ctx = get_ipp_context(dev);
261         struct drm_exynos_ipp_prop_list *prop_list = data;
262         struct exynos_drm_ippdrv *ippdrv;
263         int count = 0;
264
265         DRM_DEBUG_KMS("%s\n", __func__);
266
267         if (!ctx) {
268                 DRM_ERROR("invalid context.\n");
269                 return -EINVAL;
270         }
271
272         if (!prop_list) {
273                 DRM_ERROR("invalid property parameter.\n");
274                 return -EINVAL;
275         }
276
277         DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
278
279         if (prop_list->ipp_id == 0) {
280                 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
281                         count++;
282                 prop_list->count = count;
283         } else {
284                 ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
285                                                    prop_list->ipp_id);
286
287                 if (!ippdrv) {
288                         DRM_ERROR("not found ipp%d driver.\n",
289                                         prop_list->ipp_id);
290                         return -EINVAL;
291                 }
292
293                 prop_list = ippdrv->prop_list;
294         }
295
296         return 0;
297 }
298 EXPORT_SYMBOL_GPL(exynos_drm_ipp_get_property);
299
300 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
301                                         struct drm_file *file)
302 {
303         struct drm_exynos_file_private *file_priv = file->driver_priv;
304         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
305         struct device *dev = priv->dev;
306         struct ipp_context *ctx = get_ipp_context(dev);
307         struct drm_exynos_ipp_property *property = data;
308         struct exynos_drm_ippdrv *ippdrv;
309         struct drm_exynos_ipp_cmd_node *c_node;
310         struct drm_exynos_ipp_config *config;
311         struct drm_exynos_pos *pos;
312         struct drm_exynos_sz *sz;
313         int ret, i;
314
315         DRM_DEBUG_KMS("%s\n", __func__);
316
317         if (!ctx) {
318                 DRM_ERROR("invalid context.\n");
319                 return -EINVAL;
320         }
321
322         if (!property) {
323                 DRM_ERROR("invalid property parameter.\n");
324                 return -EINVAL;
325         }
326
327         for_each_ipp_ops(i) {
328                 config = &property->config[i];
329                 pos = &config->pos;
330                 sz = &config->sz;
331
332                 DRM_INFO("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
333                         __func__, property->prop_id,
334                         i ? "dst" : "src", config->fmt);
335
336                 DRM_INFO("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
337                         __func__, pos->x, pos->y, pos->w, pos->h,
338                         sz->hsize, sz->vsize, config->flip, config->degree);
339         }
340
341         if (property->prop_id) {
342                 ippdrv = ipp_find_drv_node(property->prop_id);
343                 if (!ippdrv) {
344                         DRM_ERROR("failed to get ipp driver.\n");
345                         return -EINVAL;
346                 }
347
348                 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
349                         if ((c_node->property.prop_id ==
350                                 property->prop_id) &&
351                                 (c_node->state == IPP_STATE_STOP)) {
352                                 DRM_INFO("%s:found cmd[%d]ippdrv[0x%x]\n",
353                                         __func__, property->cmd, (int)ippdrv);
354
355                                 c_node->property = *property;
356                                 return 0;
357                         }
358                 }
359
360                 DRM_ERROR("failed to search property.\n");
361                 return -EINVAL;
362         }
363
364         /* find ipp driver using ipp id */
365         ippdrv = ipp_find_driver(ctx, property);
366         if (!ippdrv) {
367                 DRM_ERROR("failed to get ipp driver.\n");
368                 return -EINVAL;
369         }
370
371         /* allocate command node */
372         c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
373         if (!c_node) {
374                 DRM_ERROR("failed to allocate map node.\n");
375                 return -ENOMEM;
376         }
377
378         /* create property id */
379         ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
380                 &property->prop_id);
381         if (ret) {
382                 DRM_ERROR("failed to create id.\n");
383                 goto err_clear;
384         }
385
386         DRM_INFO("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
387                 __func__, property->prop_id, property->cmd, (int)ippdrv);
388
389         /* stored property information and ippdrv in private data */
390         c_node->priv = priv;
391         c_node->property = *property;
392         c_node->state = IPP_STATE_IDLE;
393
394         c_node->start_work = kzalloc(sizeof(*c_node->start_work),
395                 GFP_KERNEL);
396         if (!c_node->start_work) {
397                 DRM_ERROR("failed to alloc start_work.\n");
398                 ret = -ENOMEM;
399                 goto err_clear;
400         }
401
402         INIT_WORK((struct work_struct *)c_node->start_work,
403                 ipp_sched_cmd);
404
405         c_node->stop_work = kzalloc(sizeof(*c_node->stop_work),
406                 GFP_KERNEL);
407         if (!c_node->stop_work) {
408                 DRM_ERROR("failed to alloc stop_work.\n");
409                 ret = -ENOMEM;
410                 goto err_free_start;
411         }
412
413         INIT_WORK((struct work_struct *)c_node->stop_work,
414                 ipp_sched_cmd);
415
416         c_node->event_work = kzalloc(sizeof(*c_node->event_work),
417                 GFP_KERNEL);
418         if (!c_node->event_work) {
419                 DRM_ERROR("failed to alloc event_work.\n");
420                 ret = -ENOMEM;
421                 goto err_free_stop;
422         }
423
424         INIT_WORK((struct work_struct *)c_node->event_work,
425                 ipp_sched_event);
426
427         /* init ioctl lock */
428         mutex_init(&c_node->cmd_lock);
429         mutex_init(&c_node->mem_lock);
430         mutex_init(&c_node->event_lock);
431         init_completion(&c_node->start_complete);
432         init_completion(&c_node->stop_complete);
433
434         for_each_ipp_ops(i)
435                 INIT_LIST_HEAD(&c_node->mem_list[i]);
436
437         INIT_LIST_HEAD(&c_node->event_list);
438         list_splice_init(&priv->event_list, &c_node->event_list);
439         list_add_tail(&c_node->list, &ippdrv->cmd_list);
440
441         /* make dedicated state without m2m */
442         if (property->cmd != IPP_CMD_M2M)
443                 ippdrv->dedicated = true;
444
445         return 0;
446
447 err_free_stop:
448         kfree(c_node->stop_work);
449 err_free_start:
450         kfree(c_node->start_work);
451 err_clear:
452         kfree(c_node);
453         return ret;
454 }
455 EXPORT_SYMBOL_GPL(exynos_drm_ipp_set_property);
456
457 static struct drm_exynos_ipp_mem_node
458         *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
459         struct drm_exynos_ipp_queue_buf *qbuf)
460 {
461         struct drm_exynos_ipp_mem_node *m_node;
462         struct list_head *head;
463         int count = 0;
464
465         DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
466
467         /* source/destination memory list */
468         head = &c_node->mem_list[qbuf->ops_id];
469
470         /* find memory node entry */
471         list_for_each_entry(m_node, head, list) {
472                 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
473                         __func__, count++, (int)m_node);
474
475                 /* compare buffer id */
476                 if (m_node->buf_id == qbuf->buf_id)
477                         return m_node;
478         }
479
480         return NULL;
481 }
482
/*
 * Count the buffers queued on the command node's src and dst memory
 * lists, under mem_lock.
 *
 * M2M commands need both a source and a destination buffer, so the
 * usable depth is the smaller of the two counts; other commands use
 * only one side, so the larger count applies. A return value of 0
 * therefore means there is nothing ready to process.
 */
static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
				i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));


	/* M2M requires both queues populated; other commands need only one */
	if (property->cmd == IPP_CMD_M2M)
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}
528
/*
 * Unlink @c_node from its driver's cmd_list and free everything it
 * owns: the three work items allocated in set_property and the node
 * itself.
 *
 * NOTE(review): assumes none of the work items are still queued or
 * running when this is called — confirm at the call sites.
 */
static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->cmd_lock);
	/* delete list */
	list_del(&c_node->list);
	mutex_unlock(&c_node->cmd_lock);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
549
/*
 * Program one queued buffer into the hardware by invoking the driver's
 * set_addr callback for the node's ops side (src or dst), under the
 * command node's mem_lock.
 *
 * Returns 0 on success, -EFAULT for a NULL node, -EIO when the driver
 * has no ops table for this side, or the set_addr callback's error.
 */
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EIO;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}
590
/*
 * Build a memory node for the buffer described by @qbuf and append it
 * to the command node's src/dst queue, under mem_lock.
 *
 * For each planar gem handle the dma address and size are resolved and
 * stored in the node's buf_info. Returns the new node, or NULL on
 * allocation or lookup failure.
 *
 * NOTE(review): on the err_clear path, dma-address references already
 * taken for earlier planes are not released here — confirm whether a
 * later put covers them, otherwise partial failure leaks references.
 */
static struct drm_exynos_ipp_mem_node
	*ipp_get_mem_node(struct drm_device *drm_dev,
	struct drm_file *file,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	unsigned long size;
	int i;

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
		(int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
		qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		unsigned int gem_obj;

		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
			i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i] != 0) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file, &gem_obj);
			if (!addr) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			size = exynos_drm_gem_get_size(drm_dev,
						qbuf->handle[i], file);
			if (!size) {
				DRM_ERROR("failed to get size.\n");
				goto err_clear;
			}

			/* NOTE(review): gem object reference stored as an
			 * integer through a void * — relies on pointer-sized
			 * round-trip; confirm against the gem helper's API */
			buf_info.gem_objs[i] = (void *)gem_obj;
			buf_info.base[i] = *(dma_addr_t *) addr;
			buf_info.size[i] = (uint64_t) size;
			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]size[0x%x]\n",
				__func__, i, buf_info.base[i],
				(int)buf_info.size[i]);
		}
	}

	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);

err_unlock:
	mutex_unlock(&c_node->mem_lock);

	return NULL;
}
669
/*
 * Tear down one memory node under mem_lock: drop the dma-address
 * references taken in ipp_get_mem_node() for every plane, unlink the
 * node from its queue and free it.
 *
 * Returns 0 on success, -EFAULT for a NULL node, -ENOMEM when the node
 * is not linked on any list.
 */
static int ipp_put_mem_node(struct drm_device *drm_dev,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_mem_node *m_node)
{
	int i, ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	mutex_lock(&c_node->mem_lock);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		void *gem_obj = m_node->buf_info.gem_objs[i];
		if (gem_obj)
			exynos_drm_gem_put_dma_addr(drm_dev, gem_obj);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

err_unlock:
	mutex_unlock(&c_node->mem_lock);

	return ret;
}
710
/*
 * drm_pending_event destroy callback: free the containing send-event.
 * Valid because 'base' is the first member of drm_exynos_ipp_send_event.
 */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
715
/*
 * Allocate a completion event for a queued destination buffer and add
 * it to the command node's event list; it is delivered to user space
 * when the buffer finishes processing.
 *
 * NOTE(review): on allocation failure file->event_space is credited
 * back, but no matching debit is visible in this function — confirm
 * that the reservation is made by the caller or DRM core.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ipp_get_event(struct drm_device *drm_dev,
	struct drm_file *file,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}
750
/*
 * Drop pending events from the command node's event list.
 *
 * With @qbuf == NULL every queued event is removed and freed;
 * otherwise only the first event whose destination buf_id matches
 * @qbuf->buf_id is removed.
 */
static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
		return;
	}

	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
			__func__, count++, (int)e);

		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		} else if (e->event.buf_id[EXYNOS_DRM_OPS_DST]
			== qbuf->buf_id) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}

	return;
}
781
/*
 * Fill a command work item with its driver and command node, then
 * queue it on the ipp context's command workqueue, where the worker
 * set up in set_property (ipp_sched_cmd) will run it.
 */
void ipp_handle_cmd_work(struct device *dev,
	struct exynos_drm_ippdrv *ippdrv,
	struct drm_exynos_ipp_cmd_work *cmd_work,
	struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	/* cmd_work embeds a work_struct as its first member, hence the cast */
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
793
794 int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
795         struct drm_file *file)
796 {
797         struct drm_exynos_file_private *file_priv = file->driver_priv;
798         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
799         struct device *dev = priv->dev;
800         struct ipp_context *ctx = get_ipp_context(dev);
801         struct drm_exynos_ipp_queue_buf *qbuf = data;
802         struct exynos_drm_ippdrv *ippdrv;
803         struct drm_exynos_ipp_property *property;
804         struct exynos_drm_ipp_ops *ops;
805         struct drm_exynos_ipp_cmd_node *c_node;
806         struct drm_exynos_ipp_mem_node *m_node, *tm_node;
807         int ret;
808
809         DRM_DEBUG_KMS("%s\n", __func__);
810
811         if (!qbuf) {
812                 DRM_ERROR("invalid buf parameter.\n");
813                 return -EINVAL;
814         }
815
816         ippdrv = ipp_find_drv_node(qbuf->prop_id);
817
818         if (!ippdrv) {
819                 DRM_ERROR("failed to get ipp driver.\n");
820                 return -EINVAL;
821         }
822
823         if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
824                 DRM_ERROR("invalid ops parameter.\n");
825                 return -EINVAL;
826         }
827
828         ops = ippdrv->ops[qbuf->ops_id];
829         if (!ops) {
830                 DRM_ERROR("failed to get ops.\n");
831                 return -EINVAL;
832         }
833
834         DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
835                 __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
836                 qbuf->buf_id, qbuf->buf_type);
837
838         /* find command node */
839         c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
840                 qbuf->prop_id);
841         if (!c_node) {
842                 DRM_ERROR("failed to get command node.\n");
843                 return -EINVAL;
844         }
845
846         /* get property */
847         property = &c_node->property;
848         if (!property) {
849                 DRM_ERROR("invalid property parameter.\n");
850                 return -EINVAL;
851         }
852
853         /* buffer control */
854         switch (qbuf->buf_type) {
855         case IPP_BUF_ENQUEUE:
856                 /* get memory node */
857                 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
858                 if (!m_node) {
859                         DRM_ERROR("failed to get m_node.\n");
860                         return -EINVAL;
861                 }
862
863                 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
864                         /* get event */
865                         ret = ipp_get_event(drm_dev, file, c_node, qbuf);
866                         if (ret) {
867                                 DRM_ERROR("failed to get event.\n");
868                                 goto err_clean_node;
869                         }
870
871                         if (c_node->state != IPP_STATE_START) {
872                                 DRM_DEBUG_KMS("%s:bypass for invalid state.\n"
873                                         , __func__);
874                                 break;
875                         }
876
877                         if (!ipp_check_mem_list(c_node)) {
878                                 DRM_DEBUG_KMS("%s:empty memory.\n"
879                                         , __func__);
880                                 break;
881                         }
882
883                         /*
884                          * If set source, destination buffer and enable pm
885                          * m2m operations need start operations in queue
886                          */
887                         if (property->cmd == IPP_CMD_M2M) {
888                                 struct drm_exynos_ipp_cmd_work *cmd_work =
889                                         c_node->start_work;
890
891                                 cmd_work->ctrl = IPP_CTRL_PLAY;
892                                 ipp_handle_cmd_work(dev, ippdrv, cmd_work,
893                                         c_node);
894                         } else {
895                                 if (ops->set_addr) {
896                                         ret = ops->set_addr(ippdrv->dev,
897                                                 &m_node->buf_info,
898                                                 m_node->buf_id, qbuf->buf_type);
899                                         if (ret) {
900                                                 DRM_ERROR(
901                                                         "failed to set addr.\n");
902                                                 goto err_clean_node;
903                                         }
904                                 }
905                         }
906                 }
907                 break;
908         case IPP_BUF_DEQUEUE:
909                 mutex_lock(&c_node->cmd_lock);
910
911                 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
912                         ipp_put_event(c_node, qbuf);
913
914                 /* put memory node */
915                 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
916                         list_for_each_entry_safe(m_node, tm_node,
917                                 &c_node->mem_list[qbuf->ops_id], list) {
918                                 if (m_node->buf_id == qbuf->buf_id &&
919                                         m_node->ops_id == qbuf->ops_id) {
920                                         /* put memory node */
921                                         ipp_put_mem_node(drm_dev,
922                                                 c_node, m_node);
923                                 }
924                         }
925                 }
926                 mutex_unlock(&c_node->cmd_lock);
927                 break;
928         default:
929                 DRM_ERROR("invalid buffer control.\n");
930                 return -EINVAL;
931         }
932
933         return 0;
934
935 err_clean_node:
936         DRM_ERROR("clean memory nodes.\n");
937
938         if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
939                 /* delete list */
940                 list_for_each_entry_safe(m_node, tm_node,
941                         &c_node->mem_list[qbuf->ops_id], list) {
942                         if (m_node->buf_id == qbuf->buf_id &&
943                                 m_node->ops_id == qbuf->ops_id)
944                                 ipp_put_mem_node(drm_dev, c_node, m_node);
945                 }
946         }
947
948         return ret;
949 }
950 EXPORT_SYMBOL_GPL(exynos_drm_ipp_queue_buf);
951
952 static bool exynos_drm_ipp_check_valid(struct device *dev,
953         enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
954 {
955         DRM_DEBUG_KMS("%s\n", __func__);
956
957         if (ctrl != IPP_CTRL_PLAY) {
958                 if (pm_runtime_suspended(dev)) {
959                         DRM_ERROR("pm:runtime_suspended.\n");
960                         goto err_status;
961                 }
962         }
963
964         switch (ctrl) {
965         case IPP_CTRL_PLAY:
966                 if (state != IPP_STATE_IDLE)
967                         goto err_status;
968                 break;
969         case IPP_CTRL_STOP:
970                 if (state == IPP_STATE_STOP)
971                         goto err_status;
972                 break;
973         case IPP_CTRL_PAUSE:
974                 if (state != IPP_STATE_START)
975                         goto err_status;
976                 break;
977         case IPP_CTRL_RESUME:
978                 if (state != IPP_STATE_STOP)
979                         goto err_status;
980                 break;
981         default:
982                 DRM_ERROR("invalid state.\n");
983                 goto err_status;
984                 break;
985         }
986
987         return true;
988
989 err_status:
990         DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
991         return false;
992 }
993
994 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
995         struct drm_file *file)
996 {
997         struct drm_exynos_file_private *file_priv = file->driver_priv;
998         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
999         struct exynos_drm_ippdrv *ippdrv = NULL;
1000         struct device *dev = priv->dev;
1001         struct ipp_context *ctx = get_ipp_context(dev);
1002         struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1003         struct drm_exynos_ipp_cmd_work *cmd_work;
1004         struct drm_exynos_ipp_cmd_node *c_node;
1005         int ret = 0;
1006
1007         DRM_DEBUG_KMS("%s\n", __func__);
1008
1009         if (!ctx) {
1010                 DRM_ERROR("invalid context.\n");
1011                 return -EINVAL;
1012         }
1013
1014         if (!cmd_ctrl) {
1015                 DRM_ERROR("invalid control parameter.\n");
1016                 return -EINVAL;
1017         }
1018
1019         DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
1020                 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1021
1022         ippdrv = ipp_find_drv_node(cmd_ctrl->prop_id);
1023
1024         if (!ippdrv) {
1025                 DRM_ERROR("failed to get ipp driver.\n");
1026                 return -EINVAL;
1027         }
1028
1029         c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
1030                 cmd_ctrl->prop_id);
1031
1032         if (!c_node) {
1033                 DRM_ERROR("invalid command node list.\n");
1034                 return -EINVAL;
1035         }
1036
1037         if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1038             c_node->state)) {
1039                 DRM_ERROR("invalid state.\n");
1040                 return -EINVAL;
1041         }
1042
1043         switch (cmd_ctrl->ctrl) {
1044         case IPP_CTRL_PLAY:
1045                 if (pm_runtime_suspended(ippdrv->dev))
1046                         pm_runtime_get_sync(ippdrv->dev);
1047                 c_node->state = IPP_STATE_START;
1048
1049                 cmd_work = c_node->start_work;
1050                 cmd_work->ctrl = cmd_ctrl->ctrl;
1051                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1052                 break;
1053         case IPP_CTRL_STOP:
1054                 c_node->state = IPP_STATE_STOP;
1055
1056                 cmd_work = c_node->stop_work;
1057                 cmd_work->ctrl = cmd_ctrl->ctrl;
1058                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1059
1060                 if (!wait_for_completion_timeout(&c_node->stop_complete,
1061                     msecs_to_jiffies(200))) {
1062                         DRM_ERROR("timeout stop:prop_id[%d]\n",
1063                                 c_node->property.prop_id);
1064                 }
1065
1066                 ippdrv->dedicated = false;
1067                 ipp_clean_cmd_node(c_node);
1068
1069                 if (list_empty(&ippdrv->cmd_list))
1070                         pm_runtime_put_sync(ippdrv->dev);
1071                 break;
1072         case IPP_CTRL_PAUSE:
1073                 c_node->state = IPP_STATE_STOP;
1074
1075                 cmd_work = c_node->stop_work;
1076                 cmd_work->ctrl = cmd_ctrl->ctrl;
1077                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1078
1079                 if (!wait_for_completion_timeout(&c_node->stop_complete,
1080                     msecs_to_jiffies(200))) {
1081                         DRM_ERROR("timeout stop:prop_id[%d]\n",
1082                                 c_node->property.prop_id);
1083                 }
1084                 break;
1085         case IPP_CTRL_RESUME:
1086                 c_node->state = IPP_STATE_START;
1087
1088                 cmd_work = c_node->start_work;
1089                 cmd_work->ctrl = cmd_ctrl->ctrl;
1090                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1091                 break;
1092         default:
1093                 /* ToDo: expand ctrl operation */
1094                 DRM_ERROR("could not support this state currently.\n");
1095                 goto err_clear;
1096         }
1097
1098         DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
1099                 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1100
1101         return 0;
1102
1103 err_clear:
1104         return ret;
1105 }
1106 EXPORT_SYMBOL_GPL(exynos_drm_ipp_cmd_ctrl);
1107
/*
 * exynos_drm_ippnb_register - add @nb to the global IPP notifier chain.
 *
 * Thin wrapper around blocking_notifier_chain_register(); returns its
 * result directly (0 on success).
 */
int exynos_drm_ippnb_register(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(
                        &exynos_drm_ippnb_list, nb);
}
EXPORT_SYMBOL_GPL(exynos_drm_ippnb_register);
1114
/*
 * exynos_drm_ippnb_unregister - remove @nb from the global IPP notifier
 * chain. Returns the result of blocking_notifier_chain_unregister().
 */
int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(
                        &exynos_drm_ippnb_list, nb);
}
EXPORT_SYMBOL_GPL(exynos_drm_ippnb_unregister);
1121
/*
 * exynos_drm_ippnb_send_event - notify all registered IPP listeners.
 * @val: event value passed to each callback.
 * @v: opaque payload passed to each callback.
 *
 * Returns the value of blocking_notifier_call_chain().
 */
int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
        return blocking_notifier_call_chain(
                        &exynos_drm_ippnb_list, val, v);
}
EXPORT_SYMBOL_GPL(exynos_drm_ippnb_send_event);
1128
1129 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1130         struct drm_exynos_ipp_property *property)
1131 {
1132         struct exynos_drm_ipp_ops *ops = NULL;
1133         int ret, i, swap = 0;
1134
1135         if (!property) {
1136                 DRM_ERROR("invalid property parameter.\n");
1137                 return -EINVAL;
1138         }
1139
1140         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1141
1142         /* reset h/w block */
1143         if (ippdrv->reset &&
1144                 ippdrv->reset(ippdrv->dev)) {
1145                 DRM_ERROR("failed to reset.\n");
1146                 return -EINVAL;
1147         }
1148
1149         /* set source,destination operations */
1150         for_each_ipp_ops(i) {
1151                 /* ToDo: integrate property and config */
1152                 struct drm_exynos_ipp_config *config =
1153                         &property->config[i];
1154
1155                 ops = ippdrv->ops[i];
1156                 if (!ops || !config) {
1157                         DRM_ERROR("not support ops and config.\n");
1158                         return -EINVAL;
1159                 }
1160
1161                 /* set format */
1162                 if (ops->set_fmt) {
1163                         ret = ops->set_fmt(ippdrv->dev, config->fmt);
1164                         if (ret) {
1165                                 DRM_ERROR("not support format.\n");
1166                                 return ret;
1167                         }
1168                 }
1169
1170                 /* set transform for rotation, flip */
1171                 if (ops->set_transf) {
1172                         swap = ops->set_transf(ippdrv->dev, config->degree,
1173                                 config->flip);
1174                         if (swap < 0) {
1175                                 DRM_ERROR("not support tranf.\n");
1176                                 return -EINVAL;
1177                         }
1178                 }
1179
1180                 /* set size */
1181                 if (ops->set_size) {
1182                         ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1183                                 &config->sz);
1184                         if (ret) {
1185                                 DRM_ERROR("not support size.\n");
1186                                 return ret;
1187                         }
1188                 }
1189         }
1190
1191         return 0;
1192 }
1193
1194 static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1195         struct drm_exynos_ipp_cmd_node *c_node)
1196 {
1197         struct drm_exynos_ipp_mem_node *m_node;
1198         struct drm_exynos_ipp_property *property = &c_node->property;
1199         struct list_head *head;
1200         int ret, i;
1201
1202         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1203
1204         /* store command info in ippdrv */
1205         ippdrv->cmd = c_node;
1206
1207         if (!ipp_check_mem_list(c_node)) {
1208                 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1209                 return -ENOMEM;
1210         }
1211
1212         /* set current property in ippdrv */
1213         ret = ipp_set_property(ippdrv, property);
1214         if (ret) {
1215                 DRM_ERROR("failed to set property.\n");
1216                 ippdrv->cmd = NULL;
1217                 return ret;
1218         }
1219
1220         /* check command */
1221         switch (property->cmd) {
1222         case IPP_CMD_M2M:
1223                 for_each_ipp_ops(i) {
1224                         /* source/destination memory list */
1225                         head = &c_node->mem_list[i];
1226
1227                         /* get first entry */
1228                         m_node = list_first_entry(head,
1229                                 struct drm_exynos_ipp_mem_node, list);
1230                         if (!m_node) {
1231                                 DRM_ERROR("failed to get node.\n");
1232                                 ret = -EFAULT;
1233                                 return ret;
1234                         }
1235
1236                         DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
1237                                 __func__, (int)m_node);
1238
1239                         /* set memory node to ippdrv */
1240                         ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1241                         if (ret) {
1242                                 DRM_ERROR("failed to set m node.\n");
1243                                 return ret;
1244                         }
1245                 }
1246                 break;
1247         case IPP_CMD_WB:
1248                 /* destination memory list */
1249                 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1250
1251                 /* get list entry */
1252                 list_for_each_entry(m_node, head, list) {
1253                         /* set memory node to ippdrv */
1254                         ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1255                         if (ret) {
1256                                 DRM_ERROR("failed to set m node.\n");
1257                                 return ret;
1258                         }
1259                 }
1260                 break;
1261         case IPP_CMD_OUTPUT:
1262                 /* source memory list */
1263                 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1264
1265                 /* get list entry */
1266                 list_for_each_entry(m_node, head, list) {
1267                         /* set memory node to ippdrv */
1268                         ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1269                         if (ret) {
1270                                 DRM_ERROR("failed to set m node.\n");
1271                                 return ret;
1272                         }
1273                 }
1274                 break;
1275         default:
1276                 DRM_ERROR("invalid operations.\n");
1277                 return -EINVAL;
1278         }
1279
1280         DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
1281
1282         /* start operations */
1283         if (ippdrv->start) {
1284                 ret = ippdrv->start(ippdrv->dev, property->cmd);
1285                 if (ret) {
1286                         DRM_ERROR("failed to start ops.\n");
1287                         return ret;
1288                 }
1289         }
1290
1291         return 0;
1292 }
1293
1294 static int ipp_stop_property(struct drm_device *drm_dev,
1295         struct exynos_drm_ippdrv *ippdrv,
1296         struct drm_exynos_ipp_cmd_node *c_node)
1297 {
1298         struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1299         struct drm_exynos_ipp_property *property = &c_node->property;
1300         struct list_head *head;
1301         int ret, i;
1302
1303         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1304
1305         /* put event */
1306         ipp_put_event(c_node, NULL);
1307
1308         /* check command */
1309         switch (property->cmd) {
1310         case IPP_CMD_M2M:
1311                 for_each_ipp_ops(i) {
1312                         /* source/destination memory list */
1313                         head = &c_node->mem_list[i];
1314
1315                         if (list_empty(head)) {
1316                                 DRM_DEBUG_KMS("%s:mem_list is empty.\n",
1317                                         __func__);
1318                                 break;
1319                         }
1320
1321                         /* get list entry */
1322                         list_for_each_entry_safe(m_node, tm_node,
1323                                 head, list) {
1324                                 /* put memory node */
1325                                 ret = ipp_put_mem_node(drm_dev, c_node,
1326                                         m_node);
1327                                 if (ret) {
1328                                         DRM_ERROR("failed to put m_node.\n");
1329                                         goto err_clear;
1330                                 }
1331                         }
1332                 }
1333                 break;
1334         case IPP_CMD_WB:
1335                 /* destination memory list */
1336                 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1337
1338                 if (list_empty(head)) {
1339                         DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1340                         break;
1341                 }
1342
1343                 /* get list entry */
1344                 list_for_each_entry_safe(m_node, tm_node, head, list) {
1345                         /* put memory node */
1346                         ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1347                         if (ret) {
1348                                 DRM_ERROR("failed to put m_node.\n");
1349                                 goto err_clear;
1350                         }
1351                 }
1352                 break;
1353         case IPP_CMD_OUTPUT:
1354                 /* source memory list */
1355                 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1356
1357                 if (list_empty(head)) {
1358                         DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1359                         break;
1360                 }
1361
1362                 /* get list entry */
1363                 list_for_each_entry_safe(m_node, tm_node, head, list) {
1364                         /* put memory node */
1365                         ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1366                         if (ret) {
1367                                 DRM_ERROR("failed to put m_node.\n");
1368                                 goto err_clear;
1369                         }
1370                 }
1371                 break;
1372         default:
1373                 DRM_ERROR("invalid operations.\n");
1374                 ret = -EINVAL;
1375                 goto err_clear;
1376         }
1377
1378 err_clear:
1379         /* stop operations */
1380         if (ippdrv->stop)
1381                 ippdrv->stop(ippdrv->dev, property->cmd);
1382
1383         return 0;
1384 }
1385
1386 void ipp_sched_cmd(struct work_struct *work)
1387 {
1388         struct drm_exynos_ipp_cmd_work *cmd_work =
1389                 (struct drm_exynos_ipp_cmd_work *)work;
1390         struct exynos_drm_ippdrv *ippdrv;
1391         struct drm_exynos_ipp_cmd_node *c_node;
1392         struct drm_exynos_ipp_property *property;
1393         int ret;
1394
1395         DRM_DEBUG_KMS("%s\n", __func__);
1396
1397         ippdrv = cmd_work->ippdrv;
1398         if (!ippdrv) {
1399                 DRM_ERROR("invalid ippdrv list.\n");
1400                 return;
1401         }
1402
1403         c_node = cmd_work->c_node;
1404         if (!c_node) {
1405                 DRM_ERROR("invalid command node list.\n");
1406                 return;
1407         }
1408
1409         mutex_lock(&c_node->cmd_lock);
1410
1411         /* get property */
1412         property = &c_node->property;
1413         if (!property) {
1414                 DRM_ERROR("failed to get property:prop_id[%d]\n",
1415                         c_node->property.prop_id);
1416                 goto err_unlock;
1417         }
1418
1419         switch (cmd_work->ctrl) {
1420         case IPP_CTRL_PLAY:
1421         case IPP_CTRL_RESUME:
1422                 /* start property */
1423                 ret = ipp_start_property(ippdrv, c_node);
1424                 if (ret) {
1425                         DRM_ERROR("failed to start property:prop_id[%d]\n",
1426                                 c_node->property.prop_id);
1427                         goto err_unlock;
1428                 }
1429
1430                 if (property->cmd == IPP_CMD_M2M) {
1431                         if (!wait_for_completion_timeout
1432                             (&c_node->start_complete, msecs_to_jiffies(200))) {
1433                                 DRM_ERROR("timeout event:prop_id[%d]\n",
1434                                         c_node->property.prop_id);
1435                                 if (ippdrv->stop)
1436                                         ippdrv->stop(ippdrv->dev,
1437                                                 property->cmd);
1438                                 goto err_unlock;
1439                         }
1440                 }
1441                 break;
1442         case IPP_CTRL_STOP:
1443         case IPP_CTRL_PAUSE:
1444                 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1445                         c_node);
1446                 if (ret) {
1447                         DRM_ERROR("failed to stop property.\n");
1448                         goto err_unlock;
1449                 }
1450
1451                 complete(&c_node->stop_complete);
1452                 break;
1453         default:
1454                 DRM_ERROR("unknown control type\n");
1455                 break;
1456         }
1457
1458         DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
1459
1460 err_unlock:
1461         mutex_unlock(&c_node->cmd_lock);
1462 }
1463
1464 static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1465         struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1466 {
1467         struct drm_device *drm_dev = ippdrv->drm_dev;
1468         struct drm_exynos_ipp_property *property = &c_node->property;
1469         struct drm_exynos_ipp_mem_node *m_node;
1470         struct drm_exynos_ipp_queue_buf qbuf;
1471         struct drm_exynos_ipp_send_event *e;
1472         struct list_head *head;
1473         struct timeval now;
1474         unsigned long flags;
1475         u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1476         int ret, i;
1477
1478         for_each_ipp_ops(i)
1479                 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1480                         i ? "dst" : "src", buf_id[i]);
1481
1482         if (!drm_dev) {
1483                 DRM_ERROR("failed to get drm_dev.\n");
1484                 return -EINVAL;
1485         }
1486
1487         if (!property) {
1488                 DRM_ERROR("failed to get property.\n");
1489                 return -EINVAL;
1490         }
1491
1492         if (list_empty(&c_node->event_list)) {
1493                 DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
1494                 return 0;
1495         }
1496
1497         if (!ipp_check_mem_list(c_node)) {
1498                 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1499                 return 0;
1500         }
1501
1502         /* check command */
1503         switch (property->cmd) {
1504         case IPP_CMD_M2M:
1505                 for_each_ipp_ops(i) {
1506                         /* source/destination memory list */
1507                         head = &c_node->mem_list[i];
1508
1509                         /* get first entry */
1510                         m_node = list_first_entry(head,
1511                                 struct drm_exynos_ipp_mem_node, list);
1512                         if (!m_node) {
1513                                 DRM_ERROR("empty memory node.\n");
1514                                 return -ENOMEM;
1515                         }
1516
1517                         tbuf_id[i] = m_node->buf_id;
1518                         DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1519                                 i ? "dst" : "src", tbuf_id[i]);
1520
1521                         /* put memory node */
1522                         ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1523                         if (ret)
1524                                 DRM_ERROR("failed to put m_node.\n");
1525                 }
1526                 break;
1527         case IPP_CMD_WB:
1528                 /* clear buf for finding */
1529                 memset(&qbuf, 0x0, sizeof(qbuf));
1530                 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1531                 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1532
1533                 /* get memory node entry */
1534                 m_node = ipp_find_mem_node(c_node, &qbuf);
1535                 if (!m_node) {
1536                         DRM_ERROR("empty memory node.\n");
1537                         return -ENOMEM;
1538                 }
1539
1540                 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1541
1542                 /* put memory node */
1543                 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1544                 if (ret)
1545                         DRM_ERROR("failed to put m_node.\n");
1546                 break;
1547         case IPP_CMD_OUTPUT:
1548                 /* source memory list */
1549                 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1550
1551                 /* get first entry */
1552                 m_node = list_first_entry(head,
1553                         struct drm_exynos_ipp_mem_node, list);
1554                 if (!m_node) {
1555                         DRM_ERROR("empty memory node.\n");
1556                         return -ENOMEM;
1557                 }
1558
1559                 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1560
1561                 /* put memory node */
1562                 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1563                 if (ret)
1564                         DRM_ERROR("failed to put m_node.\n");
1565                 break;
1566         default:
1567                 DRM_ERROR("invalid operations.\n");
1568                 return -EINVAL;
1569         }
1570
1571         /* ToDo: Fix buffer id */
1572         if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1573                 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1574                         tbuf_id[1], buf_id[1], property->prop_id);
1575
1576         /* get first event entry */
1577         e = list_first_entry(&c_node->event_list,
1578                 struct drm_exynos_ipp_send_event, base.link);
1579
1580         if (!e) {
1581                 DRM_ERROR("empty event.\n");
1582                 return -EINVAL;
1583         }
1584
1585         do_gettimeofday(&now);
1586         DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
1587                 , __func__, now.tv_sec, now.tv_usec);
1588         e->event.tv_sec = now.tv_sec;
1589         e->event.tv_usec = now.tv_usec;
1590         e->event.prop_id = property->prop_id;
1591
1592         /* set buffer id about source destination */
1593         for_each_ipp_ops(i)
1594                 e->event.buf_id[i] = tbuf_id[i];
1595         /* ToDo: compare index. If needed */
1596
1597         spin_lock_irqsave(&drm_dev->event_lock, flags);
1598         list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1599         wake_up_interruptible(&e->base.file_priv->event_wait);
1600         spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1601
1602         DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
1603                 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1604
1605         return 0;
1606 }
1607
1608 void ipp_sched_event(struct work_struct *work)
1609 {
1610         struct drm_exynos_ipp_event_work *event_work =
1611                 (struct drm_exynos_ipp_event_work *)work;
1612         struct exynos_drm_ippdrv *ippdrv;
1613         struct drm_exynos_ipp_cmd_node *c_node;
1614         int ret;
1615
1616         if (!event_work) {
1617                 DRM_ERROR("failed to get event_work.\n");
1618                 return;
1619         }
1620
1621         DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1622                 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1623
1624         ippdrv = event_work->ippdrv;
1625         if (!ippdrv) {
1626                 DRM_ERROR("failed to get ipp driver.\n");
1627                 return;
1628         }
1629
1630         c_node = ippdrv->cmd;
1631         if (!c_node) {
1632                 DRM_ERROR("failed to get command node.\n");
1633                 return;
1634         }
1635
1636         if (c_node->state != IPP_STATE_START) {
1637                 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1638                         __func__, c_node->state, c_node->property.prop_id);
1639                 goto err_completion;
1640         }
1641
1642         mutex_lock(&c_node->event_lock);
1643
1644         ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1645         if (ret) {
1646                 DRM_ERROR("failed to send event.\n");
1647                 goto err_completion;
1648         }
1649
1650 err_completion:
1651         if (c_node->property.cmd == IPP_CMD_M2M)
1652                 complete(&c_node->start_complete);
1653
1654         mutex_unlock(&c_node->event_lock);
1655 }
1656
1657 static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1658 {
1659         struct ipp_context *ctx = get_ipp_context(dev);
1660         struct exynos_drm_ippdrv *ippdrv;
1661         int ret, count = 0;
1662
1663         DRM_DEBUG_KMS("%s\n", __func__);
1664
1665         /* get ipp driver entry */
1666         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1667                 ippdrv->drm_dev = drm_dev;
1668
1669                 /* create ipp id */
1670                 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1671                         &ippdrv->ipp_id);
1672                 if (ret) {
1673                         DRM_ERROR("failed to create id.\n");
1674                         goto err_idr;
1675                 }
1676
1677                 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1678                         count++, (int)ippdrv, ippdrv->ipp_id);
1679
1680                 if (ippdrv->ipp_id == 0) {
1681                         DRM_ERROR("failed to get ipp_id[%d]\n",
1682                                 ippdrv->ipp_id);
1683                         goto err_idr;
1684                 }
1685
1686                 /* store parent device for node */
1687                 ippdrv->parent_dev = dev;
1688
1689                 /* store event work queue and handler */
1690                 ippdrv->event_workq = ctx->event_workq;
1691                 ippdrv->sched_event = ipp_sched_event;
1692                 INIT_LIST_HEAD(&ippdrv->cmd_list);
1693         }
1694
1695         return 0;
1696
1697 err_idr:
1698         idr_remove_all(&ctx->ipp_idr);
1699         idr_remove_all(&ctx->prop_idr);
1700         idr_destroy(&ctx->ipp_idr);
1701         idr_destroy(&ctx->prop_idr);
1702
1703         return ret;
1704 }
1705
1706 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1707 {
1708         struct exynos_drm_ippdrv *ippdrv;
1709
1710         DRM_DEBUG_KMS("%s\n", __func__);
1711
1712         /* get ipp driver entry */
1713         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1714                 ippdrv->drm_dev = NULL;
1715                 exynos_drm_ippdrv_unregister(ippdrv);
1716         }
1717
1718         /* ToDo: free notifier callback list if needed */
1719 }
1720
1721 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1722                                                         struct drm_file *file)
1723 {
1724         struct drm_exynos_file_private *file_priv = file->driver_priv;
1725         struct exynos_drm_ipp_private *priv;
1726
1727         DRM_DEBUG_KMS("%s\n", __func__);
1728
1729         /* ToDo: multi device open */
1730
1731         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1732         if (!priv) {
1733                 DRM_ERROR("failed to allocate priv.\n");
1734                 return -ENOMEM;
1735         }
1736         priv->dev = dev;
1737         file_priv->ipp_priv = priv;
1738
1739         INIT_LIST_HEAD(&priv->event_list);
1740
1741         DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
1742
1743         return 0;
1744 }
1745
1746 static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1747                                                         struct drm_file *file)
1748 {
1749         struct drm_exynos_file_private *file_priv = file->driver_priv;
1750         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1751         struct exynos_drm_ippdrv *ippdrv = NULL;
1752         struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1753         int count = 0;
1754
1755         DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
1756
1757         if (list_empty(&exynos_drm_ippdrv_list)) {
1758                 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
1759                 goto err_clear;
1760         }
1761
1762         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1763                 if (list_empty(&ippdrv->cmd_list))
1764                         continue;
1765
1766                 list_for_each_entry_safe(c_node, tc_node,
1767                         &ippdrv->cmd_list, list) {
1768                         DRM_INFO("%s:count[%d]ippdrv[0x%x]\n",
1769                                 __func__, count++, (int)ippdrv);
1770
1771                         if (c_node->priv == priv) {
1772                                 if (c_node->state == IPP_STATE_START) {
1773                                         ipp_stop_property(drm_dev, ippdrv,
1774                                                 c_node);
1775                                         c_node->state = IPP_STATE_STOP;
1776                                 }
1777
1778                                 ippdrv->dedicated = false;
1779                                 ipp_clean_cmd_node(c_node);
1780                                 if (list_empty(&ippdrv->cmd_list))
1781                                         pm_runtime_put_sync(ippdrv->dev);
1782                         }
1783                 }
1784         }
1785
1786 err_clear:
1787         kfree(priv);
1788
1789         return;
1790 }
1791
1792 static int __devinit ipp_probe(struct platform_device *pdev)
1793 {
1794         struct device *dev = &pdev->dev;
1795         struct ipp_context *ctx;
1796         struct exynos_drm_subdrv *subdrv;
1797         int ret = -EINVAL;
1798
1799         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1800         if (!ctx)
1801                 return -ENOMEM;
1802
1803         DRM_DEBUG_KMS("%s\n", __func__);
1804
1805         spin_lock_init(&ctx->ipp_lock);
1806         spin_lock_init(&ctx->prop_lock);
1807         /* init ipp driver idr */
1808         idr_init(&ctx->ipp_idr);
1809         idr_init(&ctx->prop_idr);
1810         /* create single thread for ipp event */
1811         ctx->event_workq = create_singlethread_workqueue("ipp_event");
1812         if (!ctx->event_workq) {
1813                 dev_err(dev, "failed to create event workqueue\n");
1814                 ret = -EINVAL;
1815                 goto err_clear;
1816         }
1817
1818         /* create single thread for ipp command */
1819         ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1820         if (!ctx->cmd_workq) {
1821                 dev_err(dev, "failed to create cmd workqueue\n");
1822                 ret = -EINVAL;
1823                 goto err_event_workq;
1824         }
1825
1826         /* set sub driver informations */
1827         subdrv = &ctx->subdrv;
1828         subdrv->dev = dev;
1829         subdrv->probe = ipp_subdrv_probe;
1830         subdrv->remove = ipp_subdrv_remove;
1831         subdrv->open = ipp_subdrv_open;
1832         subdrv->close = ipp_subdrv_close;
1833
1834         /* set driver data */
1835         platform_set_drvdata(pdev, ctx);
1836
1837         /* register sub driver */
1838         ret = exynos_drm_subdrv_register(subdrv);
1839         if (ret < 0) {
1840                 DRM_ERROR("failed to register drm ipp device.\n");
1841                 goto err_cmd_workq;
1842         }
1843
1844         dev_info(&pdev->dev, "drm ipp registered successfully.\n");
1845
1846         return 0;
1847
1848 err_cmd_workq:
1849         destroy_workqueue(ctx->cmd_workq);
1850 err_event_workq:
1851         destroy_workqueue(ctx->event_workq);
1852 err_clear:
1853         kfree(ctx);
1854
1855         return ret;
1856 }
1857
/*
 * ipp_remove - platform remove; undo ipp_probe() in reverse order.
 *
 * Order matters: the sub-driver must be unregistered before the idr tables
 * and workqueues it may reference are destroyed, and the context is freed
 * last. Always returns 0.
 */
static int __devexit ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	kfree(ctx);

	return 0;
}
1881
/*
 * ipp_power_ctrl - enable or disable power for the ipp core.
 *
 * Currently a stub that only logs the request and returns 0; the real
 * power/sysmmu control is still to be implemented (see ToDo below).
 */
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	/* ToDo: Need to implement power and sysmmu ctrl. */
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}
1889
#ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: power down unless runtime PM already did. */
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* already powered down by runtime PM - nothing to do */
	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

/* System-sleep resume: power up only if the device was active before. */
static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* runtime-suspended devices stay down until runtime resume */
	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, true);
}
#endif
1915
#ifdef CONFIG_PM_RUNTIME
/* Runtime-PM suspend callback: unconditionally request power-down. */
static int ipp_runtime_suspend(struct device *dev)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(get_ipp_context(dev), false);
}

/* Runtime-PM resume callback: unconditionally request power-up. */
static int ipp_runtime_resume(struct device *dev)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(get_ipp_context(dev), true);
}
#endif
1935
/* Wire both system-sleep and runtime PM callbacks into one ops table. */
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
1940
/* Platform driver binding the "exynos-drm-ipp" device to this module. */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= __devexit_p(ipp_remove),
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};
1950