tizen 2.4 release
[kernel/linux-3.0.git] / drivers / gpu / drm / exynos / exynos_drm_ipp.c
1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors:
4  *      Eunchul Kim <chulspro.kim@samsung.com>
5  *      Jinyoung Jeon <jy0.jeon@samsung.com>
6  *      Sangmin Lee <lsmin.lee@samsung.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
19 #include <plat/map-base.h>
20
21 #include <drm/drmP.h>
22 #include <drm/exynos_drm.h>
23 #include "exynos_drm_drv.h"
24 #include "exynos_drm_gem.h"
25 #include "exynos_drm_ipp.h"
26
27 /*
28  * IPP is stand for Image Post Processing and
29  * supports image scaler/rotator and input/output DMA operations.
30  * using FIMC, GSC, Rotator, so on.
31  * IPP is integration device driver of same attribute h/w
32  */
33
34 #define get_ipp_context(dev)    platform_get_drvdata(to_platform_device(dev))
35
/*
 * A structure of event.
 *
 * Pairs the generic DRM pending-event bookkeeping with the
 * ipp-specific payload that is delivered to userspace when a
 * buffer completes.
 *
 * @base: base of event (list link, owning file, destroy callback).
 * @event: ipp event payload sent to userspace.
 */
struct drm_exynos_ipp_send_event {
        struct drm_pending_event        base;
        struct drm_exynos_ipp_event     event;
};
46
/*
 * A structure of memory node.
 *
 * One node per queued buffer; lives on the command node's
 * per-direction mem_list until the buffer is dequeued.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations (source or destination side).
 * @prop_id: id of property this buffer belongs to.
 * @buf_id: id of buffer, as supplied by userspace.
 * @buf_info: gem objects and dma address, size (one per plane).
 */
struct drm_exynos_ipp_mem_node {
        struct list_head        list;
        enum drm_exynos_ops_id  ops_id;
        u32     prop_id;
        u32     buf_id;
        struct drm_exynos_ipp_buf_info  buf_info;
};
63
/*
 * A structure of ipp context.
 *
 * Per-device driver data; retrieved from the platform device via
 * get_ipp_context().
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr (maps ipp_id -> struct exynos_drm_ippdrv).
 * @prop_idr: property idr (maps prop_id -> struct drm_exynos_ipp_cmd_node).
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
        struct exynos_drm_subdrv        subdrv;
        spinlock_t      ipp_lock;
        spinlock_t      prop_lock;
        struct idr      ipp_idr;
        struct idr      prop_idr;
        struct workqueue_struct *event_workq;
        struct workqueue_struct *cmd_workq;
};
84
/*
 * Global list of registered ipp drivers.
 * NOTE(review): register/unregister walk and modify this list without
 * taking any lock in this file — callers appear to serialize; confirm.
 */
static LIST_HEAD(exynos_drm_ippdrv_list);
/* notifier chain for broadcasting ipp notifications */
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
87
88 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
89 {
90         DRM_DEBUG_KMS("%s\n", __func__);
91
92         if (!ippdrv)
93                 return -EINVAL;
94
95         list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
96
97         return 0;
98 }
99 EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_register);
100
101 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
102 {
103         DRM_DEBUG_KMS("%s\n", __func__);
104
105         if (!ippdrv)
106                 return -EINVAL;
107
108         list_del(&ippdrv->drv_list);
109
110         return 0;
111 }
112 EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_unregister);
113
/*
 * Allocate a new id for @obj in @id_idr and return it through @idp.
 *
 * Uses the legacy two-step idr API: idr_pre_get() preloads memory
 * outside the lock, idr_get_new_above() does the allocation under
 * @lock.  Ids start at 1 so that 0 can mean "no id" to callers.
 *
 * Returns 0 on success, -ENOMEM when preloading fails, or the error
 * from idr_get_new_above().
 */
static int ipp_create_id(struct idr *id_idr, spinlock_t *lock, void *obj,
        u32 *idp)
{
        int ret = -EINVAL;

        DRM_DEBUG_KMS("%s\n", __func__);

again:
        /* ensure there is space available to allocate a handle */
        if (idr_pre_get(id_idr, GFP_KERNEL) == 0)
                return -ENOMEM;

        /* do the allocation under our spinlock */
        spin_lock(lock);
        ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
        spin_unlock(lock);
        /* -EAGAIN means the preloaded memory was consumed by a racer */
        if (ret == -EAGAIN)
                goto again;

        return ret;
}
135
136 static void *ipp_find_id(struct idr *id_idr, spinlock_t *lock, u32 id)
137 {
138         void *obj;
139
140         DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
141
142         spin_lock(lock);
143
144         /* find object using handle */
145         obj = idr_find(id_idr, id);
146         if (obj == NULL) {
147                 spin_unlock(lock);
148                 return NULL;
149         }
150
151         spin_unlock(lock);
152
153         return obj;
154 }
155
/*
 * Select an ipp driver that can service @property.
 *
 * Two modes, keyed on property->ipp_id:
 *  - non-zero: the caller asked for a specific driver; look it up in
 *    the idr and verify it is free and capable, failing hard if not.
 *  - zero: walk the global driver list and take the first driver that
 *    is not dedicated, is runtime-suspended (for non-M2M commands),
 *    and accepts the property.
 *
 * Returns the chosen driver, or NULL when none qualifies.
 */
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
        struct drm_exynos_ipp_property *property)
{
        struct exynos_drm_ippdrv *ippdrv;
        u32 ipp_id = property->ipp_id;

        DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

        if (ipp_id) {
                /* find ipp driver */
                ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
                        ipp_id);
                if (!ippdrv) {
                        DRM_ERROR("not found ipp%d driver.\n", ipp_id);
                        goto err_null;
                }

                /* a dedicated driver is already claimed by another client */
                if (ippdrv->dedicated) {
                        DRM_ERROR("used choose device.\n");
                        goto err_null;
                }

                /*
                 * Non-M2M (writeback/output) commands need exclusive use of
                 * the hardware, so the device must be idle (suspended).
                 */
                if (property->cmd != IPP_CMD_M2M
                        && !pm_runtime_suspended(ippdrv->dev)) {
                        DRM_ERROR("can't run dedicatedly.\n");
                        goto err_null;
                }

                /* check property */
                if (ippdrv->check_property &&
                    ippdrv->check_property(ippdrv->dev, property)) {
                        DRM_ERROR("not support property.\n");
                        goto err_null;
                }

                return ippdrv;
        } else {
                /* no specific driver requested: pick the first suitable one */
                list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
                        /* check dedicated state */
                        if (ippdrv->dedicated)
                                continue;

                        if (property->cmd != IPP_CMD_M2M
                                && !pm_runtime_suspended(ippdrv->dev)) {
                                DRM_INFO("%s:can't run dedicatedly.\n",
                                        __func__);
                                continue;
                        }

                        /* check property */
                        if (ippdrv->check_property &&
                            ippdrv->check_property(ippdrv->dev, property)) {
                                DRM_INFO("%s:not support property.\n",
                                        __func__);
                                continue;
                        }

                        return ippdrv;
                }

                DRM_ERROR("not support ipp driver operations.\n");
        }

err_null:
        return NULL;
}
224
225 static struct exynos_drm_ippdrv *ipp_find_drv_node(u32 prop_id)
226 {
227         struct exynos_drm_ippdrv *ippdrv;
228         struct drm_exynos_ipp_cmd_node *c_node;
229         int count = 0;
230
231         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
232
233         if (list_empty(&exynos_drm_ippdrv_list)) {
234                 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n",
235                         __func__);
236                 return NULL;
237         }
238
239         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
240                 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
241                         __func__, count++, (int)ippdrv);
242
243                 if (!list_empty(&ippdrv->cmd_list)) {
244                         list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
245                                 if (c_node->property.prop_id == prop_id)
246                                         return ippdrv;
247                         }
248                 }
249         }
250
251         return NULL;
252 }
253
254 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
255                                         struct drm_file *file)
256 {
257         struct drm_exynos_file_private *file_priv = file->driver_priv;
258         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
259         struct device *dev = priv->dev;
260         struct ipp_context *ctx = get_ipp_context(dev);
261         struct drm_exynos_ipp_prop_list *prop_list = data;
262         struct exynos_drm_ippdrv *ippdrv;
263         int count = 0;
264
265         DRM_DEBUG_KMS("%s\n", __func__);
266
267         if (!ctx) {
268                 DRM_ERROR("invalid context.\n");
269                 return -EINVAL;
270         }
271
272         if (!prop_list) {
273                 DRM_ERROR("invalid property parameter.\n");
274                 return -EINVAL;
275         }
276
277         DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
278
279         if (prop_list->ipp_id == 0) {
280                 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
281                         count++;
282                 prop_list->count = count;
283         } else {
284                 ippdrv = ipp_find_id(&ctx->ipp_idr, &ctx->ipp_lock,
285                                                    prop_list->ipp_id);
286
287                 if (!ippdrv) {
288                         DRM_ERROR("not found ipp%d driver.\n",
289                                         prop_list->ipp_id);
290                         return -EINVAL;
291                 }
292
293                 prop_list = ippdrv->prop_list;
294         }
295
296         return 0;
297 }
298 EXPORT_SYMBOL_GPL(exynos_drm_ipp_get_property);
299
300 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
301                                         struct drm_file *file)
302 {
303         struct drm_exynos_file_private *file_priv = file->driver_priv;
304         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
305         struct device *dev = priv->dev;
306         struct ipp_context *ctx = get_ipp_context(dev);
307         struct drm_exynos_ipp_property *property = data;
308         struct exynos_drm_ippdrv *ippdrv;
309         struct drm_exynos_ipp_cmd_node *c_node;
310         struct drm_exynos_ipp_config *config;
311         struct drm_exynos_pos *pos;
312         struct drm_exynos_sz *sz;
313         int ret, i;
314
315         DRM_DEBUG_KMS("%s\n", __func__);
316
317         if (!ctx) {
318                 DRM_ERROR("invalid context.\n");
319                 return -EINVAL;
320         }
321
322         if (!property) {
323                 DRM_ERROR("invalid property parameter.\n");
324                 return -EINVAL;
325         }
326
327         for_each_ipp_ops(i) {
328                 config = &property->config[i];
329                 pos = &config->pos;
330                 sz = &config->sz;
331
332                 DRM_INFO("%s:prop_id[%d]ops[%s]fmt[%.4s]\n",
333                         __func__, property->prop_id,
334                         i ? "dst" : "src", (char *)&config->fmt);
335
336                 DRM_INFO("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
337                         __func__, pos->x, pos->y, pos->w, pos->h,
338                         sz->hsize, sz->vsize, config->flip, config->degree);
339         }
340
341         if (property->prop_id) {
342                 ippdrv = ipp_find_drv_node(property->prop_id);
343                 if (!ippdrv) {
344                         DRM_ERROR("failed to get ipp driver.\n");
345                         return -EINVAL;
346                 }
347
348                 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
349                         if ((c_node->property.prop_id ==
350                                 property->prop_id) &&
351                                 (c_node->state == IPP_STATE_STOP)) {
352                                 DRM_INFO("%s:found cmd[%d]ippdrv[0x%x]\n",
353                                         __func__, property->cmd, (int)ippdrv);
354
355                                 c_node->property = *property;
356                                 return 0;
357                         }
358                 }
359
360                 DRM_ERROR("failed to search property.\n");
361                 return -EINVAL;
362         }
363
364         /* find ipp driver using ipp id */
365         ippdrv = ipp_find_driver(ctx, property);
366         if (!ippdrv) {
367                 DRM_ERROR("failed to get ipp driver.\n");
368                 return -EINVAL;
369         }
370
371         /* allocate command node */
372         c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
373         if (!c_node) {
374                 DRM_ERROR("failed to allocate map node.\n");
375                 return -ENOMEM;
376         }
377
378         /* create property id */
379         ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
380                 &property->prop_id);
381         if (ret) {
382                 DRM_ERROR("failed to create id.\n");
383                 goto err_clear;
384         }
385
386         DRM_INFO("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
387                 __func__, property->prop_id, property->cmd, (int)ippdrv);
388
389         /* stored property information and ippdrv in private data */
390         c_node->priv = priv;
391         c_node->property = *property;
392         c_node->state = IPP_STATE_IDLE;
393
394         c_node->start_work = kzalloc(sizeof(*c_node->start_work),
395                 GFP_KERNEL);
396         if (!c_node->start_work) {
397                 DRM_ERROR("failed to alloc start_work.\n");
398                 ret = -ENOMEM;
399                 goto err_clear;
400         }
401
402         INIT_WORK((struct work_struct *)c_node->start_work,
403                 ipp_sched_cmd);
404
405         c_node->stop_work = kzalloc(sizeof(*c_node->stop_work),
406                 GFP_KERNEL);
407         if (!c_node->stop_work) {
408                 DRM_ERROR("failed to alloc stop_work.\n");
409                 ret = -ENOMEM;
410                 goto err_free_start;
411         }
412
413         INIT_WORK((struct work_struct *)c_node->stop_work,
414                 ipp_sched_cmd);
415
416         c_node->event_work = kzalloc(sizeof(*c_node->event_work),
417                 GFP_KERNEL);
418         if (!c_node->event_work) {
419                 DRM_ERROR("failed to alloc event_work.\n");
420                 ret = -ENOMEM;
421                 goto err_free_stop;
422         }
423
424         INIT_WORK((struct work_struct *)c_node->event_work,
425                 ipp_sched_event);
426
427         /* init ioctl lock */
428         mutex_init(&c_node->cmd_lock);
429         mutex_init(&c_node->mem_lock);
430         mutex_init(&c_node->event_lock);
431         init_completion(&c_node->start_complete);
432         init_completion(&c_node->stop_complete);
433
434         for_each_ipp_ops(i)
435                 INIT_LIST_HEAD(&c_node->mem_list[i]);
436
437         INIT_LIST_HEAD(&c_node->event_list);
438         list_splice_init(&priv->event_list, &c_node->event_list);
439         list_add_tail(&c_node->list, &ippdrv->cmd_list);
440
441         /*
442          * Previously, in case of WB and direct output, the ipp driver was not
443          * become dedicated. By suggestion of Inke Dae, make all ipp driver in
444          * use dedicated even in the case of M2M to avoid concurrent problem in
445          * TLB. We don't know whether there are cases which use more than 2 ipp
446          * driver at the same time. If it is not, this change solves the
447          * problems clearly without any side effect.
448          */
449         /* make dedicated state without m2m */
450         /* if (property->cmd != IPP_CMD_M2M) */
451         ippdrv->dedicated = true;
452
453         return 0;
454
455 err_free_stop:
456         kfree(c_node->stop_work);
457 err_free_start:
458         kfree(c_node->start_work);
459 err_clear:
460         kfree(c_node);
461         return ret;
462 }
463 EXPORT_SYMBOL_GPL(exynos_drm_ipp_set_property);
464
465 static struct drm_exynos_ipp_mem_node
466         *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
467         struct drm_exynos_ipp_queue_buf *qbuf)
468 {
469         struct drm_exynos_ipp_mem_node *m_node;
470         struct list_head *head;
471         int count = 0;
472
473         DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
474
475         /* source/destination memory list */
476         head = &c_node->mem_list[qbuf->ops_id];
477
478         /* find memory node entry */
479         list_for_each_entry(m_node, head, list) {
480                 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
481                         __func__, count++, (int)m_node);
482
483                 /* compare buffer id */
484                 if (m_node->buf_id == qbuf->buf_id)
485                         return m_node;
486         }
487
488         return NULL;
489 }
490
491 static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
492 {
493         struct drm_exynos_ipp_property *property = &c_node->property;
494         struct drm_exynos_ipp_mem_node *m_node;
495         struct list_head *head;
496         int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
497
498         DRM_DEBUG_KMS("%s\n", __func__);
499
500         mutex_lock(&c_node->mem_lock);
501
502         for_each_ipp_ops(i) {
503                 /* source/destination memory list */
504                 head = &c_node->mem_list[i];
505
506                 if (list_empty(head)) {
507                         DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
508                                 i ? "dst" : "src");
509                         continue;
510                 }
511
512                 /* find memory node entry */
513                 list_for_each_entry(m_node, head, list) {
514                         DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
515                                 i ? "dst" : "src", count[i], (int)m_node);
516                         count[i]++;
517                 }
518         }
519
520         DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
521                 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
522                 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
523
524
525         if (property->cmd == IPP_CMD_M2M)
526                 ret = min(count[EXYNOS_DRM_OPS_SRC],
527                         count[EXYNOS_DRM_OPS_DST]);
528         else
529                 ret = max(count[EXYNOS_DRM_OPS_SRC],
530                         count[EXYNOS_DRM_OPS_DST]);
531
532         mutex_unlock(&c_node->mem_lock);
533
534         return ret;
535 }
536
537 static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
538 {
539         DRM_DEBUG_KMS("%s\n", __func__);
540
541         mutex_lock(&c_node->cmd_lock);
542         /* delete list */
543         list_del(&c_node->list);
544         mutex_unlock(&c_node->cmd_lock);
545
546         /* destroy mutex */
547         mutex_destroy(&c_node->cmd_lock);
548         mutex_destroy(&c_node->mem_lock);
549         mutex_destroy(&c_node->event_lock);
550
551         /* free command node */
552         kfree(c_node->start_work);
553         kfree(c_node->stop_work);
554         kfree(c_node->event_work);
555         kfree(c_node);
556 }
557
558 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
559         struct drm_exynos_ipp_cmd_node *c_node,
560         struct drm_exynos_ipp_mem_node *m_node)
561 {
562         struct exynos_drm_ipp_ops *ops = NULL;
563         int ret = 0;
564
565         DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
566
567         if (!m_node) {
568                 DRM_ERROR("invalid queue node.\n");
569                 return -EFAULT;
570         }
571
572         mutex_lock(&c_node->mem_lock);
573
574         DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
575
576         /* get operations callback */
577         ops = ippdrv->ops[m_node->ops_id];
578         if (!ops) {
579                 DRM_ERROR("not support ops.\n");
580                 ret = -EIO;
581                 goto err_unlock;
582         }
583
584         /* set address and enable irq */
585         if (ops->set_addr) {
586                 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
587                         m_node->buf_id, IPP_BUF_ENQUEUE);
588                 if (ret) {
589                         DRM_ERROR("failed to set addr.\n");
590                         goto err_unlock;
591                 }
592         }
593
594 err_unlock:
595         mutex_unlock(&c_node->mem_lock);
596         return ret;
597 }
598
599 static struct drm_exynos_ipp_mem_node
600         *ipp_get_mem_node(struct drm_device *drm_dev,
601         struct drm_file *file,
602         struct drm_exynos_ipp_cmd_node *c_node,
603         struct drm_exynos_ipp_queue_buf *qbuf)
604 {
605         struct drm_exynos_ipp_mem_node *m_node;
606         struct drm_exynos_ipp_buf_info buf_info;
607         void *addr;
608         unsigned long size;
609         int i;
610
611         mutex_lock(&c_node->mem_lock);
612
613         m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
614         if (!m_node) {
615                 DRM_ERROR("failed to allocate queue node.\n");
616                 goto err_unlock;
617         }
618
619         /* clear base address for error handling */
620         memset(&buf_info, 0x0, sizeof(buf_info));
621
622         /* operations, buffer id */
623         m_node->ops_id = qbuf->ops_id;
624         m_node->prop_id = qbuf->prop_id;
625         m_node->buf_id = qbuf->buf_id;
626
627         DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
628                 (int)m_node, qbuf->ops_id);
629         DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
630                 qbuf->prop_id, m_node->buf_id);
631
632         for_each_ipp_planar(i) {
633                 unsigned int gem_obj;
634
635                 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
636                         i, qbuf->handle[i]);
637
638                 /* get dma address by handle */
639                 if (qbuf->handle[i] != 0) {
640                         addr = exynos_drm_gem_get_dma_addr(drm_dev,
641                                         qbuf->handle[i], file, &gem_obj);
642                         if (!addr) {
643                                 DRM_ERROR("failed to get addr.\n");
644                                 goto err_clear;
645                         }
646
647                         size = exynos_drm_gem_get_size(drm_dev,
648                                                 qbuf->handle[i], file);
649                         if (!size) {
650                                 DRM_ERROR("failed to get size.\n");
651                                 goto err_clear;
652                         }
653
654                         buf_info.gem_objs[i] = (void *)gem_obj;
655                         buf_info.base[i] = *(dma_addr_t *) addr;
656                         buf_info.size[i] = (uint64_t) size;
657                         DRM_DEBUG_KMS("%s:i[%d]base[0x%x]size[0x%x]\n",
658                                 __func__, i, buf_info.base[i],
659                                 (int)buf_info.size[i]);
660                 }
661         }
662
663         m_node->buf_info = buf_info;
664         list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
665
666         mutex_unlock(&c_node->mem_lock);
667         return m_node;
668
669 err_clear:
670         kfree(m_node);
671
672 err_unlock:
673         mutex_unlock(&c_node->mem_lock);
674
675         return NULL;
676 }
677
678 static int ipp_put_mem_node(struct drm_device *drm_dev,
679         struct drm_exynos_ipp_cmd_node *c_node,
680         struct drm_exynos_ipp_mem_node *m_node)
681 {
682         int i, ret = 0;
683
684         DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
685
686         mutex_lock(&c_node->mem_lock);
687
688         if (!m_node) {
689                 DRM_ERROR("invalid dequeue node.\n");
690                 ret = -EFAULT;
691                 goto err_unlock;
692         }
693
694         if (list_empty(&m_node->list)) {
695                 DRM_ERROR("empty memory node.\n");
696                 ret = -ENOMEM;
697                 goto err_unlock;
698         }
699
700         DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
701
702         /* put gem buffer */
703         for_each_ipp_planar(i) {
704                 void *gem_obj = m_node->buf_info.gem_objs[i];
705                 if (gem_obj)
706                         exynos_drm_gem_put_dma_addr(drm_dev, gem_obj);
707         }
708
709         /* delete list in queue */
710         list_del(&m_node->list);
711         kfree(m_node);
712
713 err_unlock:
714         mutex_unlock(&c_node->mem_lock);
715
716         return ret;
717 }
718
/* destroy callback for a pending ipp event: just frees the container */
static void ipp_free_event(struct drm_pending_event *event)
{
        kfree(event);
}
723
/*
 * Allocate and queue a completion event for @qbuf on the command
 * node's event_list; it is sent to userspace when the buffer is done.
 *
 * NOTE(review): on allocation failure the code refunds
 * sizeof(e->event) to file->event_space, which presumes a matching
 * reservation was made elsewhere (not visible in this file) — confirm
 * against the caller before changing this path.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ipp_get_event(struct drm_device *drm_dev,
        struct drm_file *file,
        struct drm_exynos_ipp_cmd_node *c_node,
        struct drm_exynos_ipp_queue_buf *qbuf)
{
        struct drm_exynos_ipp_send_event *e;
        unsigned long flags;

        DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
                qbuf->ops_id, qbuf->buf_id);

        e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e) {
                DRM_ERROR("failed to allocate event.\n");
                /* give back the event space reserved for this event */
                spin_lock_irqsave(&drm_dev->event_lock, flags);
                file->event_space += sizeof(e->event);
                spin_unlock_irqrestore(&drm_dev->event_lock, flags);
                return -ENOMEM;
        }

        /* make event */
        e->event.base.type = DRM_EXYNOS_IPP_EVENT;
        e->event.base.length = sizeof(e->event);
        e->event.user_data = qbuf->user_data;
        e->event.prop_id = qbuf->prop_id;
        e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
        e->base.event = &e->event.base;
        e->base.file_priv = file;
        e->base.destroy = ipp_free_event;
        list_add_tail(&e->base.link, &c_node->event_list);

        return 0;
}
758
759 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
760         struct drm_exynos_ipp_queue_buf *qbuf)
761 {
762         struct drm_exynos_ipp_send_event *e, *te;
763         int count = 0;
764
765         if (list_empty(&c_node->event_list)) {
766                 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
767                 return;
768         }
769
770         list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
771                 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
772                         __func__, count++, (int)e);
773
774                 if (!qbuf) {
775                         /* delete list */
776                         list_del(&e->base.link);
777                         kfree(e);
778                 } else if (e->event.buf_id[EXYNOS_DRM_OPS_DST]
779                         == qbuf->buf_id) {
780                         /* delete list */
781                         list_del(&e->base.link);
782                         kfree(e);
783                         return;
784                 }
785         }
786
787         return;
788 }
789
790 void ipp_handle_cmd_work(struct device *dev,
791         struct exynos_drm_ippdrv *ippdrv,
792         struct drm_exynos_ipp_cmd_work *cmd_work,
793         struct drm_exynos_ipp_cmd_node *c_node)
794 {
795         struct ipp_context *ctx = get_ipp_context(dev);
796
797         cmd_work->ippdrv = ippdrv;
798         cmd_work->c_node = c_node;
799         queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
800 }
801
802 int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
803         struct drm_file *file)
804 {
805         struct drm_exynos_file_private *file_priv = file->driver_priv;
806         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
807         struct device *dev = priv->dev;
808         struct ipp_context *ctx = get_ipp_context(dev);
809         struct drm_exynos_ipp_queue_buf *qbuf = data;
810         struct exynos_drm_ippdrv *ippdrv;
811         struct drm_exynos_ipp_property *property;
812         struct exynos_drm_ipp_ops *ops;
813         struct drm_exynos_ipp_cmd_node *c_node;
814         struct drm_exynos_ipp_mem_node *m_node, *tm_node;
815         int ret;
816
817         DRM_DEBUG_KMS("%s\n", __func__);
818
819         if (!qbuf) {
820                 DRM_ERROR("invalid buf parameter.\n");
821                 return -EINVAL;
822         }
823
824         ippdrv = ipp_find_drv_node(qbuf->prop_id);
825
826         if (!ippdrv) {
827                 DRM_ERROR("failed to get ipp driver.\n");
828                 return -EINVAL;
829         }
830
831         if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
832                 DRM_ERROR("invalid ops parameter.\n");
833                 return -EINVAL;
834         }
835
836         ops = ippdrv->ops[qbuf->ops_id];
837         if (!ops) {
838                 DRM_ERROR("failed to get ops.\n");
839                 return -EINVAL;
840         }
841
842         DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
843                 __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
844                 qbuf->buf_id, qbuf->buf_type);
845
846         /* find command node */
847         c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
848                 qbuf->prop_id);
849         if (!c_node) {
850                 DRM_ERROR("failed to get command node.\n");
851                 return -EINVAL;
852         }
853
854         /* get property */
855         property = &c_node->property;
856         if (!property) {
857                 DRM_ERROR("invalid property parameter.\n");
858                 return -EINVAL;
859         }
860
861         /* buffer control */
862         switch (qbuf->buf_type) {
863         case IPP_BUF_ENQUEUE:
864                 /* get memory node */
865                 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
866                 if (!m_node) {
867                         DRM_ERROR("failed to get m_node.\n");
868                         return -EINVAL;
869                 }
870
871                 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
872                         /* get event */
873                         ret = ipp_get_event(drm_dev, file, c_node, qbuf);
874                         if (ret) {
875                                 DRM_ERROR("failed to get event.\n");
876                                 goto err_clean_node;
877                         }
878
879                         if (c_node->state != IPP_STATE_START) {
880                                 DRM_DEBUG_KMS("%s:bypass for invalid state.\n"
881                                         , __func__);
882                                 break;
883                         }
884
885                         if (!ipp_check_mem_list(c_node)) {
886                                 DRM_DEBUG_KMS("%s:empty memory.\n"
887                                         , __func__);
888                                 break;
889                         }
890
891                         /*
892                          * If set source, destination buffer and enable pm
893                          * m2m operations need start operations in queue
894                          */
895                         if (property->cmd == IPP_CMD_M2M) {
896                                 struct drm_exynos_ipp_cmd_work *cmd_work =
897                                         c_node->start_work;
898
899                                 cmd_work->ctrl = IPP_CTRL_PLAY;
900                                 ipp_handle_cmd_work(dev, ippdrv, cmd_work,
901                                         c_node);
902                         } else {
903                                 if (ops->set_addr) {
904                                         ret = ops->set_addr(ippdrv->dev,
905                                                 &m_node->buf_info,
906                                                 m_node->buf_id, qbuf->buf_type);
907                                         if (ret) {
908                                                 DRM_ERROR(
909                                                         "failed to set addr.\n");
910                                                 goto err_clean_node;
911                                         }
912                                 }
913                         }
914                 }
915                 break;
916         case IPP_BUF_DEQUEUE:
917                 mutex_lock(&c_node->cmd_lock);
918
919                 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
920                         ipp_put_event(c_node, qbuf);
921
922                 /* put memory node */
923                 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
924                         list_for_each_entry_safe(m_node, tm_node,
925                                 &c_node->mem_list[qbuf->ops_id], list) {
926                                 if (m_node->buf_id == qbuf->buf_id &&
927                                         m_node->ops_id == qbuf->ops_id) {
928                                         /* put memory node */
929                                         ipp_put_mem_node(drm_dev,
930                                                 c_node, m_node);
931                                 }
932                         }
933                 }
934                 mutex_unlock(&c_node->cmd_lock);
935                 break;
936         default:
937                 DRM_ERROR("invalid buffer control.\n");
938                 return -EINVAL;
939         }
940
941         return 0;
942
943 err_clean_node:
944         DRM_ERROR("clean memory nodes.\n");
945
946         if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
947                 /* delete list */
948                 list_for_each_entry_safe(m_node, tm_node,
949                         &c_node->mem_list[qbuf->ops_id], list) {
950                         if (m_node->buf_id == qbuf->buf_id &&
951                                 m_node->ops_id == qbuf->ops_id)
952                                 ipp_put_mem_node(drm_dev, c_node, m_node);
953                 }
954         }
955
956         return ret;
957 }
958 EXPORT_SYMBOL_GPL(exynos_drm_ipp_queue_buf);
959
960 static bool exynos_drm_ipp_check_valid(struct device *dev,
961         enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
962 {
963         DRM_DEBUG_KMS("%s\n", __func__);
964
965         if (ctrl != IPP_CTRL_PLAY) {
966                 if (pm_runtime_suspended(dev)) {
967                         DRM_ERROR("pm:runtime_suspended.\n");
968                         goto err_status;
969                 }
970         }
971
972         switch (ctrl) {
973         case IPP_CTRL_PLAY:
974                 if (state != IPP_STATE_IDLE)
975                         goto err_status;
976                 break;
977         case IPP_CTRL_STOP:
978                 if (state == IPP_STATE_STOP)
979                         goto err_status;
980                 break;
981         case IPP_CTRL_PAUSE:
982                 if (state != IPP_STATE_START)
983                         goto err_status;
984                 break;
985         case IPP_CTRL_RESUME:
986                 if (state != IPP_STATE_STOP)
987                         goto err_status;
988                 break;
989         default:
990                 DRM_ERROR("invalid state.\n");
991                 goto err_status;
992                 break;
993         }
994
995         return true;
996
997 err_status:
998         DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
999         return false;
1000 }
1001
1002 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1003         struct drm_file *file)
1004 {
1005         struct drm_exynos_file_private *file_priv = file->driver_priv;
1006         struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1007         struct exynos_drm_ippdrv *ippdrv = NULL;
1008         struct device *dev = priv->dev;
1009         struct ipp_context *ctx = get_ipp_context(dev);
1010         struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1011         struct drm_exynos_ipp_cmd_work *cmd_work;
1012         struct drm_exynos_ipp_cmd_node *c_node;
1013         int ret = 0;
1014
1015         DRM_DEBUG_KMS("%s\n", __func__);
1016
1017         if (!ctx) {
1018                 DRM_ERROR("invalid context.\n");
1019                 return -EINVAL;
1020         }
1021
1022         if (!cmd_ctrl) {
1023                 DRM_ERROR("invalid control parameter.\n");
1024                 return -EINVAL;
1025         }
1026
1027         DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
1028                 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1029
1030         ippdrv = ipp_find_drv_node(cmd_ctrl->prop_id);
1031
1032         if (!ippdrv) {
1033                 DRM_ERROR("failed to get ipp driver.\n");
1034                 return -EINVAL;
1035         }
1036
1037         c_node = ipp_find_id(&ctx->prop_idr, &ctx->prop_lock,
1038                 cmd_ctrl->prop_id);
1039
1040         if (!c_node) {
1041                 DRM_ERROR("invalid command node list.\n");
1042                 return -EINVAL;
1043         }
1044
1045         if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1046             c_node->state)) {
1047                 DRM_ERROR("invalid state.\n");
1048                 return -EINVAL;
1049         }
1050
1051         switch (cmd_ctrl->ctrl) {
1052         case IPP_CTRL_PLAY:
1053                 if (pm_runtime_suspended(ippdrv->dev))
1054                         pm_runtime_get_sync(ippdrv->dev);
1055                 c_node->state = IPP_STATE_START;
1056
1057                 cmd_work = c_node->start_work;
1058                 cmd_work->ctrl = cmd_ctrl->ctrl;
1059                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1060                 break;
1061         case IPP_CTRL_STOP:
1062                 c_node->state = IPP_STATE_STOP;
1063
1064                 cmd_work = c_node->stop_work;
1065                 cmd_work->ctrl = cmd_ctrl->ctrl;
1066                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1067
1068                 if (!wait_for_completion_timeout(&c_node->stop_complete,
1069                     msecs_to_jiffies(200))) {
1070                         DRM_ERROR("timeout stop:prop_id[%d]\n",
1071                                 c_node->property.prop_id);
1072                 }
1073
1074                 ippdrv->dedicated = false;
1075                 ipp_clean_cmd_node(c_node);
1076
1077                 if (list_empty(&ippdrv->cmd_list))
1078                         pm_runtime_put_sync(ippdrv->dev);
1079                 break;
1080         case IPP_CTRL_PAUSE:
1081                 c_node->state = IPP_STATE_STOP;
1082
1083                 cmd_work = c_node->stop_work;
1084                 cmd_work->ctrl = cmd_ctrl->ctrl;
1085                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1086
1087                 if (!wait_for_completion_timeout(&c_node->stop_complete,
1088                     msecs_to_jiffies(200))) {
1089                         DRM_ERROR("timeout stop:prop_id[%d]\n",
1090                                 c_node->property.prop_id);
1091                 }
1092                 break;
1093         case IPP_CTRL_RESUME:
1094                 c_node->state = IPP_STATE_START;
1095
1096                 cmd_work = c_node->start_work;
1097                 cmd_work->ctrl = cmd_ctrl->ctrl;
1098                 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1099                 break;
1100         default:
1101                 /* ToDo: expand ctrl operation */
1102                 DRM_ERROR("could not support this state currently.\n");
1103                 goto err_clear;
1104         }
1105
1106         DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
1107                 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1108
1109         return 0;
1110
1111 err_clear:
1112         return ret;
1113 }
1114 EXPORT_SYMBOL_GPL(exynos_drm_ipp_cmd_ctrl);
1115
1116 int exynos_drm_ippnb_register(struct notifier_block *nb)
1117 {
1118         return blocking_notifier_chain_register(
1119                         &exynos_drm_ippnb_list, nb);
1120 }
1121 EXPORT_SYMBOL_GPL(exynos_drm_ippnb_register);
1122
1123 int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1124 {
1125         return blocking_notifier_chain_unregister(
1126                         &exynos_drm_ippnb_list, nb);
1127 }
1128 EXPORT_SYMBOL_GPL(exynos_drm_ippnb_unregister);
1129
1130 int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1131 {
1132         return blocking_notifier_call_chain(
1133                         &exynos_drm_ippnb_list, val, v);
1134 }
1135 EXPORT_SYMBOL_GPL(exynos_drm_ippnb_send_event);
1136
1137 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1138         struct drm_exynos_ipp_property *property)
1139 {
1140         struct exynos_drm_ipp_ops *ops = NULL;
1141         int ret, i, swap = 0;
1142
1143         if (!property) {
1144                 DRM_ERROR("invalid property parameter.\n");
1145                 return -EINVAL;
1146         }
1147
1148         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1149
1150         /* reset h/w block */
1151         if (ippdrv->reset &&
1152                 ippdrv->reset(ippdrv->dev)) {
1153                 DRM_ERROR("failed to reset.\n");
1154                 return -EINVAL;
1155         }
1156
1157         /* set source,destination operations */
1158         for_each_ipp_ops(i) {
1159                 /* ToDo: integrate property and config */
1160                 struct drm_exynos_ipp_config *config =
1161                         &property->config[i];
1162
1163                 ops = ippdrv->ops[i];
1164                 if (!ops || !config) {
1165                         DRM_ERROR("not support ops and config.\n");
1166                         return -EINVAL;
1167                 }
1168
1169                 /* set format */
1170                 if (ops->set_fmt) {
1171                         ret = ops->set_fmt(ippdrv->dev, config->fmt);
1172                         if (ret) {
1173                                 DRM_ERROR("not support format.\n");
1174                                 return ret;
1175                         }
1176                 }
1177
1178                 /* set transform for rotation, flip */
1179                 if (ops->set_transf) {
1180                         swap = ops->set_transf(ippdrv->dev, config->degree,
1181                                 config->flip);
1182                         if (swap < 0) {
1183                                 DRM_ERROR("not support tranf.\n");
1184                                 return -EINVAL;
1185                         }
1186                 }
1187
1188                 /* set size */
1189                 if (ops->set_size) {
1190                         ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1191                                 &config->sz);
1192                         if (ret) {
1193                                 DRM_ERROR("not support size.\n");
1194                                 return ret;
1195                         }
1196                 }
1197         }
1198
1199         return 0;
1200 }
1201
1202 static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1203         struct drm_exynos_ipp_cmd_node *c_node)
1204 {
1205         struct drm_exynos_ipp_mem_node *m_node;
1206         struct drm_exynos_ipp_property *property = &c_node->property;
1207         struct list_head *head;
1208         int ret, i;
1209
1210         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1211
1212         /* store command info in ippdrv */
1213         ippdrv->cmd = c_node;
1214
1215         if (!ipp_check_mem_list(c_node)) {
1216                 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1217                 return -ENOMEM;
1218         }
1219
1220         /* set current property in ippdrv */
1221         ret = ipp_set_property(ippdrv, property);
1222         if (ret) {
1223                 DRM_ERROR("failed to set property.\n");
1224                 ippdrv->cmd = NULL;
1225                 return ret;
1226         }
1227
1228         /* check command */
1229         switch (property->cmd) {
1230         case IPP_CMD_M2M:
1231                 for_each_ipp_ops(i) {
1232                         /* source/destination memory list */
1233                         head = &c_node->mem_list[i];
1234
1235                         /* get first entry */
1236                         m_node = list_first_entry(head,
1237                                 struct drm_exynos_ipp_mem_node, list);
1238                         if (!m_node) {
1239                                 DRM_ERROR("failed to get node.\n");
1240                                 ret = -EFAULT;
1241                                 return ret;
1242                         }
1243
1244                         DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
1245                                 __func__, (int)m_node);
1246
1247                         /* set memory node to ippdrv */
1248                         ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1249                         if (ret) {
1250                                 DRM_ERROR("failed to set m node.\n");
1251                                 return ret;
1252                         }
1253                 }
1254                 break;
1255         case IPP_CMD_WB:
1256                 /* destination memory list */
1257                 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1258
1259                 /* get list entry */
1260                 list_for_each_entry(m_node, head, list) {
1261                         /* set memory node to ippdrv */
1262                         ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1263                         if (ret) {
1264                                 DRM_ERROR("failed to set m node.\n");
1265                                 return ret;
1266                         }
1267                 }
1268                 break;
1269         case IPP_CMD_OUTPUT:
1270                 /* source memory list */
1271                 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1272
1273                 /* get list entry */
1274                 list_for_each_entry(m_node, head, list) {
1275                         /* set memory node to ippdrv */
1276                         ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1277                         if (ret) {
1278                                 DRM_ERROR("failed to set m node.\n");
1279                                 return ret;
1280                         }
1281                 }
1282                 break;
1283         default:
1284                 DRM_ERROR("invalid operations.\n");
1285                 return -EINVAL;
1286         }
1287
1288         DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
1289
1290         /* start operations */
1291         if (ippdrv->start) {
1292                 ret = ippdrv->start(ippdrv->dev, property->cmd);
1293                 if (ret) {
1294                         DRM_ERROR("failed to start ops.\n");
1295                         return ret;
1296                 }
1297         }
1298
1299         return 0;
1300 }
1301
1302 static int ipp_stop_property(struct drm_device *drm_dev,
1303         struct exynos_drm_ippdrv *ippdrv,
1304         struct drm_exynos_ipp_cmd_node *c_node)
1305 {
1306         struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1307         struct drm_exynos_ipp_property *property = &c_node->property;
1308         struct list_head *head;
1309         int ret, i;
1310
1311         DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1312
1313         /* put event */
1314         ipp_put_event(c_node, NULL);
1315
1316         /* check command */
1317         switch (property->cmd) {
1318         case IPP_CMD_M2M:
1319                 for_each_ipp_ops(i) {
1320                         /* source/destination memory list */
1321                         head = &c_node->mem_list[i];
1322
1323                         if (list_empty(head)) {
1324                                 DRM_DEBUG_KMS("%s:mem_list is empty.\n",
1325                                         __func__);
1326                                 break;
1327                         }
1328
1329                         /* get list entry */
1330                         list_for_each_entry_safe(m_node, tm_node,
1331                                 head, list) {
1332                                 /* put memory node */
1333                                 ret = ipp_put_mem_node(drm_dev, c_node,
1334                                         m_node);
1335                                 if (ret) {
1336                                         DRM_ERROR("failed to put m_node.\n");
1337                                         goto err_clear;
1338                                 }
1339                         }
1340                 }
1341                 break;
1342         case IPP_CMD_WB:
1343                 /* destination memory list */
1344                 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1345
1346                 if (list_empty(head)) {
1347                         DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1348                         break;
1349                 }
1350
1351                 /* get list entry */
1352                 list_for_each_entry_safe(m_node, tm_node, head, list) {
1353                         /* put memory node */
1354                         ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1355                         if (ret) {
1356                                 DRM_ERROR("failed to put m_node.\n");
1357                                 goto err_clear;
1358                         }
1359                 }
1360                 break;
1361         case IPP_CMD_OUTPUT:
1362                 /* source memory list */
1363                 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1364
1365                 if (list_empty(head)) {
1366                         DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1367                         break;
1368                 }
1369
1370                 /* get list entry */
1371                 list_for_each_entry_safe(m_node, tm_node, head, list) {
1372                         /* put memory node */
1373                         ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1374                         if (ret) {
1375                                 DRM_ERROR("failed to put m_node.\n");
1376                                 goto err_clear;
1377                         }
1378                 }
1379                 break;
1380         default:
1381                 DRM_ERROR("invalid operations.\n");
1382                 ret = -EINVAL;
1383                 goto err_clear;
1384         }
1385
1386 err_clear:
1387         /* stop operations */
1388         if (ippdrv->stop)
1389                 ippdrv->stop(ippdrv->dev, property->cmd);
1390
1391         return 0;
1392 }
1393
1394 void ipp_sched_cmd(struct work_struct *work)
1395 {
1396         struct drm_exynos_ipp_cmd_work *cmd_work =
1397                 (struct drm_exynos_ipp_cmd_work *)work;
1398         struct exynos_drm_ippdrv *ippdrv;
1399         struct drm_exynos_ipp_cmd_node *c_node;
1400         struct drm_exynos_ipp_property *property;
1401         int ret;
1402
1403         DRM_DEBUG_KMS("%s\n", __func__);
1404
1405         ippdrv = cmd_work->ippdrv;
1406         if (!ippdrv) {
1407                 DRM_ERROR("invalid ippdrv list.\n");
1408                 return;
1409         }
1410
1411         c_node = cmd_work->c_node;
1412         if (!c_node) {
1413                 DRM_ERROR("invalid command node list.\n");
1414                 return;
1415         }
1416
1417         mutex_lock(&c_node->cmd_lock);
1418
1419         /* get property */
1420         property = &c_node->property;
1421         if (!property) {
1422                 DRM_ERROR("failed to get property:prop_id[%d]\n",
1423                         c_node->property.prop_id);
1424                 goto err_unlock;
1425         }
1426
1427         switch (cmd_work->ctrl) {
1428         case IPP_CTRL_PLAY:
1429         case IPP_CTRL_RESUME:
1430                 /* start property */
1431                 ret = ipp_start_property(ippdrv, c_node);
1432                 if (ret) {
1433                         DRM_ERROR("failed to start property:prop_id[%d]\n",
1434                                 c_node->property.prop_id);
1435                         goto err_unlock;
1436                 }
1437
1438                 if (property->cmd == IPP_CMD_M2M) {
1439                         if (!wait_for_completion_timeout
1440                             (&c_node->start_complete, msecs_to_jiffies(200))) {
1441                                 DRM_ERROR("timeout event:prop_id[%d]\n",
1442                                         c_node->property.prop_id);
1443                                 if (ippdrv->stop)
1444                                         ippdrv->stop(ippdrv->dev,
1445                                                 property->cmd);
1446                                 goto err_unlock;
1447                         }
1448                 }
1449                 break;
1450         case IPP_CTRL_STOP:
1451         case IPP_CTRL_PAUSE:
1452                 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1453                         c_node);
1454                 if (ret) {
1455                         DRM_ERROR("failed to stop property.\n");
1456                         goto err_unlock;
1457                 }
1458
1459                 complete(&c_node->stop_complete);
1460                 break;
1461         default:
1462                 DRM_ERROR("unknown control type\n");
1463                 break;
1464         }
1465
1466         DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
1467
1468 err_unlock:
1469         mutex_unlock(&c_node->cmd_lock);
1470 }
1471
1472 static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1473         struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1474 {
1475         struct drm_device *drm_dev = ippdrv->drm_dev;
1476         struct drm_exynos_ipp_property *property = &c_node->property;
1477         struct drm_exynos_ipp_mem_node *m_node;
1478         struct drm_exynos_ipp_queue_buf qbuf;
1479         struct drm_exynos_ipp_send_event *e;
1480         struct list_head *head;
1481         struct timeval now;
1482         unsigned long flags;
1483         u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1484         int ret, i;
1485
1486         for_each_ipp_ops(i)
1487                 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1488                         i ? "dst" : "src", buf_id[i]);
1489
1490         if (!drm_dev) {
1491                 DRM_ERROR("failed to get drm_dev.\n");
1492                 return -EINVAL;
1493         }
1494
1495         if (!property) {
1496                 DRM_ERROR("failed to get property.\n");
1497                 return -EINVAL;
1498         }
1499
1500         if (list_empty(&c_node->event_list)) {
1501                 DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
1502                 return 0;
1503         }
1504
1505         if (!ipp_check_mem_list(c_node)) {
1506                 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1507                 return 0;
1508         }
1509
1510         /* check command */
1511         switch (property->cmd) {
1512         case IPP_CMD_M2M:
1513                 for_each_ipp_ops(i) {
1514                         /* source/destination memory list */
1515                         head = &c_node->mem_list[i];
1516
1517                         /* get first entry */
1518                         m_node = list_first_entry(head,
1519                                 struct drm_exynos_ipp_mem_node, list);
1520                         if (!m_node) {
1521                                 DRM_ERROR("empty memory node.\n");
1522                                 return -ENOMEM;
1523                         }
1524
1525                         tbuf_id[i] = m_node->buf_id;
1526                         DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1527                                 i ? "dst" : "src", tbuf_id[i]);
1528
1529                         /* put memory node */
1530                         ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1531                         if (ret)
1532                                 DRM_ERROR("failed to put m_node.\n");
1533                 }
1534                 break;
1535         case IPP_CMD_WB:
1536                 /* clear buf for finding */
1537                 memset(&qbuf, 0x0, sizeof(qbuf));
1538                 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1539                 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1540
1541                 /* get memory node entry */
1542                 m_node = ipp_find_mem_node(c_node, &qbuf);
1543                 if (!m_node) {
1544                         DRM_ERROR("empty memory node.\n");
1545                         return -ENOMEM;
1546                 }
1547
1548                 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1549
1550                 /* put memory node */
1551                 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1552                 if (ret)
1553                         DRM_ERROR("failed to put m_node.\n");
1554                 break;
1555         case IPP_CMD_OUTPUT:
1556                 /* source memory list */
1557                 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1558
1559                 /* get first entry */
1560                 m_node = list_first_entry(head,
1561                         struct drm_exynos_ipp_mem_node, list);
1562                 if (!m_node) {
1563                         DRM_ERROR("empty memory node.\n");
1564                         return -ENOMEM;
1565                 }
1566
1567                 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1568
1569                 /* put memory node */
1570                 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1571                 if (ret)
1572                         DRM_ERROR("failed to put m_node.\n");
1573                 break;
1574         default:
1575                 DRM_ERROR("invalid operations.\n");
1576                 return -EINVAL;
1577         }
1578
1579         /* ToDo: Fix buffer id */
1580         if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1581                 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1582                         tbuf_id[1], buf_id[1], property->prop_id);
1583
1584         /* get first event entry */
1585         e = list_first_entry(&c_node->event_list,
1586                 struct drm_exynos_ipp_send_event, base.link);
1587
1588         if (!e) {
1589                 DRM_ERROR("empty event.\n");
1590                 return -EINVAL;
1591         }
1592
1593         do_gettimeofday(&now);
1594         DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
1595                 , __func__, now.tv_sec, now.tv_usec);
1596         e->event.tv_sec = now.tv_sec;
1597         e->event.tv_usec = now.tv_usec;
1598         e->event.prop_id = property->prop_id;
1599
1600         /* set buffer id about source destination */
1601         for_each_ipp_ops(i)
1602                 e->event.buf_id[i] = tbuf_id[i];
1603         /* ToDo: compare index. If needed */
1604
1605         spin_lock_irqsave(&drm_dev->event_lock, flags);
1606         list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1607         wake_up_interruptible(&e->base.file_priv->event_wait);
1608         spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1609
1610         DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
1611                 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1612
1613         return 0;
1614 }
1615
1616 void ipp_sched_event(struct work_struct *work)
1617 {
1618         struct drm_exynos_ipp_event_work *event_work =
1619                 (struct drm_exynos_ipp_event_work *)work;
1620         struct exynos_drm_ippdrv *ippdrv;
1621         struct drm_exynos_ipp_cmd_node *c_node;
1622         int ret;
1623
1624         if (!event_work) {
1625                 DRM_ERROR("failed to get event_work.\n");
1626                 return;
1627         }
1628
1629         DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1630                 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1631
1632         ippdrv = event_work->ippdrv;
1633         if (!ippdrv) {
1634                 DRM_ERROR("failed to get ipp driver.\n");
1635                 return;
1636         }
1637
1638         c_node = ippdrv->cmd;
1639         if (!c_node) {
1640                 DRM_ERROR("failed to get command node.\n");
1641                 return;
1642         }
1643
1644         if (c_node->state != IPP_STATE_START) {
1645                 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1646                         __func__, c_node->state, c_node->property.prop_id);
1647                 goto err_completion;
1648         }
1649
1650         mutex_lock(&c_node->event_lock);
1651
1652         ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1653         if (ret) {
1654                 DRM_ERROR("failed to send event.\n");
1655                 goto err_completion;
1656         }
1657
1658 err_completion:
1659         if (c_node->property.cmd == IPP_CMD_M2M)
1660                 complete(&c_node->start_complete);
1661
1662         mutex_unlock(&c_node->event_lock);
1663 }
1664
1665 static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1666 {
1667         struct ipp_context *ctx = get_ipp_context(dev);
1668         struct exynos_drm_ippdrv *ippdrv;
1669         int ret, count = 0;
1670
1671         DRM_DEBUG_KMS("%s\n", __func__);
1672
1673         /* get ipp driver entry */
1674         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1675                 ippdrv->drm_dev = drm_dev;
1676
1677                 /* create ipp id */
1678                 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1679                         &ippdrv->ipp_id);
1680                 if (ret) {
1681                         DRM_ERROR("failed to create id.\n");
1682                         goto err_idr;
1683                 }
1684
1685                 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1686                         count++, (int)ippdrv, ippdrv->ipp_id);
1687
1688                 if (ippdrv->ipp_id == 0) {
1689                         DRM_ERROR("failed to get ipp_id[%d]\n",
1690                                 ippdrv->ipp_id);
1691                         goto err_idr;
1692                 }
1693
1694                 /* store parent device for node */
1695                 ippdrv->parent_dev = dev;
1696
1697                 /* store event work queue and handler */
1698                 ippdrv->event_workq = ctx->event_workq;
1699                 ippdrv->sched_event = ipp_sched_event;
1700                 INIT_LIST_HEAD(&ippdrv->cmd_list);
1701         }
1702
1703         return 0;
1704
1705 err_idr:
1706         idr_remove_all(&ctx->ipp_idr);
1707         idr_remove_all(&ctx->prop_idr);
1708         idr_destroy(&ctx->ipp_idr);
1709         idr_destroy(&ctx->prop_idr);
1710
1711         return ret;
1712 }
1713
1714 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1715 {
1716         struct exynos_drm_ippdrv *ippdrv;
1717
1718         DRM_DEBUG_KMS("%s\n", __func__);
1719
1720         /* get ipp driver entry */
1721         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1722                 ippdrv->drm_dev = NULL;
1723                 exynos_drm_ippdrv_unregister(ippdrv);
1724         }
1725
1726         /* ToDo: free notifier callback list if needed */
1727 }
1728
1729 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1730                                                         struct drm_file *file)
1731 {
1732         struct drm_exynos_file_private *file_priv = file->driver_priv;
1733         struct exynos_drm_ipp_private *priv;
1734
1735         DRM_DEBUG_KMS("%s\n", __func__);
1736
1737         /* ToDo: multi device open */
1738
1739         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1740         if (!priv) {
1741                 DRM_ERROR("failed to allocate priv.\n");
1742                 return -ENOMEM;
1743         }
1744         priv->dev = dev;
1745         file_priv->ipp_priv = priv;
1746
1747         INIT_LIST_HEAD(&priv->event_list);
1748
1749         DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
1750
1751         return 0;
1752 }
1753
/*
 * Per-file close hook: release every ipp command node owned by this
 * drm file.
 *
 * For each registered ipp driver, any command node whose priv matches
 * this file is torn down: a node still in IPP_STATE_START is stopped
 * first, then cleaned, and the driver's runtime PM reference is
 * dropped once its command list drains.  The file's ipp private data
 * is freed last.
 */
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
							struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	/* no drivers registered: nothing to tear down but the priv itself */
	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		/* _safe variant: ipp_clean_cmd_node() unlinks c_node */
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_INFO("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/* stop a still-running property first */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				/*
				 * last node for this driver gone: drop the
				 * runtime PM reference taken when commands
				 * were queued.
				 */
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);

	return;
}
1799
1800 static int __devinit ipp_probe(struct platform_device *pdev)
1801 {
1802         struct device *dev = &pdev->dev;
1803         struct ipp_context *ctx;
1804         struct exynos_drm_subdrv *subdrv;
1805         int ret = -EINVAL;
1806
1807         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1808         if (!ctx)
1809                 return -ENOMEM;
1810
1811         DRM_DEBUG_KMS("%s\n", __func__);
1812
1813         spin_lock_init(&ctx->ipp_lock);
1814         spin_lock_init(&ctx->prop_lock);
1815         /* init ipp driver idr */
1816         idr_init(&ctx->ipp_idr);
1817         idr_init(&ctx->prop_idr);
1818         /* create single thread for ipp event */
1819         ctx->event_workq = create_singlethread_workqueue("ipp_event");
1820         if (!ctx->event_workq) {
1821                 dev_err(dev, "failed to create event workqueue\n");
1822                 ret = -EINVAL;
1823                 goto err_clear;
1824         }
1825
1826         /* create single thread for ipp command */
1827         ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1828         if (!ctx->cmd_workq) {
1829                 dev_err(dev, "failed to create cmd workqueue\n");
1830                 ret = -EINVAL;
1831                 goto err_event_workq;
1832         }
1833
1834         /* set sub driver informations */
1835         subdrv = &ctx->subdrv;
1836         subdrv->dev = dev;
1837         subdrv->probe = ipp_subdrv_probe;
1838         subdrv->remove = ipp_subdrv_remove;
1839         subdrv->open = ipp_subdrv_open;
1840         subdrv->close = ipp_subdrv_close;
1841
1842         /* set driver data */
1843         platform_set_drvdata(pdev, ctx);
1844
1845         /* register sub driver */
1846         ret = exynos_drm_subdrv_register(subdrv);
1847         if (ret < 0) {
1848                 DRM_ERROR("failed to register drm ipp device.\n");
1849                 goto err_cmd_workq;
1850         }
1851
1852         dev_info(&pdev->dev, "drm ipp registered successfully.\n");
1853
1854         return 0;
1855
1856 err_cmd_workq:
1857         destroy_workqueue(ctx->cmd_workq);
1858 err_event_workq:
1859         destroy_workqueue(ctx->event_workq);
1860 err_clear:
1861         kfree(ctx);
1862
1863         return ret;
1864 }
1865
1866 static int __devexit ipp_remove(struct platform_device *pdev)
1867 {
1868         struct ipp_context *ctx = platform_get_drvdata(pdev);
1869
1870         DRM_DEBUG_KMS("%s\n", __func__);
1871
1872         /* unregister sub driver */
1873         exynos_drm_subdrv_unregister(&ctx->subdrv);
1874
1875         /* remove,destroy ipp idr */
1876         idr_remove_all(&ctx->ipp_idr);
1877         idr_remove_all(&ctx->prop_idr);
1878         idr_destroy(&ctx->ipp_idr);
1879         idr_destroy(&ctx->prop_idr);
1880
1881         /* destroy command, event work queue */
1882         destroy_workqueue(ctx->cmd_workq);
1883         destroy_workqueue(ctx->event_workq);
1884
1885         kfree(ctx);
1886
1887         return 0;
1888 }
1889
/*
 * Power/sysmmu control stub shared by the sleep and runtime PM hooks.
 * Currently only logs the request and reports success.
 */
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	/* ToDo: Need to implement power and sysmmu ctrl. */
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}
1897
1898 #ifdef CONFIG_PM_SLEEP
1899 static int ipp_suspend(struct device *dev)
1900 {
1901         struct ipp_context *ctx = get_ipp_context(dev);
1902
1903         DRM_DEBUG_KMS("%s\n", __func__);
1904
1905         if (pm_runtime_suspended(dev))
1906                 return 0;
1907
1908         return  ipp_power_ctrl(ctx, false);
1909 }
1910
1911 static int ipp_resume(struct device *dev)
1912 {
1913         struct ipp_context *ctx = get_ipp_context(dev);
1914
1915         DRM_DEBUG_KMS("%s\n", __func__);
1916
1917         if (!pm_runtime_suspended(dev))
1918                 return  ipp_power_ctrl(ctx, true);
1919
1920         return 0;
1921 }
1922 #endif
1923
1924 #ifdef CONFIG_PM_RUNTIME
1925 static int ipp_runtime_suspend(struct device *dev)
1926 {
1927         struct ipp_context *ctx = get_ipp_context(dev);
1928
1929         DRM_DEBUG_KMS("%s\n", __func__);
1930
1931         return  ipp_power_ctrl(ctx, false);
1932 }
1933
1934 static int ipp_runtime_resume(struct device *dev)
1935 {
1936         struct ipp_context *ctx = get_ipp_context(dev);
1937
1938         DRM_DEBUG_KMS("%s\n", __func__);
1939
1940         return  ipp_power_ctrl(ctx, true);
1941 }
1942 #endif
1943
/*
 * PM operations: system sleep maps to ipp_suspend/ipp_resume, runtime
 * PM to ipp_runtime_suspend/ipp_runtime_resume.  Each SET_* macro
 * compiles to nothing when its config option is disabled.
 */
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
1948
/*
 * Platform driver glue for the "exynos-drm-ipp" device.  probe/remove
 * register and unregister the ipp sub driver with the exynos drm core.
 */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= __devexit_p(ipp_remove),
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};
1958