/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors: YoungJun Cho <yj44.cho@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos_params.h>

#include "drmP.h"
#include "exynos_drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

/* Configuration */
#define ROT_CONFIG                      0x00
#define ROT_CONFIG_IRQ                  (3 << 8)

/* Image Control */
#define ROT_CONTROL                     0x10
#define ROT_CONTROL_PATTERN_WRITE       (1 << 16)
#define ROT_CONTROL_FMT_YCBCR420_2P     (1 << 8)
#define ROT_CONTROL_FMT_RGB888          (6 << 8)
#define ROT_CONTROL_FMT_MASK            (7 << 8)
#define ROT_CONTROL_FLIP_VERTICAL       (2 << 6)
#define ROT_CONTROL_FLIP_HORIZONTAL     (3 << 6)
#define ROT_CONTROL_FLIP_MASK           (3 << 6)
#define ROT_CONTROL_ROT_90              (1 << 4)
#define ROT_CONTROL_ROT_180             (2 << 4)
#define ROT_CONTROL_ROT_270             (3 << 4)
#define ROT_CONTROL_ROT_MASK            (3 << 4)
#define ROT_CONTROL_START               (1 << 0)

/* Status */
#define ROT_STATUS                      0x20
#define ROT_STATUS_IRQ_PENDING(x)       (1 << (x))
#define ROT_STATUS_IRQ(x)               (((x) >> 8) & 0x3)
#define ROT_STATUS_IRQ_VAL_COMPLETE     1
#define ROT_STATUS_IRQ_VAL_ILLEGAL      2

/* Source Buffer Address */
#define ROT_SRC_BUF_ADDR(n)             (0x30 + ((n) << 2))

/* Source Buffer Size */
#define ROT_SRC_BUF_SIZE                0x3c
#define ROT_SRC_BUF_SIZE_H(x)           ((x) << 16)
#define ROT_SRC_BUF_SIZE_W(x)           ((x) << 0)

/* Source Crop Position */
#define ROT_SRC_CROP_POS                0x40
#define ROT_SRC_CROP_POS_Y(x)           ((x) << 16)
#define ROT_SRC_CROP_POS_X(x)           ((x) << 0)

/* Source Crop Size */
#define ROT_SRC_CROP_SIZE               0x44
#define ROT_SRC_CROP_SIZE_H(x)          ((x) << 16)
#define ROT_SRC_CROP_SIZE_W(x)          ((x) << 0)

/* Destination Buffer Address */
#define ROT_DST_BUF_ADDR(n)             (0x50 + ((n) << 2))

/* Destination Buffer Size */
#define ROT_DST_BUF_SIZE                0x5c
#define ROT_DST_BUF_SIZE_H(x)           ((x) << 16)
#define ROT_DST_BUF_SIZE_W(x)           ((x) << 0)

/* Destination Crop Position */
#define ROT_DST_CROP_POS                0x60
#define ROT_DST_CROP_POS_Y(x)           ((x) << 16)
#define ROT_DST_CROP_POS_X(x)           ((x) << 0)

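/*
 * Size rounding helpers: "align" is a power-of-two exponent and "mask"
 * is ~((1 << align) - 1). ROT_ALIGN() rounds the value pointed to by x
 * to the nearest multiple of (1 << align), ROT_MIN() rounds a lower
 * limit up and ROT_MAX() rounds an upper limit down. With align = 2
 * (mask = ~3), for example, a width of 1021 becomes (1021 + 2) & ~3 =
 * 1020, while 1022 becomes (1022 + 2) & ~3 = 1024.
 */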
/* Round to nearest aligned value */
#define ROT_ALIGN(x, align, mask)       ((*(x) + (1 << ((align) - 1))) & (mask))
/* Minimum limit value */
#define ROT_MIN(min, mask)              (((min) + ~(mask)) & (mask))
/* Maximum limit value */
#define ROT_MAX(max, mask)              ((max) & (mask))

enum rot_irq_status {
        ROT_IRQ_STATUS_COMPLETE = 8,
        ROT_IRQ_STATUS_ILLEGAL  = 9,
};

struct rot_limit {
        u32     min_w;
        u32     min_h;
        u32     max_w;
        u32     max_h;
        u32     align;
};

struct rot_limit_table {
        struct rot_limit        ycbcr420_2p;
        struct rot_limit        rgb888;
};

struct rot_context {
        struct rot_limit_table          *limit_tbl;
        struct clk                      *clock;
        struct resource                 *regs_res;
        void __iomem                    *regs;
        int                             irq;
        int                             exec_ret;
        struct exynos_drm_subdrv        subdrv;
        struct completion               complete;
        struct mutex                    exec_mutex;
        spinlock_t                      irq_lock;
        struct pm_qos_request_list      pm_qos;
        bool                            suspended;
};

struct rot_buffer {
        dma_addr_t      src_addr[DRM_EXYNOS_ROT_MAX_BUF];
        dma_addr_t      dst_addr[DRM_EXYNOS_ROT_MAX_BUF];
        void            *src_gem_obj[DRM_EXYNOS_ROT_MAX_BUF];
        void            *dst_gem_obj[DRM_EXYNOS_ROT_MAX_BUF];
        u32             src_cnt;
        u32             dst_cnt;
        u32             src_w;
        u32             src_h;
        u32             dst_w;
        u32             dst_h;
};

static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
{
        u32 value = readl(rot->regs + ROT_CONFIG);

        if (enable)
                value |= ROT_CONFIG_IRQ;
        else
                value &= ~ROT_CONFIG_IRQ;

        writel(value, rot->regs + ROT_CONFIG);
}

static void rotator_reg_set_format(struct rot_context *rot, u32 img_fmt)
{
        u32 value = readl(rot->regs + ROT_CONTROL);
        value &= ~ROT_CONTROL_FMT_MASK;

        switch (img_fmt) {
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV12M:
                value |= ROT_CONTROL_FMT_YCBCR420_2P;
                break;
        case DRM_FORMAT_RGB888:
                value |= ROT_CONTROL_FMT_RGB888;
                break;
        default:
                DRM_ERROR("invalid image format\n");
                return;
        }

        writel(value, rot->regs + ROT_CONTROL);
}

static void rotator_reg_set_flip(struct rot_context *rot,
                                                enum drm_exynos_rot_flip flip)
{
        u32 value = readl(rot->regs + ROT_CONTROL);
        value &= ~ROT_CONTROL_FLIP_MASK;

        switch (flip) {
        case ROT_FLIP_VERTICAL:
                value |= ROT_CONTROL_FLIP_VERTICAL;
                break;
        case ROT_FLIP_HORIZONTAL:
                value |= ROT_CONTROL_FLIP_HORIZONTAL;
                break;
        default:
                /* Flip None */
                break;
        }

        writel(value, rot->regs + ROT_CONTROL);
}

static void rotator_reg_set_rotation(struct rot_context *rot,
                                        enum drm_exynos_rot_degree degree)
{
        u32 value = readl(rot->regs + ROT_CONTROL);
        value &= ~ROT_CONTROL_ROT_MASK;

        switch (degree) {
        case ROT_DEGREE_90:
                value |= ROT_CONTROL_ROT_90;
                break;
        case ROT_DEGREE_180:
                value |= ROT_CONTROL_ROT_180;
                break;
        case ROT_DEGREE_270:
                value |= ROT_CONTROL_ROT_270;
                break;
        default:
                /* Rotation 0 Degree */
                break;
        }

        writel(value, rot->regs + ROT_CONTROL);
}

static void rotator_reg_set_start(struct rot_context *rot)
{
        u32 value = readl(rot->regs + ROT_CONTROL);

        value |= ROT_CONTROL_START;

        writel(value, rot->regs + ROT_CONTROL);
}

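/*
 * Decode the interrupt source from ROT_STATUS: the IRQ field reports
 * either a normal completion or an illegal SFR setting. The returned
 * enum value doubles as the pending-bit index that
 * rotator_reg_set_irq_status_clear() writes back to acknowledge the
 * interrupt.
 */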
static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
{
        u32 value = readl(rot->regs + ROT_STATUS);
        value = ROT_STATUS_IRQ(value);

        if (value == ROT_STATUS_IRQ_VAL_COMPLETE)
                return ROT_IRQ_STATUS_COMPLETE;
        else
                return ROT_IRQ_STATUS_ILLEGAL;
}

static void rotator_reg_set_irq_status_clear(struct rot_context *rot,
                                                enum rot_irq_status status)
{
        u32 value = readl(rot->regs + ROT_STATUS);

        value |= ROT_STATUS_IRQ_PENDING((u32)status);

        writel(value, rot->regs + ROT_STATUS);
}

static void rotator_reg_set_src_buf_addr(struct rot_context *rot,
                                                        dma_addr_t addr, int i)
{
        writel(addr, rot->regs + ROT_SRC_BUF_ADDR(i));
}

static void rotator_reg_set_src_buf_size(struct rot_context *rot, u32 w, u32 h)
{
        u32 value = ROT_SRC_BUF_SIZE_H(h) | ROT_SRC_BUF_SIZE_W(w);

        writel(value, rot->regs + ROT_SRC_BUF_SIZE);
}

static void rotator_reg_set_src_crop_pos(struct rot_context *rot, u32 x, u32 y)
{
        u32 value = ROT_SRC_CROP_POS_Y(y) | ROT_SRC_CROP_POS_X(x);

        writel(value, rot->regs + ROT_SRC_CROP_POS);
}

static void rotator_reg_set_src_crop_size(struct rot_context *rot, u32 w, u32 h)
{
        u32 value = ROT_SRC_CROP_SIZE_H(h) | ROT_SRC_CROP_SIZE_W(w);

        writel(value, rot->regs + ROT_SRC_CROP_SIZE);
}

static void rotator_reg_set_dst_buf_addr(struct rot_context *rot,
                                                        dma_addr_t addr, int i)
{
        writel(addr, rot->regs + ROT_DST_BUF_ADDR(i));
}

static void rotator_reg_set_dst_buf_size(struct rot_context *rot, u32 w, u32 h)
{
        u32 value = ROT_DST_BUF_SIZE_H(h) | ROT_DST_BUF_SIZE_W(w);

        writel(value, rot->regs + ROT_DST_BUF_SIZE);
}

static void rotator_reg_set_dst_crop_pos(struct rot_context *rot, u32 x, u32 y)
{
        u32 value = ROT_DST_CROP_POS_Y(y) | ROT_DST_CROP_POS_X(x);

        writel(value, rot->regs + ROT_DST_CROP_POS);
}

static void rotator_reg_get_dump(struct rot_context *rot)
{
        u32 value, i;

        for (i = 0; i <= ROT_DST_CROP_POS; i += 0x4) {
                value = readl(rot->regs + i);
                DRM_INFO("+0x%x: 0x%x\n", i, value);
        }
}

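/*
 * A request is only valid when source and destination carry the same,
 * non-zero number of GEM handles and that number matches the format:
 * two planes (Y and CbCr) for DRM_FORMAT_NV12M, a single handle for
 * the contiguous DRM_FORMAT_NV12 and for DRM_FORMAT_RGB888.
 */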
static bool rotator_check_format_n_handle_valid(u32 img_fmt,
                                                        u32 src_buf_handle_cnt,
                                                        u32 dst_buf_handle_cnt)
{
        bool ret = false;

        if ((src_buf_handle_cnt != dst_buf_handle_cnt)
                                                || (src_buf_handle_cnt == 0))
                return ret;

        switch (img_fmt) {
        case DRM_FORMAT_NV12M:
                if (src_buf_handle_cnt == 2)
                        ret = true;
                break;
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_RGB888:
                if (src_buf_handle_cnt == 1)
                        ret = true;
                break;
        default:
                DRM_ERROR("invalid image format\n");
                break;
        }

        return ret;
}

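/*
 * Round a width/height pair to the hardware alignment and clamp the
 * result into the [min, max] window of the current format, keeping the
 * clamped limits aligned as well.
 */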
static void rotator_align_size(struct rot_limit *limit, u32 mask, u32 *w,
                                                                        u32 *h)
{
        u32 value;

        value = ROT_ALIGN(w, limit->align, mask);
        if (value < limit->min_w)
                *w = ROT_MIN(limit->min_w, mask);
        else if (value > limit->max_w)
                *w = ROT_MAX(limit->max_w, mask);
        else
                *w = value;

        value = ROT_ALIGN(h, limit->align, mask);
        if (value < limit->min_h)
                *h = ROT_MIN(limit->min_h, mask);
        else if (value > limit->max_h)
                *h = ROT_MAX(limit->max_h, mask);
        else
                *h = value;
}

static void rotator_align_buffer(struct rot_context *rot,
                                        struct rot_buffer *buf,
                                        struct drm_exynos_rot_buffer *req_buf,
                                        struct drm_exynos_rot_control *control)
{
        struct rot_limit_table *limit_tbl = rot->limit_tbl;
        struct rot_limit *limit;
        u32 mask;

        /* Get size limit */
        if (control->img_fmt == DRM_FORMAT_RGB888)
                limit = &limit_tbl->rgb888;
        else
                limit = &limit_tbl->ycbcr420_2p;

        /* Get mask for rounding to nearest aligned value */
        mask = ~((1 << limit->align) - 1);

        /* For source buffer */
        buf->src_w = req_buf->src_w;
        buf->src_h = req_buf->src_h;
        rotator_align_size(limit, mask, &buf->src_w, &buf->src_h);

        /* For destination buffer */
        buf->dst_w = req_buf->dst_w;
        buf->dst_h = req_buf->dst_h;
        rotator_align_size(limit, mask, &buf->dst_w, &buf->dst_h);
}

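/*
 * Check that the source crop lies inside the (aligned) source buffer
 * and that the rotated result fits into the destination buffer. For 90
 * and 270 degree rotations the crop width and height swap roles on the
 * destination side, so the bounds are compared accordingly.
 */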
static bool rotator_check_crop_boundary(struct rot_buffer *buf,
                                        struct drm_exynos_rot_control *control,
                                        struct drm_exynos_rot_crop *crop)
{
        bool ret = true;

        /* Check source crop position */
        if ((crop->src_x + crop->src_w > buf->src_w)
                                || (crop->src_y + crop->src_h > buf->src_h))
                return false;

        /* Check destination crop position */
        switch (control->degree) {
        case ROT_DEGREE_90:
        case ROT_DEGREE_270:
                if ((crop->dst_x + crop->src_h > buf->dst_w)
                                || (crop->dst_y + crop->src_w > buf->dst_h))
                        ret = false;
                break;
        default:
                if ((crop->dst_x + crop->src_w > buf->dst_w)
                                || (crop->dst_y + crop->src_h > buf->dst_h))
                        ret = false;
                break;
        }

        return ret;
}

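/*
 * Translate the GEM handles supplied by userspace into device
 * addresses through the rotator IOMMU. The buffer counters advance
 * only after a successful mapping, so rotator_iommu_unmap() can roll
 * back exactly the mappings that were established if an error occurs
 * midway.
 */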
static int rotator_iommu_map(struct rot_buffer *buf,
                                        struct drm_exynos_rot_buffer *req_buf,
                                        struct iommu_gem_map_params *params,
                                        struct list_head *iommu_list)
{
        /* For source buffer */
        buf->src_cnt = 0;
        while (buf->src_cnt < req_buf->src_cnt) {
                buf->src_addr[buf->src_cnt] = exynos_drm_iommu_map_gem(params,
                                        iommu_list,
                                        req_buf->src_handle[buf->src_cnt],
                                        IOMMU_ROTATOR);
                if (!buf->src_addr[buf->src_cnt]) {
                        DRM_ERROR("failed to map src handle[%u]\n",
                                                                buf->src_cnt);
                        return -EINVAL;
                }
                buf->src_gem_obj[(buf->src_cnt)++] = params->gem_obj;
        }

        /* For destination buffer */
        buf->dst_cnt = 0;
        while (buf->dst_cnt < req_buf->dst_cnt) {
                buf->dst_addr[buf->dst_cnt] = exynos_drm_iommu_map_gem(params,
                                        iommu_list,
                                        req_buf->dst_handle[buf->dst_cnt],
                                        IOMMU_ROTATOR);
                if (!buf->dst_addr[buf->dst_cnt]) {
                        DRM_ERROR("failed to map dst handle[%u]\n",
                                                                buf->dst_cnt);
                        return -EINVAL;
                }
                buf->dst_gem_obj[(buf->dst_cnt)++] = params->gem_obj;
        }

        return 0;
}

static void rotator_iommu_unmap(struct rot_buffer *buf,
                                        struct iommu_gem_map_params *params)
{
        /* For destination buffer */
        while (buf->dst_cnt > 0) {
                params->gem_obj = buf->dst_gem_obj[--(buf->dst_cnt)];
                exynos_drm_iommu_unmap_gem(params,
                                                buf->dst_addr[buf->dst_cnt],
                                                IOMMU_ROTATOR);
        }

        /* For source buffer */
        while (buf->src_cnt > 0) {
                params->gem_obj = buf->src_gem_obj[--(buf->src_cnt)];
                exynos_drm_iommu_unmap_gem(params,
                                                buf->src_addr[buf->src_cnt],
                                                IOMMU_ROTATOR);
        }
}

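/*
 * Program one rotation job and kick it off: take a runtime PM
 * reference, unmask the interrupt, load format/flip/rotation, the
 * buffer addresses and sizes, then the crop windows, and finally set
 * the START bit. Completion is reported via rotator_irq_thread().
 */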
static void rotator_execute(struct rot_context *rot,
                                        struct rot_buffer *buf,
                                        struct drm_exynos_rot_control *control,
                                        struct drm_exynos_rot_crop *crop)
{
        int i;

        pm_runtime_get_sync(rot->subdrv.dev);

        /* Set interrupt enable */
        rotator_reg_set_irq(rot, true);

        /* Set control registers */
        rotator_reg_set_format(rot, control->img_fmt);
        rotator_reg_set_flip(rot, control->flip);
        rotator_reg_set_rotation(rot, control->degree);

        /* Set source buffer address */
        for (i = 0; i < DRM_EXYNOS_ROT_MAX_BUF; i++)
                rotator_reg_set_src_buf_addr(rot, buf->src_addr[i], i);

        /* Set source buffer size */
        rotator_reg_set_src_buf_size(rot, buf->src_w, buf->src_h);

        /* Set destination buffer address */
        for (i = 0; i < DRM_EXYNOS_ROT_MAX_BUF; i++)
                rotator_reg_set_dst_buf_addr(rot, buf->dst_addr[i], i);

        /* Set destination buffer size */
        rotator_reg_set_dst_buf_size(rot, buf->dst_w, buf->dst_h);

        /* Set source crop image position */
        rotator_reg_set_src_crop_pos(rot, crop->src_x, crop->src_y);

        /* Set source crop image size */
        rotator_reg_set_src_crop_size(rot, crop->src_w, crop->src_h);

        /* Set destination crop image position */
        rotator_reg_set_dst_crop_pos(rot, crop->dst_x, crop->dst_y);

        /* Start rotator operation */
        rotator_reg_set_start(rot);
}

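/*
 * Handler for the rotator execute ioctl: validates the request, aligns
 * the buffer sizes to the per-format limits, maps the GEM buffers
 * through the IOMMU, then runs the job under exec_mutex and waits up
 * to two seconds for the completion signalled by the interrupt thread
 * before unmapping the buffers again.
 */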
int exynos_drm_rotator_exec_ioctl(struct drm_device *drm_dev, void *data,
                                                        struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_rot_private *priv = file_priv->rot_priv;
        struct device *dev = priv->dev;
        struct rot_context *rot;
        struct drm_exynos_rot_exec_data *req = data;
        struct drm_exynos_rot_buffer *req_buf = &req->buf;
        struct drm_exynos_rot_control *control = &req->control;
        struct drm_exynos_rot_crop *crop = &req->crop;
        struct rot_buffer buf;
        struct iommu_gem_map_params params;

        if (!dev) {
                DRM_ERROR("failed to get dev\n");
                return -ENODEV;
        }

        rot = dev_get_drvdata(dev);
        if (!rot) {
                DRM_ERROR("failed to get drvdata\n");
                return -EFAULT;
        }

        if (rot->suspended) {
                DRM_ERROR("suspended state\n");
                return -EPERM;
        }

        if (!rotator_check_format_n_handle_valid(control->img_fmt,
                                                        req_buf->src_cnt,
                                                        req_buf->dst_cnt)) {
                DRM_ERROR("format or handles are invalid\n");
                return -EINVAL;
        }

        init_completion(&rot->complete);

        /* Align buffer */
        rotator_align_buffer(rot, &buf, req_buf, control);

        /* Check crop boundary */
        if (!rotator_check_crop_boundary(&buf, control, crop)) {
                DRM_ERROR("boundary error\n");
                return -EINVAL;
        }

        params.dev = dev;
        params.drm_dev = drm_dev;
        params.file = file;

        /* Map IOMMU */
        rot->exec_ret = rotator_iommu_map(&buf, req_buf, &params,
                                                        &priv->iommu_list);
        if (rot->exec_ret < 0)
                goto err_iommu_map;

        /*
         * Derive the chroma plane address for the single-handle NV12
         * format: the CbCr plane follows the Y plane in the same
         * buffer. The mapped address sits at index src/dst_cnt - 1, so
         * the second plane goes into index src/dst_cnt.
         */
        if (control->img_fmt == DRM_FORMAT_NV12) {
                u32 size = crop->src_w * crop->src_h;

                buf.src_addr[buf.src_cnt] =
                                        buf.src_addr[buf.src_cnt - 1] + size;
                buf.dst_addr[buf.dst_cnt] =
                                        buf.dst_addr[buf.dst_cnt - 1] + size;
        }

        /* Execute */
        mutex_lock(&rot->exec_mutex);
        rotator_execute(rot, &buf, control, crop);
        if (!wait_for_completion_timeout(&rot->complete, 2 * HZ)) {
                DRM_ERROR("timeout error\n");
                rot->exec_ret = -ETIMEDOUT;
                mutex_unlock(&rot->exec_mutex);
                goto err_iommu_map;
        }
        mutex_unlock(&rot->exec_mutex);

        /* Unmap IOMMU */
        rotator_iommu_unmap(&buf, &params);

        return rot->exec_ret;

err_iommu_map:
        rotator_iommu_unmap(&buf, &params);
        return rot->exec_ret;
}
EXPORT_SYMBOL_GPL(exynos_drm_rotator_exec_ioctl);

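/*
 * Threaded interrupt handler: drop the bus-throughput QoS request,
 * read and acknowledge the interrupt status, record the result in
 * rot->exec_ret (dumping the registers on an illegal setup), release
 * the runtime PM reference and wake up the waiting ioctl.
 */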
static irqreturn_t rotator_irq_thread(int irq, void *arg)
{
        struct rot_context *rot = (struct rot_context *)arg;
        enum rot_irq_status irq_status;
        unsigned long flags;

        pm_qos_update_request(&rot->pm_qos, 0);

        /* Get execution result */
        spin_lock_irqsave(&rot->irq_lock, flags);
        irq_status = rotator_reg_get_irq_status(rot);
        rotator_reg_set_irq_status_clear(rot, irq_status);
        spin_unlock_irqrestore(&rot->irq_lock, flags);

        rot->exec_ret = 0;
        if (irq_status != ROT_IRQ_STATUS_COMPLETE) {
                DRM_ERROR("the SFR is set illegally\n");
                rot->exec_ret = -EINVAL;
                rotator_reg_get_dump(rot);
        }

        pm_runtime_put(rot->subdrv.dev);

        complete(&rot->complete);

        return IRQ_HANDLED;
}

static int rotator_subdrv_open(struct drm_device *drm_dev, struct device *dev,
                                                        struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_rot_private *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                dev_err(dev, "failed to allocate priv\n");
                return -ENOMEM;
        }

        priv->dev = dev;
        INIT_LIST_HEAD(&priv->iommu_list);

        file_priv->rot_priv = priv;

        return 0;
}

static void rotator_subdrv_close(struct drm_device *drm_dev, struct device *dev,
                                                        struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_rot_private *priv = file_priv->rot_priv;
        struct iommu_gem_map_params params;
        struct iommu_info_node *node, *n;

        params.dev = dev;
        params.drm_dev = drm_dev;
        params.file = file;

        list_for_each_entry_safe(node, n, &priv->iommu_list, list) {
                params.gem_obj = node->gem_obj;
                exynos_drm_iommu_unmap_gem(&params, node->dma_addr,
                                                                IOMMU_ROTATOR);
                list_del(&node->list);
                kfree(node);
        }

        kfree(priv);
}

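/*
 * Probe: pick up the per-SoC size limits from the platform device id,
 * set up and activate the IOMMU, map the register window, request the
 * threaded interrupt and the clock, enable runtime PM and register the
 * rotator as an exynos_drm sub-driver. The error labels unwind these
 * steps in reverse order.
 */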
static int __devinit rotator_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rot_context *rot;
        struct resource *res;
        struct exynos_drm_subdrv *subdrv;
        int ret;

        rot = kzalloc(sizeof(*rot), GFP_KERNEL);
        if (!rot) {
                dev_err(dev, "failed to allocate rot\n");
                return -ENOMEM;
        }

        rot->limit_tbl = (struct rot_limit_table *)
                                platform_get_device_id(pdev)->driver_data;

        mutex_init(&rot->exec_mutex);
        spin_lock_init(&rot->irq_lock);

        ret = exynos_drm_iommu_setup(dev);
        if (ret < 0) {
                dev_err(dev, "failed to setup iommu\n");
                goto err_iommu_setup;
        }

        ret = exynos_drm_iommu_activate(dev);
        if (ret < 0) {
                dev_err(dev, "failed to activate iommu\n");
                goto err_iommu_activate;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "failed to find registers\n");
                ret = -ENOENT;
                goto err_get_resource;
        }

        rot->regs_res = request_mem_region(res->start, resource_size(res),
                                                                dev_name(dev));
        if (!rot->regs_res) {
                dev_err(dev, "failed to claim register region\n");
                ret = -ENOENT;
                goto err_get_resource;
        }

        rot->regs = ioremap(res->start, resource_size(res));
        if (!rot->regs) {
                dev_err(dev, "failed to map register\n");
                ret = -ENXIO;
                goto err_ioremap;
        }

        rot->irq = platform_get_irq(pdev, 0);
        if (rot->irq < 0) {
                dev_err(dev, "failed to get irq\n");
                ret = rot->irq;
                goto err_get_irq;
        }

        ret = request_threaded_irq(rot->irq, NULL, rotator_irq_thread,
                                        IRQF_ONESHOT, "drm_rotator", rot);
        if (ret < 0) {
                dev_err(dev, "failed to request irq\n");
                goto err_get_irq;
        }

        rot->clock = clk_get(dev, "rotator");
        if (IS_ERR_OR_NULL(rot->clock)) {
                dev_err(dev, "failed to get clock\n");
                ret = PTR_ERR(rot->clock);
                goto err_clk_get;
        }

        pm_runtime_enable(dev);
        pm_qos_add_request(&rot->pm_qos, PM_QOS_BUS_DMA_THROUGHPUT, 0);

        subdrv = &rot->subdrv;
        subdrv->dev = dev;
        subdrv->open = rotator_subdrv_open;
        subdrv->close = rotator_subdrv_close;

        platform_set_drvdata(pdev, rot);

        ret = exynos_drm_subdrv_register(subdrv);
        if (ret < 0) {
                dev_err(dev, "failed to register drm rotator device\n");
                goto err_subdrv_register;
        }

        dev_info(dev, "The exynos rotator is probed successfully\n");

        return 0;

err_subdrv_register:
        pm_runtime_disable(dev);
        clk_put(rot->clock);
err_clk_get:
        free_irq(rot->irq, rot);
err_get_irq:
        iounmap(rot->regs);
err_ioremap:
        release_resource(rot->regs_res);
        kfree(rot->regs_res);
err_get_resource:
        exynos_drm_iommu_deactivate(dev);
err_iommu_activate:
        exynos_drm_iommu_cleanup(dev);
err_iommu_setup:
        kfree(rot);
        return ret;
}

static int __devexit rotator_remove(struct platform_device *pdev)
{
        struct rot_context *rot = platform_get_drvdata(pdev);

        pm_qos_remove_request(&rot->pm_qos);

        exynos_drm_subdrv_unregister(&rot->subdrv);

        pm_runtime_disable(&pdev->dev);
        clk_put(rot->clock);

        free_irq(rot->irq, rot);

        iounmap(rot->regs);

        release_resource(rot->regs_res);
        kfree(rot->regs_res);

        exynos_drm_iommu_deactivate(&pdev->dev);
        exynos_drm_iommu_cleanup(&pdev->dev);

        kfree(rot);

        return 0;
}

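/*
 * Per-format size limits. "align" is a power-of-two exponent, so
 * YCbCr420 2-plane sizes are rounded to multiples of 8 pixels within
 * 32x32..32768x32768, and RGB888 sizes to multiples of 4 within
 * 8x8..8192x8192.
 */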
struct rot_limit_table rot_limit_tbl = {
        .ycbcr420_2p = {
                .min_w = 32,
                .min_h = 32,
                .max_w = SZ_32K,
                .max_h = SZ_32K,
                .align = 3,
        },
        .rgb888 = {
                .min_w = 8,
                .min_h = 8,
                .max_w = SZ_8K,
                .max_h = SZ_8K,
                .align = 2,
        },
};

struct platform_device_id rotator_driver_ids[] = {
        {
                .name           = "exynos-rot",
                .driver_data    = (unsigned long)&rot_limit_tbl,
        },
        {},
};

#ifdef CONFIG_PM_SLEEP
static int rotator_suspend(struct device *dev)
{
        struct rot_context *rot = dev_get_drvdata(dev);

        /* Check & wait for running state */
        mutex_lock(&rot->exec_mutex);
        mutex_unlock(&rot->exec_mutex);

        rot->suspended = true;

        exynos_drm_iommu_deactivate(dev);

        return 0;
}

static int rotator_resume(struct device *dev)
{
        struct rot_context *rot = dev_get_drvdata(dev);

        rot->suspended = false;

        exynos_drm_iommu_activate(dev);

        return 0;
}
#endif

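/*
 * Runtime PM simply gates the rotator clock. Runtime resume also
 * raises the bus-throughput QoS request (the 400000 value is
 * presumably in KB/s, matching the PM_QOS_BUS_DMA_THROUGHPUT class);
 * it is dropped back to 0 from the interrupt thread once a job
 * finishes.
 */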
#ifdef CONFIG_PM_RUNTIME
static int rotator_runtime_suspend(struct device *dev)
{
        struct rot_context *rot = dev_get_drvdata(dev);

        clk_disable(rot->clock);

        return 0;
}

static int rotator_runtime_resume(struct device *dev)
{
        struct rot_context *rot = dev_get_drvdata(dev);

        clk_enable(rot->clock);
        pm_qos_update_request(&rot->pm_qos, 400000);

        return 0;
}
#endif

static const struct dev_pm_ops rotator_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
        SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
                                                                        NULL)
};

struct platform_driver rotator_driver = {
        .probe          = rotator_probe,
        .remove         = __devexit_p(rotator_remove),
        .id_table       = rotator_driver_ids,
        .driver         = {
                .name   = "exynos-rot",
                .owner  = THIS_MODULE,
                .pm     = &rotator_pm_ops,
        },
};