/* linux/drivers/media/video/videobuf2-sdvmm.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com/
 *
 * Implementation of SDVMM memory allocator for videobuf2
 * SDVMM : Shared Device Virtual Memory Management
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cma.h>
#include <linux/vcm-drv.h>

#include <asm/cacheflush.h>

#include <plat/s5p-vcm.h>
#include <media/videobuf2-sdvmm.h>

#include "ump_kernel_interface.h"
#include "ump_kernel_interface_ref_drv.h"
#include "ump_kernel_interface_vcm.h"

static int sdvmm_debug;
module_param(sdvmm_debug, int, 0644);
#define dbg(level, fmt, arg...)                                         \
        do {                                                            \
                if (sdvmm_debug >= level)                               \
                        printk(KERN_DEBUG "vb2_sdvmm: " fmt, ## arg);   \
        } while (0)

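/*
 * Debug verbosity is controlled by the sdvmm_debug module parameter.  Since
 * it is a module_param with mode 0644, it can normally be raised at runtime,
 * e.g. (path assuming the module is built as videobuf2_sdvmm):
 *
 *      echo 6 > /sys/module/videobuf2_sdvmm/parameters/sdvmm_debug
 */
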
/*
 * One 720p YUV 4:2:0 frame (1.5 bytes per pixel); buffers larger than this
 * are flushed with a full cache flush rather than by range.
 */
#define SIZE_THRESHOLD (1280 * 720 * 3 / 2)

struct vb2_sdvmm_conf {
        spinlock_t              slock;

        /* For CMA */
        struct device           *dev;
        const char              *type;
        unsigned long           alignment;
        bool                    use_cma;

        /* For VCMM */
        struct vcm              *vcm_ctx;
        enum vcm_dev_id         vcm_id;

        /* SYS.MMU */
        bool                    mmu_clk;

        bool                    cacheable;
        bool                    remap_dva;
};

struct vb2_sdvmm_buf {
        struct vm_area_struct           *vma;
        struct vb2_sdvmm_conf           *conf;
        struct vb2_vmarea_handler       handler;

        atomic_t                        ref;
        unsigned long                   size;

        struct vcm_res                  *vcm_res;
        struct vcm_res                  *vcm_res_kern;
        ump_dd_handle                   ump_dd_handle;
        size_t                          dva_offset;

        bool                            cacheable;
        bool                            remap_dva;
};

static void vb2_sdvmm_put(void *buf_priv);
static int _vb2_sdvmm_mmap_pfn_range(struct vm_area_struct *vma,
                                     struct vcm_phys *vcm_phys,
                                     unsigned long size,
                                     const struct vm_operations_struct *vm_ops,
                                     void *priv);

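/*
 * _vb2_sdvmm_ump_register - wrap a buffer's physical parts in a UMP handle
 *
 * Builds a ump_dd_physical_block array from the VCM physical parts of @buf,
 * creates a UMP handle for them and attaches the VCM reservation (context,
 * reservation and device id) as UMP meminfo, so that other drivers can look
 * the buffer up later by its UMP secure id.
 */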
static void *_vb2_sdvmm_ump_register(struct vb2_sdvmm_buf *buf)
{
        struct vcm_phys_part    *part = buf->vcm_res->phys->parts;
        ump_dd_physical_block   *blocks;
        ump_dd_handle           handle;
        struct ump_vcm          ump_vcm;
        int num_blocks = buf->vcm_res->phys->count;
        int block_size, i;

        block_size = sizeof(ump_dd_physical_block) * num_blocks;
        blocks = vmalloc(block_size);
        if (!blocks)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < num_blocks; i++) {
                blocks[i].addr = part->start;
                blocks[i].size = part->size;
                ++part;

                dbg(6, "block addr(0x%08x), size(0x%08x)\n",
                        (u32)blocks[i].addr, (u32)blocks[i].size);
        }

        handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
        vfree(blocks);
        if (handle == UMP_DD_HANDLE_INVALID) {
                pr_err("ump_dd_handle_create_from_phys_blocks failed\n");
                return ERR_PTR(-ENOMEM);
        }

        ump_vcm.vcm = buf->conf->vcm_ctx;
        ump_vcm.vcm_res = buf->vcm_res;
        ump_vcm.dev_id = buf->conf->vcm_id;

        if (ump_dd_meminfo_set(handle, (void *)&ump_vcm)) {
                ump_dd_reference_release(handle);
                return ERR_PTR(-EINVAL);
        }

        return (void *)handle;
}

static void _vb2_sdvmm_cma_free(struct vcm_phys *vcm_phys)
{
        cma_free(vcm_phys->parts[0].start);
        kfree(vcm_phys);
}

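/*
 * vb2_sdvmm_alloc - MMAP-type buffer allocation
 *
 * Allocates physical memory either contiguously via CMA (conf->use_cma) or
 * through the VCM allocator, maps it into the device virtual address space
 * with vcm_map(), registers the result with UMP and hooks up the
 * vb2_vmarea_handler used for mmap() refcounting.
 */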
static void *vb2_sdvmm_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_sdvmm_conf   *conf = alloc_ctx;
        struct vb2_sdvmm_buf    *buf;
        struct vcm_phys         *vcm_phys = NULL;
        dma_addr_t              paddr;
        unsigned long           aligned_size = ALIGN(size, SZ_4K);
        int ret;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf) {
                pr_err("no memory for vb2_sdvmm_buf\n");
                return ERR_PTR(-ENOMEM);
        }

        /* Set vb2_sdvmm_buf.conf and size */
        buf->conf = conf;
        buf->size = size;
        buf->cacheable = conf->cacheable;

        /* Allocate: physical memory */
        if (conf->use_cma) {    /* physically contiguous memory allocation */
                paddr = cma_alloc(conf->dev, conf->type, size, conf->alignment);
                if (IS_ERR((void *)paddr)) {
                        pr_err("cma_alloc of size %ld failed\n", size);
                        ret = -ENOMEM;
                        goto err_alloc;
                }

                vcm_phys = kzalloc(sizeof(*vcm_phys) + sizeof(*vcm_phys->parts),
                                   GFP_KERNEL);
                if (!vcm_phys) {
                        cma_free(paddr);
                        ret = -ENOMEM;
                        goto err_alloc;
                }
                vcm_phys->count = 1;
                vcm_phys->size = aligned_size;
                vcm_phys->free = _vb2_sdvmm_cma_free;
                vcm_phys->parts[0].start = paddr;
                vcm_phys->parts[0].size = aligned_size;
        } else {
                vcm_phys = vcm_alloc(conf->vcm_ctx, aligned_size, 0);
                if (IS_ERR(vcm_phys)) {
                        pr_err("vcm_alloc of size %ld failed\n", size);
                        ret = -ENOMEM;
                        goto err_alloc;
                }
        }
        dbg(6, "PA(0x%x)\n", vcm_phys->parts[0].start);

        /* Reserve & Bind: device virtual address */
        buf->vcm_res = vcm_map(conf->vcm_ctx, vcm_phys, 0);
        if (IS_ERR(buf->vcm_res)) {
                pr_err("vcm_map of size %ld failed\n", size);
                ret = -ENOMEM;
                goto err_map;
        }
        dbg(6, "DVA(0x%x)\n", buf->vcm_res->start);

        /* Register: UMP */
        buf->ump_dd_handle = _vb2_sdvmm_ump_register(buf);
        if (IS_ERR(buf->ump_dd_handle)) {
                pr_err("ump_register failed\n");
                ret = -ENOMEM;
                goto err_ump;
        }

        /* Set struct vb2_vmarea_handler */
        buf->handler.refcount = &buf->ref;
        buf->handler.put = vb2_sdvmm_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->ref);

        return buf;

err_ump:
        vcm_unmap(buf->vcm_res);

err_map:
        vcm_free(vcm_phys);

err_alloc:
        kfree(buf);

        return ERR_PTR(ret);
}

static void vb2_sdvmm_put(void *buf_priv)
{
        struct vb2_sdvmm_buf *buf = buf_priv;

        if (atomic_dec_and_test(&buf->ref)) {
                if (buf->vcm_res_kern)
                        vcm_unmap(buf->vcm_res_kern);

                ump_dd_reference_release(buf->ump_dd_handle);

                dbg(6, "released: buf_refcnt(%d)\n", atomic_read(&buf->ref));

                kfree(buf);
        }
}

/**
 * _vb2_get_sdvmm_userptr() - lock userspace mapped memory
 * @vaddr:      starting virtual address of the area to be verified
 * @size:       size of the area
 * @res_vma:    will return a locked copy of struct vm_area for the given area
 *
 * This function checks the memory area of size @size mapped at @vaddr.  If
 * the whole area lies within a single VMA, that virtual memory area is locked
 * and @res_vma is filled with a copy of it.
 *
 * Returns 0 on success.
 */
static int _vb2_get_sdvmm_userptr(unsigned long vaddr, unsigned long size,
                                  struct vm_area_struct **res_vma)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start, end;
        int ret = -EFAULT;

        start = vaddr;
        end = start + size;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);

        if (vma == NULL || vma->vm_start > start || vma->vm_end < end)
                goto done;

        /* Lock vma and return to the caller */
        *res_vma = vb2_get_vma(vma);
        if (*res_vma == NULL) {
                ret = -ENOMEM;
                goto done;
        }
        ret = 0;

done:
        up_read(&mm->mmap_sem);
        return ret;
}

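/*
 * vb2_sdvmm_get_userptr - USERPTR buffer import
 *
 * The user buffer must already have been registered with UMP; it is looked up
 * by the secure id behind @vaddr.  The VCM reservation stored as UMP meminfo
 * by the exporting driver is retrieved, optionally remapped into this
 * device's address space when conf->remap_dva is set, and the user VMA is
 * locked for the lifetime of the buffer.
 */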
static void *vb2_sdvmm_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                   unsigned long size, int write)
{
        struct vb2_sdvmm_conf *conf = alloc_ctx;
        struct vb2_sdvmm_buf *buf = NULL;
        struct vm_area_struct *vma = NULL;
        struct vcm *vcm = NULL;
        struct vcm_res *vcm_res = NULL;
        ump_dd_handle ump_dd_handle = NULL;
        ump_secure_id secure_id = 0;
        size_t offset = 0;
        int ret;

        /* The buffer must already be registered with UMP before QBUF */
        ret = ump_dd_secure_id_get_from_vaddr(vaddr, &secure_id, &offset);
        if (ret) {
                pr_err("fail: get SecureID from vaddr(0x%08x)\n", (u32)vaddr);
                return ERR_PTR(-EINVAL);
        }

        ump_dd_handle = ump_dd_handle_create_from_secure_id(secure_id);
        if (ump_dd_handle == UMP_DD_HANDLE_INVALID) {
                pr_err("ump_dd_handle_create_from_secure_id failed\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf) {
                ump_dd_reference_release(ump_dd_handle);
                return ERR_PTR(-ENOMEM);
        }

        buf->vcm_res = (struct vcm_res *)ump_dd_meminfo_get(secure_id,
                                                        (void *)conf->vcm_id);
        if (buf->vcm_res == NULL) {
                pr_err("ump_dd_meminfo_get failed\n");
                ump_dd_reference_release(ump_dd_handle);
                kfree(buf);
                return ERR_PTR(-EINVAL);
        }

        buf->dva_offset = offset;
        dbg(6, "dva(0x%x), size(0x%x), offset(0x%x)\n",
                        (u32)buf->vcm_res->start, (u32)size, (u32)offset);

        vcm = vcm_find_vcm(conf->vcm_id);
        switch (vcm_reservation_in_vcm(vcm, buf->vcm_res)) {
        case S5PVCM_RES_IN_VCM: /* No need to remap */
                break;

        case S5PVCM_RES_IN_ADDRSPACE:
                if (conf->remap_dva) {  /* Need to remap */
                        vcm_res = buf->vcm_res;
                        buf->vcm_res = vcm_map(vcm, vcm_res->phys, 0);
                        if (IS_ERR(buf->vcm_res)) {
                                pr_err("vcm_map for remap failed\n");
                                ump_dd_reference_release(ump_dd_handle);
                                kfree(buf);
                                return ERR_PTR(-ENOMEM);
                        }
                        buf->remap_dva = true;
                        dbg(6, "remap: dva(0x%x)\n", (u32)buf->vcm_res->start);
                }

                break;

        case S5PVCM_RES_NOT_IN_VCM:
                pr_err("fail: vcm_reservation_in_vcm\n");
                ump_dd_reference_release(ump_dd_handle);
                kfree(buf);
                return ERR_PTR(-EINVAL);
        }

        ret = _vb2_get_sdvmm_userptr(vaddr, size, &vma);
        if (ret) {
                pr_err("Failed acquiring VMA 0x%08lx\n", vaddr);
                if (buf->remap_dva)
                        vcm_unmap(buf->vcm_res);
                ump_dd_reference_release(ump_dd_handle);
                kfree(buf);
                return ERR_PTR(ret);
        }

        buf->conf = conf;
        buf->size = size;
        buf->vma = vma;
        buf->ump_dd_handle = ump_dd_handle;

        return buf;
}

static void vb2_sdvmm_put_userptr(void *mem_priv)
{
        struct vb2_sdvmm_buf *buf = mem_priv;

        if (!buf) {
                pr_err("No buffer to put\n");
                return;
        }

        if (buf->remap_dva)     /* Need to unmap */
                vcm_unmap(buf->vcm_res);

        ump_dd_reference_release(buf->ump_dd_handle);

        vb2_put_vma(buf->vma);

        kfree(buf);
}

static void *vb2_sdvmm_cookie(void *buf_priv)
{
        struct vb2_sdvmm_buf *buf = buf_priv;

        return (void *)(buf->vcm_res->start + buf->dva_offset);
}

static void *vb2_sdvmm_vaddr(void *buf_priv)
{
        struct vb2_sdvmm_buf *buf = buf_priv;

        if (!buf) {
                pr_err("failed to get buffer\n");
                return NULL;
        }

        if (!buf->vcm_res_kern) {
                buf->vcm_res_kern = vcm_map(vcm_vmm, buf->vcm_res->phys, 0);
                if (IS_ERR(buf->vcm_res_kern)) {
                        pr_err("failed to get kernel virtual\n");
                        buf->vcm_res_kern = NULL;
                        return NULL;
                }
        }

        return (void *)buf->vcm_res_kern->start;
}

static unsigned int vb2_sdvmm_num_users(void *buf_priv)
{
        struct vb2_sdvmm_buf *buf = buf_priv;

        return atomic_read(&buf->ref);
}

static int vb2_sdvmm_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_sdvmm_buf *buf = buf_priv;

        if (!buf) {
                pr_err("No buffer to map\n");
                return -EINVAL;
        }

        if (!buf->cacheable)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return _vb2_sdvmm_mmap_pfn_range(vma, buf->vcm_res->phys, buf->size,
                                &vb2_common_vm_ops, &buf->handler);
}

const struct vb2_mem_ops vb2_sdvmm_memops = {
        .alloc          = vb2_sdvmm_alloc,
        .put            = vb2_sdvmm_put,
        .cookie         = vb2_sdvmm_cookie,
        .vaddr          = vb2_sdvmm_vaddr,
        .mmap           = vb2_sdvmm_mmap,
        .get_userptr    = vb2_sdvmm_get_userptr,
        .put_userptr    = vb2_sdvmm_put_userptr,
        .num_users      = vb2_sdvmm_num_users,
};
EXPORT_SYMBOL_GPL(vb2_sdvmm_memops);
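
/*
 * Usage sketch (illustrative only, not taken from a real driver): a capture
 * driver would typically point its vb2_queue at these memops,
 *
 *      vq->mem_ops = &vb2_sdvmm_memops;
 *
 * and return the allocator context obtained from vb2_sdvmm_init() (held here
 * in a hypothetical dev->alloc_ctx) through the alloc_ctxs[] array of its
 * queue_setup() callback.  The exact wiring depends on the videobuf2 core in
 * this tree.
 */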

void vb2_sdvmm_set_cacheable(void *alloc_ctx, bool cacheable)
{
        ((struct vb2_sdvmm_conf *)alloc_ctx)->cacheable = cacheable;
}

bool vb2_sdvmm_get_cacheable(void *alloc_ctx)
{
        return ((struct vb2_sdvmm_conf *)alloc_ctx)->cacheable;
}

static void _vb2_sdvmm_cache_flush_all(void)
{
        flush_cache_all();      /* L1 */
        outer_flush_all();      /* L2 */
}

static void _vb2_sdvmm_cache_flush_range(struct vb2_sdvmm_buf *buf)
{
        struct vcm_phys *vcm_phys = buf->vcm_res->phys;
        phys_addr_t start, end;
        int count = vcm_phys->count;
        int i;

        /* traverse the physical parts sequentially */
        for (i = 0; i < count; i++) {
                start = vcm_phys->parts[i].start;
                end = start + vcm_phys->parts[i].size;

                dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
                outer_flush_range(start, end);  /* L2 */
        }
}

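/*
 * vb2_sdvmm_cache_flush - flush CPU caches for a cacheable plane
 *
 * For buffers larger than SIZE_THRESHOLD a full L1/L2 flush is used, since
 * flushing the whole cache is cheaper than walking a large buffer line by
 * line; smaller buffers are flushed by physical range.
 */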
int vb2_sdvmm_cache_flush(void *alloc_ctx, struct vb2_buffer *vb, u32 plane_no)
{
        struct vb2_sdvmm_buf *buf = vb->planes[plane_no].mem_priv;

        if (!buf->cacheable) {
                pr_warning("This is a non-cacheable buffer allocator\n");
                return -EINVAL;
        }

        if (buf->size > SIZE_THRESHOLD)
                _vb2_sdvmm_cache_flush_all();
        else
                _vb2_sdvmm_cache_flush_range(buf);

        return 0;
}

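/*
 * vb2_sdvmm_suspend/vb2_sdvmm_resume - gate the SYSMMU for this context
 *
 * mmu_clk tracks whether the SYSMMU is currently enabled; the state is
 * protected by conf->slock so that the driver's suspend and resume paths
 * cannot race each other.
 */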
void vb2_sdvmm_suspend(void *alloc_ctx)
{
        struct vb2_sdvmm_conf *conf = alloc_ctx;
        unsigned long flags;

        spin_lock_irqsave(&conf->slock, flags);
        if (!conf->mmu_clk) {
                pr_warning("Already suspended: vcm_id(%d)\n", conf->vcm_id);
                spin_unlock_irqrestore(&conf->slock, flags);
                return;
        }

        conf->mmu_clk = false;
        s5p_vcm_turn_off(conf->vcm_ctx);

        spin_unlock_irqrestore(&conf->slock, flags);
}

void vb2_sdvmm_resume(void *alloc_ctx)
{
        struct vb2_sdvmm_conf *conf = alloc_ctx;
        unsigned long flags;

        spin_lock_irqsave(&conf->slock, flags);

        if (conf->mmu_clk) {
                pr_warning("Already resumed: vcm_id(%d)\n", conf->vcm_id);
                spin_unlock_irqrestore(&conf->slock, flags);
                return;
        }

        conf->mmu_clk = true;
        s5p_vcm_turn_on(conf->vcm_ctx);

        spin_unlock_irqrestore(&conf->slock, flags);
}

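/*
 * vb2_sdvmm_init - create a single-plane allocator context
 *
 * Creates and activates a VCM context for vcm->vcm_id.  When @cma is
 * non-NULL the context allocates physically contiguous memory through CMA;
 * otherwise vcm_alloc() is used.  @drv carries the cacheable/remap_dva
 * policy.  The returned pointer is the alloc_ctx handed to the memops.
 */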
void *vb2_sdvmm_init(struct vb2_vcm *vcm,
                     struct vb2_cma *cma,
                     struct vb2_drv *drv)
{
        struct vb2_sdvmm_conf *conf;
        int ret;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        if (cma != NULL) {
                conf->dev       = cma->dev;
                conf->type      = cma->type;
                conf->alignment = cma->alignment;
                conf->use_cma   = true;
        }

        conf->vcm_id = vcm->vcm_id;
        conf->vcm_ctx = vcm_create_unified(vcm->size, vcm->vcm_id, NULL);
        if (IS_ERR(conf->vcm_ctx)) {
                pr_err("vcm_create_unified failed: vcm_id(%d), size(%ld)\n",
                                conf->vcm_id, (long int)vcm->size);
                goto err_vcm_create;
        }

        s5p_vcm_turn_off(conf->vcm_ctx);
        ret = vcm_activate(conf->vcm_ctx);
        if (ret < 0) {
                pr_err("vcm_activate failed\n");
                goto err_vcm_activate;
        }

        conf->mmu_clk   = false;
        conf->cacheable = drv->cacheable;
        conf->remap_dva = drv->remap_dva;

        spin_lock_init(&conf->slock);

        return conf;

err_vcm_activate:
        s5p_vcm_turn_off(conf->vcm_ctx);
        vcm_destroy(conf->vcm_ctx);

err_vcm_create:
        kfree(conf);

        return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_init);
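
/*
 * Example (illustrative only; VCM_DEV_EXAMPLE and the size are hypothetical
 * values): a driver probe routine would typically create one context with
 *
 *      struct vb2_vcm vcm = { .vcm_id = VCM_DEV_EXAMPLE, .size = SZ_256M };
 *      struct vb2_drv drv = { .cacheable = false, .remap_dva = true };
 *      void *alloc_ctx = vb2_sdvmm_init(&vcm, NULL, &drv);
 *
 * Passing cma == NULL selects vcm_alloc() for physical memory; a filled-in
 * struct vb2_cma (dev/type/alignment) switches allocation to cma_alloc().
 */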

void vb2_sdvmm_cleanup(void *alloc_ctx)
{
        struct vb2_sdvmm_conf *local_conf = alloc_ctx;

        vcm_deactivate(local_conf->vcm_ctx);
        vcm_destroy(local_conf->vcm_ctx);
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_cleanup);

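/*
 * vb2_sdvmm_init_multi - create per-plane allocator contexts
 *
 * Allocates @num_planes contexts in one block, all sharing a single VCM
 * context (and therefore one device address space).  @cma may supply a
 * per-plane CMA region; planes without one fall back to vcm_alloc().
 */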
void **vb2_sdvmm_init_multi(unsigned int num_planes,
                            struct vb2_vcm *vcm,
                            struct vb2_cma *cma[],
                            struct vb2_drv *drv)
{
        struct vb2_sdvmm_conf *conf;
        struct vcm *vcm_ctx;
        void **alloc_ctxes;
        u32 i;
        int ret;

        /* allocate structure of alloc_ctxes */
        alloc_ctxes = kzalloc((sizeof *alloc_ctxes + sizeof *conf) * num_planes,
                              GFP_KERNEL);
        if (!alloc_ctxes)
                return ERR_PTR(-ENOMEM);

        vcm_ctx = vcm_create_unified(vcm->size, vcm->vcm_id, NULL);
        if (IS_ERR(vcm_ctx)) {
                pr_err("vcm_create_unified of size %ld failed\n",
                                (long int)vcm->size);
                goto err_vcm_create;
        }

        s5p_vcm_turn_off(vcm_ctx);
        ret = vcm_activate(vcm_ctx);
        if (ret < 0) {
                pr_err("vcm_activate failed\n");
                goto err_vcm_activate;
        }

        conf = (void *)(alloc_ctxes + num_planes);

        for (i = 0; i < num_planes; ++i, ++conf) {
                alloc_ctxes[i] = conf;
                if ((cma != NULL) && (cma[i] != NULL)) {
                        conf->dev       = cma[i]->dev;
                        conf->type      = cma[i]->type;
                        conf->alignment = cma[i]->alignment;
                        conf->use_cma   = true;
                }
                conf->vcm_ctx   = vcm_ctx;
                conf->vcm_id    = vcm->vcm_id;
                conf->mmu_clk   = false;
                conf->cacheable = drv->cacheable;
                conf->remap_dva = drv->remap_dva;
                spin_lock_init(&conf->slock);
        }

        return alloc_ctxes;

err_vcm_activate:
        s5p_vcm_turn_off(vcm_ctx);
        vcm_destroy(vcm_ctx);

err_vcm_create:
        kfree(alloc_ctxes);

        return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_init_multi);

void vb2_sdvmm_cleanup_multi(void **alloc_ctxes)
{
        struct vb2_sdvmm_conf *local_conf = alloc_ctxes[0];

        vcm_deactivate(local_conf->vcm_ctx);
        vcm_destroy(local_conf->vcm_ctx);

        kfree(alloc_ctxes);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_cleanup_multi);

/**
 * _vb2_sdvmm_mmap_pfn_range() - map physical pages (vcm) to userspace
 * @vma:        virtual memory region for the mapping
 * @vcm_phys:   vcm physical group information to be mapped
 * @size:       size of the memory to be mapped
 * @vm_ops:     vm operations to be assigned to the created area
 * @priv:       private data to be associated with the area
 *
 * Returns 0 on success.
 */
static int _vb2_sdvmm_mmap_pfn_range(struct vm_area_struct *vma,
                                      struct vcm_phys *vcm_phys,
                                      unsigned long size,
                                      const struct vm_operations_struct *vm_ops,
                                      void *priv)
{
        unsigned long org_vm_start = vma->vm_start;
        int ret, i;
        int count = vcm_phys->count;
        int mapped_size = 0;
        int vma_size = vma->vm_end - vma->vm_start;
        int remap_break = 0;
        resource_size_t remap_size;

        /* map each physical part to user virtual addresses in sequence */
        for (i = 0; (i < count && !remap_break); i++) {
                if ((mapped_size + vcm_phys->parts[i].size) > vma_size) {
                        remap_size = vma_size - mapped_size;
                        remap_break = 1;
                } else {
                        remap_size = vcm_phys->parts[i].size;
                }

                ret = remap_pfn_range(vma, vma->vm_start,
                                vcm_phys->parts[i].start >> PAGE_SHIFT,
                                remap_size, vma->vm_page_prot);
                if (ret) {
                        pr_err("Remapping failed, error: %d\n", ret);
                        return ret;
                }

                dbg(6, "%dth page vaddr(0x%08x), paddr(0x%08x), size(0x%08x)\n",
                        i, (u32)vma->vm_start, vcm_phys->parts[i].start,
                        vcm_phys->parts[i].size);

                mapped_size += remap_size;
                vma->vm_start += remap_size;
        }

        WARN_ON(size > mapped_size);

        /* re-assign initial start address */
        vma->vm_start           = org_vm_start;
        vma->vm_flags           |= VM_DONTEXPAND | VM_RESERVED;
        vma->vm_private_data    = priv;
        vma->vm_ops             = vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

MODULE_AUTHOR("Sewoon Park <senui.park@samsung.com>");
MODULE_AUTHOR("Jonghun, Han <jonghun.han@samsung.com>");
MODULE_DESCRIPTION("SDVMM allocator handling routines for videobuf2");
MODULE_LICENSE("GPL");