/* linux/drivers/media/video/videobuf2-sdvmm.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * Implementation of SDVMM memory allocator for videobuf2
 * SDVMM : Shared Device Virtual Memory Management
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cma.h>
#include <linux/vcm-drv.h>

#include <asm/cacheflush.h>

#include <plat/s5p-vcm.h>
#include <media/videobuf2-sdvmm.h>

#include "ump_kernel_interface.h"
#include "ump_kernel_interface_ref_drv.h"
#include "ump_kernel_interface_vcm.h"

static int sdvmm_debug;
module_param(sdvmm_debug, int, 0644);
#define dbg(level, fmt, arg...)						\
	do {								\
		if (sdvmm_debug >= level)				\
			printk(KERN_DEBUG "vb2_sdvmm: " fmt, ## arg);	\
	} while (0)

/* buffers larger than this get a full cache flush instead of a range flush */
#define SIZE_THRESHOLD		(1280 * 720 * 3 / 2)

struct vb2_sdvmm_conf {
	/* CMA */
	struct device		*dev;
	const char		*type;
	unsigned long		alignment;

	/* VCM */
	struct vcm		*vcm_ctx;
	enum vcm_dev_id		vcm_id;

	spinlock_t		slock;

	bool			mmu_clk;
	bool			cacheable;
	bool			remap_dva;
	bool			use_cma;
};

struct vb2_sdvmm_buf {
	struct vm_area_struct		*vma;
	struct vb2_sdvmm_conf		*conf;
	struct vb2_vmarea_handler	handler;

	atomic_t			ref;
	unsigned long			size;

	struct vcm_res			*vcm_res;
	struct vcm_res			*vcm_res_kern;
	ump_dd_handle			ump_dd_handle;

	unsigned int			dva_offset;

	bool				cacheable;
	bool				remap_dva;
};

static void vb2_sdvmm_put(void *buf_priv);
static int _vb2_sdvmm_mmap_pfn_range(struct vm_area_struct *vma,
				     struct vcm_phys *vcm_phys,
				     unsigned long size,
				     const struct vm_operations_struct *vm_ops,
				     void *priv);

static void *_vb2_sdvmm_ump_register(struct vb2_sdvmm_buf *buf)
{
	struct vcm_phys_part *part = buf->vcm_res->phys->parts;
	ump_dd_physical_block *blocks;
	ump_dd_handle handle;
	struct ump_vcm ump_vcm;
	int num_blocks = buf->vcm_res->phys->count;
	int block_size;
	int i;

	block_size = sizeof(ump_dd_physical_block) * num_blocks;
	blocks = (ump_dd_physical_block *)vmalloc(block_size);
	if (!blocks)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num_blocks; i++, part++) {
		blocks[i].addr = part->start;
		blocks[i].size = part->size;

		dbg(6, "block addr(0x%08x), size(0x%08x)\n",
			(u32)blocks[i].addr, (u32)blocks[i].size);
	}

	handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);

	vfree(blocks);

	if (handle == UMP_DD_HANDLE_INVALID) {
		pr_err("ump_dd_handle_create_from_phys_blocks failed\n");
		return ERR_PTR(-ENOMEM);
	}

	ump_vcm.vcm = buf->conf->vcm_ctx;
	ump_vcm.vcm_res = buf->vcm_res;
	ump_vcm.dev_id = buf->conf->vcm_id;

	if (ump_dd_meminfo_set(handle, (void *)&ump_vcm)) {
		ump_dd_reference_release(handle);
		return ERR_PTR(-EINVAL);
	}

	return (void *)handle;
}

static void _vb2_sdvmm_cma_free(struct vcm_phys *vcm_phys)
{
	cma_free(vcm_phys->parts[0].start);

	kfree(vcm_phys);
}

static void *vb2_sdvmm_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_sdvmm_conf *conf = alloc_ctx;
	struct vb2_sdvmm_buf *buf;
	struct vcm_phys *vcm_phys = NULL;
	dma_addr_t paddr;
	unsigned long aligned_size = ALIGN(size, SZ_4K);
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		pr_err("no memory for vb2_sdvmm_buf\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Set vb2_sdvmm_buf.conf and size */
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	/* Allocate: physical memory */
	if (conf->use_cma) {	/* physically contiguous memory allocation */
		paddr = cma_alloc(conf->dev, conf->type, size, conf->alignment);
		if (IS_ERR_VALUE(paddr)) {
			pr_err("cma_alloc of size %ld failed\n", size);
			goto err_alloc;
		}

		vcm_phys = kzalloc(sizeof(*vcm_phys) + sizeof(*vcm_phys->parts),
				   GFP_KERNEL);
		vcm_phys->count = 1;
		vcm_phys->size = aligned_size;
		vcm_phys->free = _vb2_sdvmm_cma_free;
		vcm_phys->parts[0].start = paddr;
		vcm_phys->parts[0].size = aligned_size;
	} else {
		vcm_phys = vcm_alloc(conf->vcm_ctx, aligned_size, 0);
		if (IS_ERR(vcm_phys)) {
			pr_err("vcm_alloc of size %ld failed\n", size);
			goto err_alloc;
		}
	}

	dbg(6, "PA(0x%x)\n", vcm_phys->parts[0].start);

	/* Reserve & Bind: device virtual address */
	buf->vcm_res = vcm_map(conf->vcm_ctx, vcm_phys, 0);
	if (IS_ERR(buf->vcm_res)) {
		pr_err("vcm_map of size %ld failed\n", size);
		goto err_map;
	}

	dbg(6, "DVA(0x%x)\n", buf->vcm_res->start);

	/* Register: UMP */
	buf->ump_dd_handle = _vb2_sdvmm_ump_register(buf);
	if (IS_ERR(buf->ump_dd_handle)) {
		pr_err("ump_register failed\n");
		goto err_ump;
	}

	/* Set struct vb2_vmarea_handler */
	buf->handler.refcount = &buf->ref;
	buf->handler.put = vb2_sdvmm_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->ref);

	return buf;

err_ump:
	vcm_unmap(buf->vcm_res);
err_map:
	vcm_free(vcm_phys);
err_alloc:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_sdvmm_put(void *buf_priv)
{
	struct vb2_sdvmm_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->ref)) {
		if (buf->vcm_res_kern)
			vcm_unmap(buf->vcm_res_kern);

		ump_dd_reference_release(buf->ump_dd_handle);

		dbg(6, "released: buf_refcnt(%d)\n", atomic_read(&buf->ref));

		kfree(buf);
	}
}

/**
 * _vb2_get_sdvmm_userptr() - lock userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return locked copy of struct vm_area for the given area
 *
 * This function checks the memory area of size @size mapped at @vaddr. If the
 * whole area is covered by a single VMA, the virtual memory area is locked
 * and @res_vma is filled with a copy of it.
 *
 * Returns 0 on success.
 */
static int _vb2_get_sdvmm_userptr(unsigned long vaddr, unsigned long size,
				  struct vm_area_struct **res_vma)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	int ret = -EFAULT;

	start = vaddr;
	offset = start & ~PAGE_MASK;
	end = start + size;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);

	if (vma == NULL || vma->vm_end < end)
		goto done;

	/* Lock vma and return to the caller */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	ret = 0;

done:
	up_read(&mm->mmap_sem);
	return ret;
}

static void *vb2_sdvmm_get_userptr(void *alloc_ctx, unsigned long vaddr,
				   unsigned long size, int write)
{
	struct vb2_sdvmm_conf *conf = alloc_ctx;
	struct vb2_sdvmm_buf *buf = NULL;
	struct vm_area_struct *vma = NULL;
	struct vcm *vcm = NULL;
	struct vcm_res *vcm_res = NULL;
	ump_dd_handle ump_dd_handle = NULL;
	ump_secure_id secure_id = 0;
	unsigned long offset = 0;
	int ret = 0;

	/* buffer should be registered in UMP before QBUF */
	ret = ump_dd_secure_id_get_from_vaddr(vaddr, &secure_id, &offset);
	if (ret) {
		pr_err("fail: get SecureID from vaddr(0x%08x)\n", (u32)vaddr);
		return ERR_PTR(-EINVAL);
	}

	ump_dd_handle = ump_dd_handle_create_from_secure_id(secure_id);
	if (ump_dd_handle == NULL) {
		pr_err("ump_dd_handle_create_from_secure_id failed\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		ump_dd_reference_release(ump_dd_handle);
		return ERR_PTR(-ENOMEM);
	}

	buf->vcm_res = (struct vcm_res *)ump_dd_meminfo_get(secure_id,
						(void *)conf->vcm_id);
	if (buf->vcm_res == NULL) {
		pr_err("ump_dd_meminfo_get failed\n");
		ump_dd_reference_release(ump_dd_handle);
		kfree(buf);
		return ERR_PTR(-EINVAL);
	}

	buf->dva_offset = offset;
	dbg(6, "dva(0x%x), size(0x%x), offset(0x%x)\n",
		(u32)buf->vcm_res->start, (u32)size, (u32)offset);

	vcm = vcm_find_vcm(conf->vcm_id);
	switch (vcm_reservation_in_vcm(vcm, buf->vcm_res)) {
	case S5PVCM_RES_IN_VCM:		/* No need to remap */
		break;

	case S5PVCM_RES_IN_ADDRSPACE:
		if (conf->remap_dva) {	/* Need to remap */
			vcm_res = buf->vcm_res;
			buf->vcm_res = vcm_map(vcm, vcm_res->phys, 0);
			buf->remap_dva = true;
			dbg(6, "remap: dva(0x%x)\n", (u32)buf->vcm_res->start);
		}
		break;

	case S5PVCM_RES_NOT_IN_VCM:
	default:
		pr_err("fail: vcm_reservation_in_vcm\n");
		ump_dd_reference_release(ump_dd_handle);
		kfree(buf);
		return ERR_PTR(-EINVAL);
	}

	ret = _vb2_get_sdvmm_userptr(vaddr, size, &vma);
	if (ret) {
		pr_err("Failed acquiring VMA 0x%08lx\n", vaddr);
		if (buf->remap_dva)
			vcm_unmap(buf->vcm_res);
		ump_dd_reference_release(ump_dd_handle);
		kfree(buf);
		return ERR_PTR(ret);
	}

	buf->conf = conf;
	buf->size = size;
	buf->vma = vma;
	buf->ump_dd_handle = ump_dd_handle;

	return buf;
}

static void vb2_sdvmm_put_userptr(void *mem_priv)
{
	struct vb2_sdvmm_buf *buf = mem_priv;

	if (!buf) {
		pr_err("No buffer to put\n");
		return;
	}

	if (buf->remap_dva)	/* Need to unmap */
		vcm_unmap(buf->vcm_res);

	ump_dd_reference_release(buf->ump_dd_handle);

	vb2_put_vma(buf->vma);

	kfree(buf);
}

static void *vb2_sdvmm_cookie(void *buf_priv)
{
	struct vb2_sdvmm_buf *buf = buf_priv;

	return (void *)(buf->vcm_res->start + buf->dva_offset);
}

static void *vb2_sdvmm_vaddr(void *buf_priv)
{
	struct vb2_sdvmm_buf *buf = buf_priv;

	if (!buf) {
		pr_err("failed to get buffer\n");
		return NULL;
	}

	if (!buf->vcm_res_kern) {
		buf->vcm_res_kern = vcm_map(vcm_vmm, buf->vcm_res->phys, 0);
		if (IS_ERR(buf->vcm_res_kern)) {
			pr_err("failed to get kernel virtual\n");
			buf->vcm_res_kern = NULL;
			return NULL;
		}
	}

	return (void *)buf->vcm_res_kern->start;
}

static unsigned int vb2_sdvmm_num_users(void *buf_priv)
{
	struct vb2_sdvmm_buf *buf = buf_priv;

	return atomic_read(&buf->ref);
}

static int vb2_sdvmm_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_sdvmm_buf *buf = buf_priv;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	if (!buf->cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return _vb2_sdvmm_mmap_pfn_range(vma, buf->vcm_res->phys, buf->size,
					 &vb2_common_vm_ops, &buf->handler);
}

const struct vb2_mem_ops vb2_sdvmm_memops = {
	.alloc		= vb2_sdvmm_alloc,
	.put		= vb2_sdvmm_put,
	.cookie		= vb2_sdvmm_cookie,
	.vaddr		= vb2_sdvmm_vaddr,
	.mmap		= vb2_sdvmm_mmap,
	.get_userptr	= vb2_sdvmm_get_userptr,
	.put_userptr	= vb2_sdvmm_put_userptr,
	.num_users	= vb2_sdvmm_num_users,
};
EXPORT_SYMBOL_GPL(vb2_sdvmm_memops);

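/*
 * Illustrative sketch (not from the original sources): a driver wires
 * vb2_sdvmm_memops into its vb2_queue and hands the context returned by
 * vb2_sdvmm_init() back from its queue_setup() callback. The identifiers
 * driver_ctx, driver_qops and struct driver_buf are hypothetical
 * placeholders.
 *
 *	struct vb2_queue *q = &driver_ctx->vb_queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR;
 *	q->ops = &driver_qops;
 *	q->mem_ops = &vb2_sdvmm_memops;
 *	q->drv_priv = driver_ctx;
 *	q->buf_struct_size = sizeof(struct driver_buf);
 *
 *	vb2_queue_init(q);
 */
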
void vb2_sdvmm_set_cacheable(void *alloc_ctx, bool cacheable)
{
	((struct vb2_sdvmm_conf *)alloc_ctx)->cacheable = cacheable;
}

bool vb2_sdvmm_get_cacheable(void *alloc_ctx)
{
	return ((struct vb2_sdvmm_conf *)alloc_ctx)->cacheable;
}

static void _vb2_sdvmm_cache_flush_all(void)
{
	flush_cache_all();	/* L1 */
	outer_flush_all();	/* L2 */
}

static void _vb2_sdvmm_cache_flush_range(struct vb2_sdvmm_buf *buf)
{
	struct vcm_phys *vcm_phys = buf->vcm_res->phys;
	phys_addr_t start, end;
	int count = vcm_phys->count;
	int i;

	/* traverse the physical parts sequentially */
	for (i = 0; i < count; i++) {
		start = vcm_phys->parts[i].start;
		end = start + vcm_phys->parts[i].size - 1;

		dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
		outer_flush_range(start, end);	/* L2 */
	}
}

int vb2_sdvmm_cache_flush(void *alloc_ctx, struct vb2_buffer *vb, u32 plane_no)
{
	struct vb2_sdvmm_buf *buf = vb->planes[plane_no].mem_priv;

	if (!buf->cacheable) {
		pr_warning("This is a non-cacheable buffer allocator\n");
		return 0;
	}

	if (buf->size > (unsigned long)SIZE_THRESHOLD)
		_vb2_sdvmm_cache_flush_all();
	else
		_vb2_sdvmm_cache_flush_range(buf);

	return 0;
}

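/*
 * Illustrative sketch (not from the original sources): drivers that request
 * cacheable buffers are expected to flush them around DMA, typically from
 * their vb2_ops callbacks. driver_buf_prepare() and alloc_ctx below are
 * hypothetical; alloc_ctx stands for the context obtained from
 * vb2_sdvmm_init().
 *
 *	static int driver_buf_prepare(struct vb2_buffer *vb)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < vb->num_planes; i++)
 *			vb2_sdvmm_cache_flush(alloc_ctx, vb, i);
 *
 *		return 0;
 *	}
 */
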
void vb2_sdvmm_suspend(void *alloc_ctx)
{
	struct vb2_sdvmm_conf *conf = alloc_ctx;
	unsigned long flags;

	spin_lock_irqsave(&conf->slock, flags);
	if (!conf->mmu_clk) {
		pr_warning("Already suspended: vcm_id(%d)\n", conf->vcm_id);
		spin_unlock_irqrestore(&conf->slock, flags);
		return;
	}

	conf->mmu_clk = false;
	s5p_vcm_turn_off(conf->vcm_ctx);

	spin_unlock_irqrestore(&conf->slock, flags);
}

void vb2_sdvmm_resume(void *alloc_ctx)
{
	struct vb2_sdvmm_conf *conf = alloc_ctx;
	unsigned long flags;

	spin_lock_irqsave(&conf->slock, flags);
	if (conf->mmu_clk) {
		pr_warning("Already resumed: vcm_id(%d)\n", conf->vcm_id);
		spin_unlock_irqrestore(&conf->slock, flags);
		return;
	}

	conf->mmu_clk = true;
	s5p_vcm_turn_on(conf->vcm_ctx);

	spin_unlock_irqrestore(&conf->slock, flags);
}

void *vb2_sdvmm_init(struct vb2_vcm *vcm,
		     struct vb2_cma *cma,
		     struct vb2_drv *drv)
{
	struct vb2_sdvmm_conf *conf;
	int ret = 0;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	if (cma) {
		conf->dev = cma->dev;
		conf->type = cma->type;
		conf->alignment = cma->alignment;
		conf->use_cma = true;
	}

	conf->vcm_id = vcm->vcm_id;
	conf->vcm_ctx = vcm_create_unified(vcm->size, vcm->vcm_id, NULL);
	if (IS_ERR(conf->vcm_ctx)) {
		pr_err("vcm_create failed: vcm_id(%d), size(%ld)\n",
			conf->vcm_id, (long int)vcm->size);
		goto err_vcm_create;
	}

	s5p_vcm_turn_off(conf->vcm_ctx);
	ret = vcm_activate(conf->vcm_ctx);
	if (ret < 0) {
		pr_err("vcm_activate failed\n");
		goto err_vcm_activate;
	}

	conf->mmu_clk = false;
	conf->cacheable = drv->cacheable;
	conf->remap_dva = drv->remap_dva;

	spin_lock_init(&conf->slock);

	return conf;

err_vcm_activate:
	s5p_vcm_turn_off(conf->vcm_ctx);
	vcm_destroy(conf->vcm_ctx);

err_vcm_create:
	kfree(conf);

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_init);

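/*
 * Illustrative sketch (not from the original sources): a single-planar
 * driver typically creates its allocator context at probe time and destroys
 * it at remove time. The field values below (VCM_DEV_MFC, SZ_256M) are
 * hypothetical examples; the real ones come from the platform setup and the
 * vb2_vcm/vb2_drv descriptors declared in <media/videobuf2-sdvmm.h>.
 *
 *	struct vb2_vcm vcm = {
 *		.vcm_id	= VCM_DEV_MFC,
 *		.size	= SZ_256M,
 *	};
 *	struct vb2_drv vb2_drv = {
 *		.cacheable	= false,
 *		.remap_dva	= true,
 *	};
 *	void *alloc_ctx;
 *
 *	alloc_ctx = vb2_sdvmm_init(&vcm, NULL, &vb2_drv);
 *	if (IS_ERR(alloc_ctx))
 *		return PTR_ERR(alloc_ctx);
 *	...
 *	vb2_sdvmm_cleanup(alloc_ctx);
 */
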
void vb2_sdvmm_cleanup(void *alloc_ctx)
{
	struct vb2_sdvmm_conf *local_conf = alloc_ctx;

	vcm_deactivate(local_conf->vcm_ctx);
	vcm_destroy(local_conf->vcm_ctx);

	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_cleanup);

void **vb2_sdvmm_init_multi(unsigned int num_planes,
			    struct vb2_vcm *vcm,
			    struct vb2_cma *cma[],
			    struct vb2_drv *drv)
{
	struct vb2_sdvmm_conf *conf;
	struct vcm *vcm_ctx;
	void **alloc_ctxes;
	unsigned int i;
	int ret = 0;

	/* allocate structure of alloc_ctxes */
	alloc_ctxes = kzalloc((sizeof *alloc_ctxes + sizeof *conf) * num_planes,
			      GFP_KERNEL);
	if (!alloc_ctxes)
		return ERR_PTR(-ENOMEM);

	vcm_ctx = vcm_create_unified(vcm->size, vcm->vcm_id, NULL);
	if (IS_ERR(vcm_ctx)) {
		pr_err("vcm_create of size %ld failed\n", (long int)vcm->size);
		goto err_vcm_create;
	}

	s5p_vcm_turn_off(vcm_ctx);
	ret = vcm_activate(vcm_ctx);
	if (ret < 0) {
		pr_err("vcm_activate failed\n");
		goto err_vcm_activate;
	}

	conf = (void *)(alloc_ctxes + num_planes);

	for (i = 0; i < num_planes; ++i, ++conf) {
		alloc_ctxes[i] = conf;
		if ((cma != NULL) && (cma[i] != NULL)) {
			conf->dev = cma[i]->dev;
			conf->type = cma[i]->type;
			conf->alignment = cma[i]->alignment;
			conf->use_cma = true;
		}
		conf->vcm_ctx = vcm_ctx;
		conf->vcm_id = vcm->vcm_id;
		conf->mmu_clk = false;
		conf->cacheable = drv->cacheable;
		conf->remap_dva = drv->remap_dva;
		spin_lock_init(&conf->slock);
	}

	return alloc_ctxes;

err_vcm_activate:
	s5p_vcm_turn_off(vcm_ctx);
	vcm_destroy(vcm_ctx);

err_vcm_create:
	kfree(alloc_ctxes);

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_init_multi);

void vb2_sdvmm_cleanup_multi(void **alloc_ctxes)
{
	struct vb2_sdvmm_conf *local_conf = alloc_ctxes[0];

	vcm_deactivate(local_conf->vcm_ctx);
	vcm_destroy(local_conf->vcm_ctx);

	kfree(alloc_ctxes);
}
EXPORT_SYMBOL_GPL(vb2_sdvmm_cleanup_multi);

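/*
 * Illustrative sketch (not from the original sources): for a multi-planar
 * format the driver creates one context per plane and hands the array back
 * to videobuf2 from queue_setup(). The plane count and the vcm/vb2_drv
 * descriptors below are hypothetical examples.
 *
 *	void **alloc_ctxes;
 *
 *	alloc_ctxes = vb2_sdvmm_init_multi(2, &vcm, NULL, &vb2_drv);
 *	if (IS_ERR(alloc_ctxes))
 *		return PTR_ERR(alloc_ctxes);
 *
 *	in queue_setup(): alloc_ctxs[plane] = alloc_ctxes[plane];
 *
 *	vb2_sdvmm_cleanup_multi(alloc_ctxes);
 */
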
/**
 * _vb2_sdvmm_mmap_pfn_range() - map physical pages (vcm) to userspace
 * @vma:	virtual memory region for the mapping
 * @vcm_phys:	vcm physical group information to be mapped
 * @size:	size of the memory to be mapped
 * @vm_ops:	vm operations to be assigned to the created area
 * @priv:	private data to be associated with the area
 *
 * Returns 0 on success.
 */
static int _vb2_sdvmm_mmap_pfn_range(struct vm_area_struct *vma,
				     struct vcm_phys *vcm_phys,
				     unsigned long size,
				     const struct vm_operations_struct *vm_ops,
				     void *priv)
{
	unsigned long org_vm_start = vma->vm_start;
	int remap_break = 0;
	int count = vcm_phys->count;
	int mapped_size = 0;
	int vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, i;
	resource_size_t remap_size;

	/* map each physical part into the vma sequentially */
	for (i = 0; (i < count && !remap_break); i++) {
		if ((mapped_size + vcm_phys->parts[i].size) > vma_size) {
			remap_size = vma_size - mapped_size;
			remap_break = 1;
		} else {
			remap_size = vcm_phys->parts[i].size;
		}

		ret = remap_pfn_range(vma, vma->vm_start,
				      vcm_phys->parts[i].start >> PAGE_SHIFT,
				      remap_size, vma->vm_page_prot);
		if (ret) {
			pr_err("Remapping failed, error: %d\n", ret);
			return ret;
		}

		dbg(6, "%dth page vaddr(0x%08x), paddr(0x%08x), size(0x%08x)\n",
			i, (u32)vma->vm_start, vcm_phys->parts[i].start,
			vcm_phys->parts[i].size);

		mapped_size += remap_size;
		vma->vm_start += vcm_phys->parts[i].size;
	}

	WARN_ON(size > mapped_size);

	/* re-assign initial start address */
	vma->vm_start		= org_vm_start;
	vma->vm_flags		|= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_private_data	= priv;
	vma->vm_ops		= vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

MODULE_AUTHOR("Sewoon Park <senui.park@samsung.com>");
MODULE_AUTHOR("Jonghun, Han <jonghun.han@samsung.com>");
MODULE_DESCRIPTION("SDVMM allocator handling routines for videobuf2");
MODULE_LICENSE("GPL");