// SPDX-License-Identifier: GPL-2.0
/*
 * VideoCore Shared Memory driver using CMA.
 *
 * Copyright: 2018, Raspberry Pi (Trading) Ltd
 * Dave Stevenson <dave.stevenson@raspberrypi.org>
 *
 * Based on vmcs_sm driver from Broadcom Corporation for some API,
 * and taking some code for buffer allocation and dmabuf handling from
 * the Android ION driver (Google/Linaro).
 *
 * This driver has 3 main uses:
 * 1) Allocating buffers for the kernel or userspace that can be shared with
 *    the VPU.
 * 2) Importing dmabufs from elsewhere for sharing with the VPU.
 * 3) Allocating buffers for use by the VPU.
 *
 * In the first and second cases the native handle is a dmabuf. Releasing the
 * resource inherently comes from releasing the dmabuf, and this will trigger
 * unmapping on the VPU. The underlying allocation and our buffer structure are
 * retained until the VPU has confirmed that it has finished with it.
 *
 * For the VPU allocations the VPU is responsible for triggering the release,
 * and therefore the released message decrements the dma_buf refcount (with the
 * VPU mapping having already been marked as released).
 */
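
/*
 * Kernel-side usage sketch (illustrative only - the variable names are made
 * up; the vc_sm_cma_* calls are the kernel API exported at the bottom of
 * this file and declared in vc_sm_knl.h):
 *
 *	void *vcsm_handle;
 *
 *	if (!vc_sm_cma_import_dmabuf(dmabuf, &vcsm_handle)) {
 *		int vc_handle = vc_sm_cma_int_handle(vcsm_handle);
 *
 *		// ...hand vc_handle to the VPU over VCHIQ...
 *		vc_sm_cma_free(vcsm_handle);	// drop our reference
 *	}
 */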

/* ---- Include Files ----------------------------------------------------- */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <asm/cacheflush.h>

#include "vchiq_connected.h"
#include "vc_sm_cma_vchi.h"

#include "vc_sm.h"
#include "vc_sm_knl.h"

#include <linux/broadcom/vc_sm_cma_ioctl.h>

/* ---- Private Constants and Types --------------------------------------- */

#define DEVICE_NAME "vcsm-cma"
#define DEVICE_MINOR 0

#define VC_SM_RESOURCE_NAME_DEFAULT "sm-host-resource"

#define VC_SM_DIR_ROOT_NAME "vcsm-cma"
#define VC_SM_STATE "state"

/* Private file data associated with each opened device. */
struct vc_sm_privdata_t {
	pid_t pid;			/* PID of creator. */

	int restart_sys;		/* Tracks restart on interrupt. */
	enum vc_sm_msg_type int_action;	/* Interrupted action. */
	u32 int_trans_id;		/* Interrupted transaction. */
};

typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
struct sm_pde_t {
	VC_SM_SHOW show;		/* Debug fs function hookup. */
	struct dentry *dir_entry;	/* Debug fs directory entry. */
	void *priv_data;		/* Private data. */
};

/* Global state information. */
struct sm_state_t {
	struct platform_device *pdev;

	struct miscdevice misc_dev;

	struct sm_instance *sm_handle;	/* Handle for videocore service. */

	spinlock_t kernelid_map_lock;	/* Spinlock protecting kernelid_map. */
	struct idr kernelid_map;

	struct mutex map_lock;		/* Global map lock. */
	struct list_head buffer_list;	/* List of buffers. */

	struct vc_sm_privdata_t *data_knl;   /* Kernel internal data tracking. */
	struct vc_sm_privdata_t *vpu_allocs; /* All allocations from the VPU. */
	struct dentry *dir_root;	/* Debug fs entries root. */
	struct sm_pde_t dir_state;	/* Debug fs entries state sub-tree. */

	bool require_released_callback;	/* VPU will send a released msg when it
					 * has finished with a resource.
					 */
	u32 int_trans_id;		/* Interrupted transaction. */
};

struct vc_sm_dma_buf_attachment {
	struct device *dev;
	struct sg_table sg_table;
	struct list_head list;
	enum dma_data_direction dma_dir;
};

/* ---- Private Variables ------------------------------------------------ */

static struct sm_state_t *sm_state;
static int sm_inited;

/* ---- Private Function Prototypes -------------------------------------- */

/* ---- Private Functions ------------------------------------------------ */
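
/*
 * The VPU refers back to buffers by a small integer "kernel id" rather than
 * by pointer: get_kernel_id() allocates an id for a buffer in kernelid_map,
 * lookup_kernel_id() resolves the id carried in a VPU reply back to the
 * buffer, and free_kernel_id() drops the mapping again.
 */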
static int get_kernel_id(struct vc_sm_buffer *buffer)
{
	int handle;

	/*
	 * idr_alloc() with GFP_KERNEL may sleep, which is not allowed under a
	 * spinlock, so preload outside the lock and allocate with GFP_NOWAIT.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&sm_state->kernelid_map_lock);
	handle = idr_alloc(&sm_state->kernelid_map, buffer, 0, 0, GFP_NOWAIT);
	spin_unlock(&sm_state->kernelid_map_lock);
	idr_preload_end();

	return handle;
}

static struct vc_sm_buffer *lookup_kernel_id(int handle)
{
	return idr_find(&sm_state->kernelid_map, handle);
}

static void free_kernel_id(int handle)
{
	spin_lock(&sm_state->kernelid_map_lock);
	idr_remove(&sm_state->kernelid_map, handle);
	spin_unlock(&sm_state->kernelid_map_lock);
}

static int vc_sm_cma_seq_file_show(struct seq_file *s, void *v)
{
	struct sm_pde_t *sm_pde;

	sm_pde = (struct sm_pde_t *)(s->private);

	if (sm_pde && sm_pde->show)
		sm_pde->show(s, v);

	return 0;
}

static int vc_sm_cma_single_open(struct inode *inode, struct file *file)
{
	return single_open(file, vc_sm_cma_seq_file_show, inode->i_private);
}

static const struct file_operations vc_sm_cma_debug_fs_fops = {
	.open = vc_sm_cma_single_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
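
/*
 * With debugfs mounted in its usual location, the show handler wired up in
 * vc_sm_connected_init() below appears as /sys/kernel/debug/vcsm-cma/state
 * (VC_SM_DIR_ROOT_NAME/VC_SM_STATE) and dumps every tracked buffer.
 */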

static int vc_sm_cma_global_state_show(struct seq_file *s, void *v)
{
	struct vc_sm_buffer *resource = NULL;
	int resource_count = 0;

	if (!sm_state)
		return 0;

	seq_printf(s, "\nVC-ServiceHandle     %p\n", sm_state->sm_handle);

	/* Log all applicable mapping(s). */

	mutex_lock(&sm_state->map_lock);
	seq_puts(s, "\nResources\n");
	if (!list_empty(&sm_state->buffer_list)) {
		list_for_each_entry(resource, &sm_state->buffer_list,
				    global_buffer_list) {
			resource_count++;

			seq_printf(s, "\nResource       %p\n", resource);
			seq_printf(s, "   NAME         %s\n", resource->name);
			seq_printf(s, "   SIZE         %zu\n", resource->size);
			seq_printf(s, "   DMABUF       %p\n", resource->dma_buf);
			if (resource->imported) {
				seq_printf(s, "   ATTACH       %p\n",
					   resource->import.attach);
				seq_printf(s, "   SGT          %p\n",
					   resource->import.sgt);
			} else {
				seq_printf(s, "   SGT          %p\n",
					   resource->alloc.sg_table);
			}
			seq_printf(s, "   DMA_ADDR     %pad\n",
				   &resource->dma_addr);
			seq_printf(s, "   VC_HANDLE    %08x\n",
				   resource->vc_handle);
			seq_printf(s, "   VC_MAPPING   %d\n",
				   resource->vpu_state);
		}
	}
	seq_printf(s, "\n\nTotal resource count:   %d\n\n", resource_count);

	mutex_unlock(&sm_state->map_lock);

	return 0;
}

/*
 * Adds a buffer to the private data list which tracks all the allocated
 * buffers.
 */
static void vc_sm_add_resource(struct vc_sm_privdata_t *privdata,
			       struct vc_sm_buffer *buffer)
{
	mutex_lock(&sm_state->map_lock);
	list_add(&buffer->global_buffer_list, &sm_state->buffer_list);
	mutex_unlock(&sm_state->map_lock);

	pr_debug("[%s]: added buffer %p (name %s, size %zu)\n",
		 __func__, buffer, buffer->name, buffer->size);
}

/*
 * Cleans up an imported dmabuf.
 * Should be called with the mutex held.
 */
static void vc_sm_clean_up_dmabuf(struct vc_sm_buffer *buffer)
{
	if (!buffer->imported)
		return;

	/* Handle cleaning up imported dmabufs. */
	if (buffer->import.sgt) {
		dma_buf_unmap_attachment(buffer->import.attach,
					 buffer->import.sgt,
					 DMA_BIDIRECTIONAL);
		buffer->import.sgt = NULL;
	}
	if (buffer->import.attach) {
		dma_buf_detach(buffer->dma_buf, buffer->import.attach);
		buffer->import.attach = NULL;
	}
}

/*
 * Instructs the VPU to decrement the refcount on a buffer.
 */
static void vc_sm_vpu_free(struct vc_sm_buffer *buffer)
{
	if (buffer->vc_handle && buffer->vpu_state == VPU_MAPPED) {
		struct vc_sm_free_t free = { buffer->vc_handle, 0 };
		int status = vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
						 &sm_state->int_trans_id);
		if (status != 0 && status != -EINTR) {
			pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
			       __func__, status, sm_state->int_trans_id);
		}

		if (sm_state->require_released_callback) {
			/* Need to wait for the VPU to confirm the free. */

			/* Retain a reference on this until the VPU has
			 * released it.
			 */
			buffer->vpu_state = VPU_UNMAPPING;
		} else {
			buffer->vpu_state = VPU_NOT_MAPPED;
			buffer->vc_handle = 0;
		}
	}
}

/*
 * Release an allocation.
 * All refcounting is done via the dma_buf object.
 *
 * Must be called with the mutex held. The function will either release the
 * mutex (if deferring the release) or destroy it. The caller must therefore
 * not reuse the buffer on return.
 */
static void vc_sm_release_resource(struct vc_sm_buffer *buffer)
{
	pr_debug("[%s]: buffer %p (name %s, size %zu), imported %u\n",
		 __func__, buffer, buffer->name, buffer->size,
		 buffer->imported);

	if (buffer->vc_handle) {
		/* We've sent the unmap request but not had the response. */
		pr_debug("[%s]: Waiting for VPU unmap response on %p\n",
			 __func__, buffer);
		goto defer;
	}

	if (buffer->in_use) {
		/* dmabuf still in use - we await the release. */
		pr_debug("[%s]: buffer %p is still in use\n", __func__, buffer);
		goto defer;
	}

	/* Release the allocation (whether imported dmabuf or CMA allocation). */
	if (buffer->imported) {
		if (buffer->import.dma_buf)
			dma_buf_put(buffer->import.dma_buf);
		else
			pr_err("%s: Imported dmabuf has already been put for buf %p\n",
			       __func__, buffer);
		buffer->import.dma_buf = NULL;
	} else {
		dma_free_coherent(&sm_state->pdev->dev, buffer->size,
				  buffer->cookie, buffer->dma_addr);
	}

	/* Free our buffer. Start by removing it from the list. */
	mutex_lock(&sm_state->map_lock);
	list_del(&buffer->global_buffer_list);
	mutex_unlock(&sm_state->map_lock);

	pr_debug("%s: Release our allocation - done\n", __func__);
	mutex_unlock(&buffer->lock);

	mutex_destroy(&buffer->lock);

	kfree(buffer);
	return;

defer:
	mutex_unlock(&buffer->lock);
}

/* Create support for private data tracking. */
static struct vc_sm_privdata_t *vc_sm_cma_create_priv_data(pid_t id)
{
	char alloc_name[32];
	struct vc_sm_privdata_t *file_data = NULL;

	/* Allocate private structure. */
	file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
	if (!file_data)
		return NULL;

	snprintf(alloc_name, sizeof(alloc_name), "%d", id);

	file_data->pid = id;

	return file_data;
}

/* Dma_buf operations for use with our own allocations. */

static int vc_sm_dma_buf_attach(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct vc_sm_dma_buf_attachment *a;
	struct sg_table *sgt;
	struct vc_sm_buffer *buf = dmabuf->priv;
	struct scatterlist *rd, *wr;
	int ret = 0;
	int i;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);

	mutex_lock(&buf->lock);

	INIT_LIST_HEAD(&a->list);

	sgt = &a->sg_table;

	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->alloc.sg_table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(a);
		mutex_unlock(&buf->lock);
		return -ENOMEM;
	}

	rd = buf->alloc.sg_table->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	a->dev = attachment->dev;	/* begin/end_cpu_access sync against this. */
	a->dma_dir = DMA_NONE;
	attachment->priv = a;

	list_add(&a->list, &buf->attachments);
	mutex_unlock(&buf->lock);

	return 0;
}

static void vc_sm_dma_buf_detach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attachment)
{
	struct vc_sm_dma_buf_attachment *a = attachment->priv;
	struct vc_sm_buffer *buf = dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
	if (!a)
		return;

	sgt = &a->sg_table;

	/* Release the scatterlist cache. */
	if (a->dma_dir != DMA_NONE)
		dma_unmap_sg(attachment->dev, sgt->sgl, sgt->orig_nents,
			     a->dma_dir);
	sg_free_table(sgt);

	mutex_lock(&buf->lock);
	list_del(&a->list);
	mutex_unlock(&buf->lock);

	kfree(a);
}

static struct sg_table *vc_sm_map_dma_buf(struct dma_buf_attachment *attachment,
					  enum dma_data_direction direction)
{
	struct vc_sm_dma_buf_attachment *a = attachment->priv;
	/* Stealing dmabuf mutex to serialize map/unmap operations. */
	struct mutex *lock = &attachment->dmabuf->lock;
	struct sg_table *table;

	mutex_lock(lock);
	pr_debug("%s attachment %p\n", __func__, attachment);
	table = &a->sg_table;

	/* Return the previously mapped sg table. */
	if (a->dma_dir == direction) {
		mutex_unlock(lock);
		return table;
	}

	/* Release any previous cache. */
	if (a->dma_dir != DMA_NONE) {
		dma_unmap_sg(attachment->dev, table->sgl, table->orig_nents,
			     a->dma_dir);
		a->dma_dir = DMA_NONE;
	}

	/* Map to the client with the new direction. */
	table->nents = dma_map_sg(attachment->dev, table->sgl,
				  table->orig_nents, direction);
	if (!table->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	a->dma_dir = direction;

	mutex_unlock(lock);

	pr_debug("%s attachment %p\n", __func__, attachment);
	return table;
}

static void vc_sm_unmap_dma_buf(struct dma_buf_attachment *attachment,
				struct sg_table *table,
				enum dma_data_direction direction)
{
	pr_debug("%s attachment %p\n", __func__, attachment);
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

static int vc_sm_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct vc_sm_buffer *buf = dmabuf->priv;
	int ret;

	pr_debug("%s dmabuf %p, buf %p, vm_start %08lX\n", __func__, dmabuf,
		 buf, vma->vm_start);

	mutex_lock(&buf->lock);

	/* Now map it to userspace. */
	ret = dma_mmap_coherent(&sm_state->pdev->dev, vma, buf->cookie,
				buf->dma_addr, buf->size);

	if (ret)
		pr_err("Remapping memory failed, error: %d\n", ret);
	else
		vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	mutex_unlock(&buf->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void vc_sm_dma_buf_release(struct dma_buf *dmabuf)
{
	struct vc_sm_buffer *buffer;

	if (!dmabuf)
		return;

	buffer = (struct vc_sm_buffer *)dmabuf->priv;

	mutex_lock(&buffer->lock);

	pr_debug("%s dmabuf %p, buffer %p\n", __func__, dmabuf, buffer);

	buffer->in_use = 0;

	/* Unmap on the VPU. */
	vc_sm_vpu_free(buffer);
	pr_debug("%s vpu_free done\n", __func__);

	/* Unmap our dma_buf object (the vc_sm_buffer remains until released
	 * on the VPU).
	 */
	vc_sm_clean_up_dmabuf(buffer);
	pr_debug("%s clean_up dmabuf done\n", __func__);

	/* buffer->lock will be destroyed by vc_sm_release_resource if finished
	 * with, otherwise unlocked. Do NOT unlock here.
	 */
	vc_sm_release_resource(buffer);
	pr_debug("%s done\n", __func__);
}

static int vc_sm_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					  enum dma_data_direction direction)
{
	struct vc_sm_buffer *buf;
	struct vc_sm_dma_buf_attachment *a;

	if (!dmabuf)
		return -EFAULT;

	buf = dmabuf->priv;
	if (!buf)
		return -EFAULT;

	mutex_lock(&buf->lock);

	list_for_each_entry(a, &buf->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->sg_table.sgl,
				    a->sg_table.nents, direction);
	}
	mutex_unlock(&buf->lock);

	return 0;
}

static int vc_sm_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct vc_sm_buffer *buf;
	struct vc_sm_dma_buf_attachment *a;

	if (!dmabuf)
		return -EFAULT;

	buf = dmabuf->priv;
	if (!buf)
		return -EFAULT;

	mutex_lock(&buf->lock);

	list_for_each_entry(a, &buf->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->sg_table.sgl,
				       a->sg_table.nents, direction);
	}
	mutex_unlock(&buf->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = vc_sm_map_dma_buf,
	.unmap_dma_buf = vc_sm_unmap_dma_buf,
	.mmap = vc_sm_dmabuf_mmap,
	.release = vc_sm_dma_buf_release,
	.attach = vc_sm_dma_buf_attach,
	.detach = vc_sm_dma_buf_detach,
	.begin_cpu_access = vc_sm_dma_buf_begin_cpu_access,
	.end_cpu_access = vc_sm_dma_buf_end_cpu_access,
};
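
/*
 * Exporter summary: vc_sm_dma_buf_attach() gives each importer a private copy
 * of the allocation's scatterlist, vc_sm_map_dma_buf() maps that copy for the
 * importer's device and caches the direction so a repeat map in the same
 * direction is free, and the begin/end_cpu_access hooks sync every
 * attachment's mapping around CPU accesses.
 */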

/* Dma_buf operations for chaining through to an imported dma_buf. */

static
int vc_sm_import_dma_buf_attach(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct vc_sm_buffer *buf = dmabuf->priv;

	if (!buf->imported)
		return -EINVAL;
	return buf->import.dma_buf->ops->attach(buf->import.dma_buf,
						attachment);
}

static
void vc_sm_import_dma_buf_detach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attachment)
{
	struct vc_sm_buffer *buf = dmabuf->priv;

	if (!buf->imported)
		return;
	buf->import.dma_buf->ops->detach(buf->import.dma_buf, attachment);
}

static
struct sg_table *vc_sm_import_map_dma_buf(struct dma_buf_attachment *attachment,
					  enum dma_data_direction direction)
{
	struct vc_sm_buffer *buf = attachment->dmabuf->priv;

	if (!buf->imported)
		return NULL;
	return buf->import.dma_buf->ops->map_dma_buf(attachment, direction);
}

static
void vc_sm_import_unmap_dma_buf(struct dma_buf_attachment *attachment,
				struct sg_table *table,
				enum dma_data_direction direction)
{
	struct vc_sm_buffer *buf = attachment->dmabuf->priv;

	if (!buf->imported)
		return;
	buf->import.dma_buf->ops->unmap_dma_buf(attachment, table, direction);
}

static
int vc_sm_import_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct vc_sm_buffer *buf = dmabuf->priv;

	pr_debug("%s: mmap dma_buf %p, buf %p, imported db %p\n", __func__,
		 dmabuf, buf, buf->import.dma_buf);
	if (!buf->imported) {
		pr_err("%s: mmap dma_buf %p - not an imported buffer\n",
		       __func__, dmabuf);
		return -EINVAL;
	}
	return buf->import.dma_buf->ops->mmap(buf->import.dma_buf, vma);
}

static
int vc_sm_import_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					  enum dma_data_direction direction)
{
	struct vc_sm_buffer *buf = dmabuf->priv;

	if (!buf->imported)
		return -EINVAL;
	return buf->import.dma_buf->ops->begin_cpu_access(buf->import.dma_buf,
							  direction);
}

static
int vc_sm_import_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct vc_sm_buffer *buf = dmabuf->priv;

	if (!buf->imported)
		return -EINVAL;
	return buf->import.dma_buf->ops->end_cpu_access(buf->import.dma_buf,
							direction);
}

static const struct dma_buf_ops dma_buf_import_ops = {
	.map_dma_buf = vc_sm_import_map_dma_buf,
	.unmap_dma_buf = vc_sm_import_unmap_dma_buf,
	.mmap = vc_sm_import_dmabuf_mmap,
	.release = vc_sm_dma_buf_release,
	.attach = vc_sm_import_dma_buf_attach,
	.detach = vc_sm_import_dma_buf_detach,
	.begin_cpu_access = vc_sm_import_dma_buf_begin_cpu_access,
	.end_cpu_access = vc_sm_import_dma_buf_end_cpu_access,
};

/* Import a dma_buf to be shared with VC. */
int
vc_sm_cma_import_dmabuf_internal(struct vc_sm_privdata_t *private,
				 struct dma_buf *dma_buf,
				 int fd,
				 struct dma_buf **imported_buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct vc_sm_buffer *buffer = NULL;
	struct vc_sm_import import = { };
	struct vc_sm_import_result result = { };
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *sgt = NULL;
	dma_addr_t dma_addr;
	u32 cache_alias;
	int ret = 0;
	int status;

	/* Set up our allocation parameters. */
	pr_debug("%s: importing dma_buf %p/fd %d\n", __func__, dma_buf, fd);

	if (fd < 0)
		get_dma_buf(dma_buf);
	else
		dma_buf = dma_buf_get(fd);

	if (IS_ERR_OR_NULL(dma_buf))
		return -EINVAL;

	attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		attach = NULL;
		goto error;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		sgt = NULL;
		goto error;
	}

	/* Verify that the address block is contiguous. */
	if (sgt->nents != 1) {
		ret = -ENOMEM;
		goto error;
	}

	/* Allocate local buffer to track this allocation. */
	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto error;
	}
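
	/*
	 * Background (BCM283x, informational): the VPU sees SDRAM through
	 * bus-address aliases selected by the top two address bits, and the
	 * 0x8/0xC aliases are the ones that bypass the ARM caches - which is
	 * what a VC_SM_ALLOC_NON_CACHED mapping expects, hence the alias
	 * check below.
	 */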
	import.type = VC_SM_ALLOC_NON_CACHED;
	dma_addr = sg_dma_address(sgt->sgl);
	import.addr = (u32)dma_addr;
	cache_alias = import.addr & 0xC0000000;
	if (cache_alias != 0xC0000000 && cache_alias != 0x80000000) {
		pr_err("%s: Expecting an uncached alias for dma_addr %pad\n",
		       __func__, &dma_addr);
		/* Note that this assumes we're on >= Pi2, but it implies a
		 * DT configuration error.
		 */
		import.addr |= 0xC0000000;
	}
	import.size = sg_dma_len(sgt->sgl);
	import.allocator = current->tgid;
	import.kernel_id = get_kernel_id(buffer);

	memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
	       sizeof(VC_SM_RESOURCE_NAME_DEFAULT));

	pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %pad, size %u.\n",
		 __func__, import.name, import.type, &dma_addr, import.size);

	/* Allocate the videocore buffer. */
	status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
				       &sm_state->int_trans_id);
	if (status == -EINTR) {
		pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
			 __func__, sm_state->int_trans_id);
		ret = -ERESTARTSYS;
		private->restart_sys = -EINTR;
		private->int_action = VC_SM_MSG_TYPE_IMPORT;
		goto error;
	} else if (status || !result.res_handle) {
		pr_debug("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
			 __func__, status, sm_state->int_trans_id);
		ret = -ENOMEM;
		goto error;
	}

	mutex_init(&buffer->lock);
	INIT_LIST_HEAD(&buffer->attachments);
	memcpy(buffer->name, import.name,
	       min(sizeof(buffer->name), sizeof(import.name) - 1));

	/* Keep track of the buffer we created. */
	buffer->private = private;
	buffer->vc_handle = result.res_handle;
	buffer->size = import.size;
	buffer->vpu_state = VPU_MAPPED;

	buffer->imported = 1;
	buffer->import.dma_buf = dma_buf;

	buffer->import.attach = attach;
	buffer->import.sgt = sgt;
	buffer->dma_addr = dma_addr;
	buffer->in_use = 1;
	buffer->kernel_id = import.kernel_id;

	/*
	 * We're done - we need to export a new dmabuf chaining through most
	 * functions, but enabling us to release our own internal references
	 * here.
	 */
	exp_info.ops = &dma_buf_import_ops;
	exp_info.size = import.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	buffer->dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(buffer->dma_buf)) {
		ret = PTR_ERR(buffer->dma_buf);
		goto error;
	}

	vc_sm_add_resource(private, buffer);

	*imported_buf = buffer->dma_buf;

	return 0;

error:
	if (result.res_handle) {
		struct vc_sm_free_t free = { result.res_handle, 0 };

		vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
				    &sm_state->int_trans_id);
	}
	free_kernel_id(import.kernel_id);
	kfree(buffer);
	if (sgt)
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	if (attach)
		dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	return ret;
}
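
/*
 * Allocate a CMA buffer on behalf of the VPU; this services the
 * VC_SM_MSG_TYPE_VC_MEM_REQUEST messages handled in vc_sm_vpu_event() below.
 */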
static int vc_sm_cma_vpu_alloc(u32 size, u32 align, const char *name,
			       u32 mem_handle, struct vc_sm_buffer **ret_buffer)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct vc_sm_buffer *buffer = NULL;
	struct sg_table *sgt;
	int aligned_size;
	int ret = 0;

	/* Align to the user requested align... */
	aligned_size = ALIGN(size, align);
	/* ...and then to a page boundary. */
	aligned_size = PAGE_ALIGN(aligned_size);

	if (!aligned_size)
		return -EINVAL;

	/* Allocate local buffer to track this allocation. */
	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	mutex_init(&buffer->lock);
	/* Acquire the mutex as vc_sm_release_resource will release it in the
	 * error path.
	 */
	mutex_lock(&buffer->lock);

	buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
					    aligned_size, &buffer->dma_addr,
					    GFP_KERNEL);
	if (!buffer->cookie) {
		pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
		       __func__, aligned_size);
		ret = -ENOMEM;
		goto error;
	}
	buffer->size = aligned_size;

	pr_debug("[%s]: alloc of %d bytes success\n",
		 __func__, aligned_size);

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto error;
	}

	ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
			      buffer->dma_addr, buffer->size);
	if (ret < 0) {
		pr_err("failed to get scatterlist from DMA API\n");
		kfree(sgt);
		ret = -ENOMEM;
		goto error;
	}
	buffer->alloc.sg_table = sgt;

	INIT_LIST_HEAD(&buffer->attachments);

	memcpy(buffer->name, name,
	       min(sizeof(buffer->name) - 1, strlen(name)));

	exp_info.ops = &dma_buf_ops;
	exp_info.size = aligned_size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	buffer->dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(buffer->dma_buf)) {
		ret = PTR_ERR(buffer->dma_buf);
		goto error;
	}
	buffer->dma_addr = (u32)sg_dma_address(buffer->alloc.sg_table->sgl);
	if ((buffer->dma_addr & 0xC0000000) != 0xC0000000) {
		pr_warn_once("%s: Expecting an uncached alias for dma_addr %pad\n",
			     __func__, &buffer->dma_addr);
		buffer->dma_addr |= 0xC0000000;
	}
	buffer->private = sm_state->vpu_allocs;

	buffer->vc_handle = mem_handle;
	buffer->vpu_state = VPU_MAPPED;
	buffer->vpu_allocated = 1;
	/*
	 * Create an ID that will be passed along with our message so
	 * that when we service the release reply, we can look up which
	 * resource is being released.
	 */
	buffer->kernel_id = get_kernel_id(buffer);

	vc_sm_add_resource(sm_state->vpu_allocs, buffer);

	mutex_unlock(&buffer->lock);

	*ret_buffer = buffer;
	return 0;

error:
	vc_sm_release_resource(buffer);
	return ret;
}
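
/*
 * Handler for messages initiated by the VPU; registered as the event
 * callback via vc_sm_cma_vchi_init() in vc_sm_connected_init().
 */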
static void
vc_sm_vpu_event(struct sm_instance *instance, struct vc_sm_result_t *reply,
		int reply_len)
{
	switch (reply->trans_id & ~0x80000000) {
	case VC_SM_MSG_TYPE_CLIENT_VERSION:
	{
		/* Acknowledge that the firmware supports the version command. */
		pr_debug("%s: firmware acked version msg. Require release cb\n",
			 __func__);
		sm_state->require_released_callback = true;
	}
	break;
	case VC_SM_MSG_TYPE_RELEASED:
	{
		struct vc_sm_released *release = (struct vc_sm_released *)reply;
		struct vc_sm_buffer *buffer =
					lookup_kernel_id(release->kernel_id);
		if (!buffer) {
			pr_err("%s: VC released a buffer that is already released, kernel_id %d\n",
			       __func__, release->kernel_id);
			break;
		}
		mutex_lock(&buffer->lock);

		pr_debug("%s: Released addr %08x, size %u, id %08x, mem_handle %08x\n",
			 __func__, release->addr, release->size,
			 release->kernel_id, release->vc_handle);

		buffer->vc_handle = 0;
		buffer->vpu_state = VPU_NOT_MAPPED;
		free_kernel_id(release->kernel_id);

		if (buffer->vpu_allocated) {
			/* VPU allocation, so release the dmabuf which will
			 * trigger the clean up.
			 */
			mutex_unlock(&buffer->lock);
			dma_buf_put(buffer->dma_buf);
		} else {
			vc_sm_release_resource(buffer);
		}
	}
	break;
	case VC_SM_MSG_TYPE_VC_MEM_REQUEST:
	{
		struct vc_sm_buffer *buffer = NULL;
		struct vc_sm_vc_mem_request *req =
					(struct vc_sm_vc_mem_request *)reply;
		/* Named "rep" so it doesn't shadow the reply parameter. */
		struct vc_sm_vc_mem_request_result rep;
		int ret;

		pr_debug("%s: Request %u bytes of memory, align %d name %s, trans_id %08x\n",
			 __func__, req->size, req->align, req->name,
			 req->trans_id);
		ret = vc_sm_cma_vpu_alloc(req->size, req->align, req->name,
					  req->vc_handle, &buffer);

		rep.trans_id = req->trans_id;
		if (!ret) {
			rep.addr = buffer->dma_addr;
			rep.kernel_id = buffer->kernel_id;
			pr_debug("%s: Allocated resource buffer %p, addr %pad\n",
				 __func__, buffer, &buffer->dma_addr);
		} else {
			pr_err("%s: Allocation failed size %u, name %s, vc_handle %u\n",
			       __func__, req->size, req->name, req->vc_handle);
			rep.addr = 0;
			rep.kernel_id = 0;
		}
		vc_sm_vchi_client_vc_mem_req_reply(sm_state->sm_handle, &rep,
						   &sm_state->int_trans_id);
		break;
	}

	default:
		pr_err("%s: Unknown vpu cmd %x\n", __func__, reply->trans_id);
		break;
	}
}

/* Userspace handling. */
/*
 * Open the device. Creates private state to help track all allocations
 * associated with this device.
 */
static int vc_sm_cma_open(struct inode *inode, struct file *file)
{
	/* Make sure the device was started properly. */
	if (!sm_state) {
		pr_err("[%s]: invalid device\n", __func__);
		return -EPERM;
	}

	file->private_data = vc_sm_cma_create_priv_data(current->tgid);
	if (!file->private_data) {
		pr_err("[%s]: failed to create data tracker\n", __func__);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Close the vcsm-cma device.
 * All allocations are file descriptors to the dmabuf objects, so we will get
 * the clean up request on those as they are cleaned up.
 */
static int vc_sm_cma_release(struct inode *inode, struct file *file)
{
	struct vc_sm_privdata_t *file_data =
			(struct vc_sm_privdata_t *)file->private_data;
	int ret = 0;

	/* Make sure the device was started properly. */
	if (!sm_state || !file_data) {
		pr_err("[%s]: invalid device\n", __func__);
		ret = -EPERM;
		goto out;
	}

	pr_debug("[%s]: using private data %p\n", __func__, file_data);

	/* Terminate the private data. */
	kfree(file_data);

out:
	return ret;
}

/*
 * Allocate a shared memory handle and block.
 * Allocation is from CMA, and then imported into the VPU mappings.
 */
int vc_sm_cma_ioctl_alloc(struct vc_sm_privdata_t *private,
			  struct vc_sm_cma_ioctl_alloc *ioparam)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct vc_sm_buffer *buffer = NULL;
	struct vc_sm_import import = { 0 };
	struct vc_sm_import_result result = { 0 };
	struct dma_buf *dmabuf = NULL;
	struct sg_table *sgt;
	int aligned_size;
	int ret = 0;
	int status;
	int fd = -1;

	aligned_size = PAGE_ALIGN(ioparam->size);

	if (!aligned_size)
		return -EINVAL;

	/* Allocate local buffer to track this allocation. */
	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
					    aligned_size,
					    &buffer->dma_addr,
					    GFP_KERNEL);
	if (!buffer->cookie) {
		pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
		       __func__, aligned_size);
		ret = -ENOMEM;
		goto error;
	}
	buffer->size = aligned_size;

	import.type = VC_SM_ALLOC_NON_CACHED;
	import.allocator = current->tgid;

	if (*ioparam->name)
		memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
	else
		memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
		       sizeof(VC_SM_RESOURCE_NAME_DEFAULT));

	mutex_init(&buffer->lock);
	INIT_LIST_HEAD(&buffer->attachments);
	memcpy(buffer->name, import.name,
	       min(sizeof(buffer->name), sizeof(import.name) - 1));

	exp_info.ops = &dma_buf_ops;
	exp_info.size = aligned_size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		dmabuf = NULL;
		goto error;
	}
	buffer->dma_buf = dmabuf;

	import.addr = buffer->dma_addr;
	import.size = aligned_size;
	import.kernel_id = get_kernel_id(buffer);

	/* Wrap it into a videocore buffer. */
	status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
				       &sm_state->int_trans_id);
	if (status == -EINTR) {
		pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
			 __func__, sm_state->int_trans_id);
		ret = -ERESTARTSYS;
		private->restart_sys = -EINTR;
		private->int_action = VC_SM_MSG_TYPE_IMPORT;
		goto error;
	} else if (status || !result.res_handle) {
		pr_err("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
		       __func__, status, sm_state->int_trans_id);
		ret = -ENOMEM;
		goto error;
	}

	/* Keep track of the buffer we created. */
	buffer->private = private;
	buffer->vc_handle = result.res_handle;
	buffer->size = import.size;
	buffer->vpu_state = VPU_MAPPED;
	buffer->kernel_id = import.kernel_id;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto error;
	}

	ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
			      buffer->dma_addr, buffer->size);
	if (ret < 0) {
		pr_err("failed to get scatterlist from DMA API\n");
		kfree(sgt);
		ret = -ENOMEM;
		goto error;
	}
	buffer->alloc.sg_table = sgt;

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto error;
	}

	vc_sm_add_resource(private, buffer);

	pr_debug("[%s]: Added resource as fd %d, buffer %p, private %p, dma_addr %pad\n",
		 __func__, fd, buffer, private, &buffer->dma_addr);

	/* We're done. */
	ioparam->handle = fd;
	ioparam->vc_handle = buffer->vc_handle;
	ioparam->dma_addr = buffer->dma_addr;
	return 0;

error:
	pr_err("[%s]: something failed - cleanup. ret %d\n", __func__, ret);

	if (dmabuf) {
		/* dmabuf has been exported, therefore allow dmabuf cleanup to
		 * deal with this.
		 */
		dma_buf_put(dmabuf);
	} else {
		/* No dmabuf, therefore just free the buffer here. */
		if (buffer->cookie)
			dma_free_coherent(&sm_state->pdev->dev, buffer->size,
					  buffer->cookie, buffer->dma_addr);
		kfree(buffer);
	}
	return ret;
}

#ifndef CONFIG_ARM64
/* Converts VC_SM_CACHE_OP_* to an operating function. */
static void (*cache_op_to_func(const unsigned int cache_op))
	     (const void*, const void*)
{
	switch (cache_op) {
	case VC_SM_CACHE_OP_NOP:
		return NULL;

	case VC_SM_CACHE_OP_INV:
		return dmac_inv_range;
	case VC_SM_CACHE_OP_CLEAN:
		return dmac_clean_range;
	case VC_SM_CACHE_OP_FLUSH:
		return dmac_flush_range;

	default:
		pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
		return NULL;
	}
}

/*
 * Clean/invalidate/flush the cache for a buffer that is already pinned
 * (i.e. accessed). Operates on block_count runs of block_size bytes,
 * each run separated by stride bytes (e.g. the rows of a 2D image).
 */
static int clean_invalid_contig_2d(const void __user *addr,
				   const size_t block_count,
				   const size_t block_size,
				   const size_t stride,
				   const unsigned int cache_op)
{
	size_t i;
	void (*op_fn)(const void *start, const void *end);

	if (!block_size) {
		pr_err("[%s]: size cannot be 0\n", __func__);
		return -EINVAL;
	}

	op_fn = cache_op_to_func(cache_op);
	if (!op_fn)
		return -EINVAL;

	for (i = 0; i < block_count; i++, addr += stride)
		op_fn(addr, addr + block_size);

	return 0;
}
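
/*
 * Illustrative example (values made up): to flush a 640x480 8bpp image that
 * userspace mapped with a 1024-byte pitch, a single clean_invalid block would
 * use invalidate_mode = VC_SM_CACHE_OP_FLUSH, block_count = 480 (rows),
 * block_size = 640 (bytes used per row) and inter_block_stride = 1024.
 */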

static int vc_sm_cma_clean_invalid2(unsigned int cmdnr, unsigned long arg)
{
	struct vc_sm_cma_ioctl_clean_invalid2 ioparam;
	struct vc_sm_cma_ioctl_clean_invalid_block *block = NULL;
	int i, ret = 0;

	/* Get parameter data. */
	if (copy_from_user(&ioparam, (void *)arg, sizeof(ioparam))) {
		pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
		       __func__, cmdnr);
		return -EFAULT;
	}
	/* kmalloc_array() guards against op_count * sizeof(*block) overflow. */
	block = kmalloc_array(ioparam.op_count, sizeof(*block), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	if (copy_from_user(block, (void *)(arg + sizeof(ioparam)),
			   ioparam.op_count * sizeof(*block)) != 0) {
		pr_err("[%s]: failed to copy-from-user payload for cmd %x\n",
		       __func__, cmdnr);
		ret = -EFAULT;
		goto out;
	}

	for (i = 0; i < ioparam.op_count; i++) {
		const struct vc_sm_cma_ioctl_clean_invalid_block * const op =
								block + i;

		if (op->invalidate_mode == VC_SM_CACHE_OP_NOP)
			continue;

		ret = clean_invalid_contig_2d((void __user *)op->start_address,
					      op->block_count, op->block_size,
					      op->inter_block_stride,
					      op->invalidate_mode);
		if (ret)
			break;
	}
out:
	kfree(block);

	return ret;
}
#endif

static long vc_sm_cma_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int ret = 0;
	unsigned int cmdnr = _IOC_NR(cmd);
	struct vc_sm_privdata_t *file_data =
		(struct vc_sm_privdata_t *)file->private_data;

	/* Validate we can work with this device. */
	if (!sm_state || !file_data) {
		pr_err("[%s]: invalid device\n", __func__);
		return -EPERM;
	}

	/* Action is a re-post of a previously interrupted action? */
	if (file_data->restart_sys == -EINTR) {
		pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
			 __func__, file_data->int_action,
			 file_data->int_trans_id);

		file_data->restart_sys = 0;
	}

	/* Now process the command. */
	switch (cmdnr) {
	/* New memory allocation. */
	case VC_SM_CMA_CMD_ALLOC:
	{
		struct vc_sm_cma_ioctl_alloc ioparam;

		/* Get the parameter data. */
		if (copy_from_user
		    (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
			pr_err("[%s]: failed to copy-from-user for cmd %x\n",
			       __func__, cmdnr);
			ret = -EFAULT;
			break;
		}

		ret = vc_sm_cma_ioctl_alloc(file_data, &ioparam);
		if (!ret &&
		    (copy_to_user((void *)arg, &ioparam,
				  sizeof(ioparam)) != 0)) {
			/* FIXME: Release allocation */
			pr_err("[%s]: failed to copy-to-user for cmd %x\n",
			       __func__, cmdnr);
			ret = -EFAULT;
		}
		break;
	}

	case VC_SM_CMA_CMD_IMPORT_DMABUF:
	{
		struct vc_sm_cma_ioctl_import_dmabuf ioparam;
		struct dma_buf *new_dmabuf;

		/* Get the parameter data. */
		if (copy_from_user
		    (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
			pr_err("[%s]: failed to copy-from-user for cmd %x\n",
			       __func__, cmdnr);
			ret = -EFAULT;
			break;
		}

		ret = vc_sm_cma_import_dmabuf_internal(file_data,
						       NULL,
						       ioparam.dmabuf_fd,
						       &new_dmabuf);
		if (!ret) {
			struct vc_sm_buffer *buf = new_dmabuf->priv;

			ioparam.size = buf->size;
			ioparam.handle = dma_buf_fd(new_dmabuf,
						    O_CLOEXEC);
			ioparam.vc_handle = buf->vc_handle;
			ioparam.dma_addr = buf->dma_addr;

			if (ioparam.handle < 0 ||
			    (copy_to_user((void *)arg, &ioparam,
					  sizeof(ioparam)) != 0)) {
				dma_buf_put(new_dmabuf);
				/* FIXME: Release allocation */
				ret = -EFAULT;
			}
		}
		break;
	}

#ifndef CONFIG_ARM64
	/*
	 * Flush/Invalidate the cache for a given mapping.
	 * Blocks must be pinned (i.e. accessed) before this call.
	 */
	case VC_SM_CMA_CMD_CLEAN_INVALID2:
		ret = vc_sm_cma_clean_invalid2(cmdnr, arg);
		break;
#endif

	default:
		pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
			 current->tgid, file_data->pid);

		ret = -EINVAL;
		break;
	}

	return ret;
}
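
/*
 * 32-bit compat handling: the only ioctl whose layout differs between 32- and
 * 64-bit userspace is CLEAN_INVALID2, because start_address is carried as a
 * user pointer (compat_uptr_t below); all other commands pass through
 * unchanged.
 */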
#ifdef CONFIG_COMPAT
struct vc_sm_cma_ioctl_clean_invalid2_32 {
	u32 op_count;
	struct vc_sm_cma_ioctl_clean_invalid_block_32 {
		u16 invalidate_mode;
		u16 block_count;
		compat_uptr_t start_address;
		u32 block_size;
		u32 inter_block_stride;
	} s[0];
};

#define VC_SM_CMA_CMD_CLEAN_INVALID2_32\
	_IOR(VC_SM_CMA_MAGIC_TYPE, VC_SM_CMA_CMD_CLEAN_INVALID2,\
	     struct vc_sm_cma_ioctl_clean_invalid2_32)

static long vc_sm_cma_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case VC_SM_CMA_CMD_CLEAN_INVALID2_32:
		return vc_sm_cma_ioctl(file, VC_SM_CMA_CMD_CLEAN_INVALID2,
				       arg);

	default:
		return vc_sm_cma_ioctl(file, cmd, arg);
	}
}
#endif

/* Device operations managed by this driver. */
static const struct file_operations vc_sm_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vc_sm_cma_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vc_sm_cma_compat_ioctl,
#endif
	.open = vc_sm_cma_open,
	.release = vc_sm_cma_release,
};

/* Driver load/unload functions. */
/* Videocore connected. */
static void vc_sm_connected_init(void)
{
	int ret;
	struct vchiq_instance *vchiq_instance;
	struct vc_sm_version version;
	struct vc_sm_result_t version_result;

	pr_info("[%s]: start\n", __func__);

	/*
	 * Initialize and create a VCHI connection for the shared memory
	 * service running on videocore.
	 */
	ret = vchiq_initialise(&vchiq_instance);
	if (ret) {
		pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
		       __func__, ret);
		return;
	}

	ret = vchiq_connect(vchiq_instance);
	if (ret) {
		pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
		       __func__, ret);
		return;
	}

	/* Initialize an instance of the shared memory service. */
	sm_state->sm_handle = vc_sm_cma_vchi_init(vchiq_instance, 1,
						  vc_sm_vpu_event);
	if (!sm_state->sm_handle) {
		pr_err("[%s]: failed to initialize shared memory service\n",
		       __func__);
		return;
	}

	/* Create a debugfs directory entry (root). */
	sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);

	sm_state->dir_state.show = &vc_sm_cma_global_state_show;
	sm_state->dir_state.dir_entry =
		debugfs_create_file(VC_SM_STATE, 0444, sm_state->dir_root,
				    &sm_state->dir_state,
				    &vc_sm_cma_debug_fs_fops);

	INIT_LIST_HEAD(&sm_state->buffer_list);

	/* Create a shared memory device. */
	sm_state->misc_dev.minor = MISC_DYNAMIC_MINOR;
	sm_state->misc_dev.name = DEVICE_NAME;
	sm_state->misc_dev.fops = &vc_sm_ops;
	sm_state->misc_dev.parent = NULL;
	/* Temporarily set as 0666 until udev rules have been sorted. */
	sm_state->misc_dev.mode = 0666;
	ret = misc_register(&sm_state->misc_dev);
	if (ret) {
		pr_err("vcsm-cma: failed to register misc device.\n");
		goto err_remove_debugfs;
	}

	sm_state->data_knl = vc_sm_cma_create_priv_data(0);
	if (!sm_state->data_knl) {
		pr_err("[%s]: failed to create kernel private data tracker\n",
		       __func__);
		goto err_remove_misc_dev;
	}

	version.version = 2;
	ret = vc_sm_cma_vchi_client_version(sm_state->sm_handle, &version,
					    &version_result,
					    &sm_state->int_trans_id);
	if (ret) {
		pr_err("[%s]: Failed to send version request %d\n", __func__,
		       ret);
	}

	/* Done! */
	sm_inited = 1;
	pr_info("[%s]: installed successfully\n", __func__);
	return;

err_remove_misc_dev:
	misc_deregister(&sm_state->misc_dev);

err_remove_debugfs:
	debugfs_remove_recursive(sm_state->dir_root);
	vc_sm_cma_vchi_stop(&sm_state->sm_handle);
}

/* Driver loading. */
static int bcm2835_vc_sm_cma_probe(struct platform_device *pdev)
{
	pr_info("%s: Videocore shared memory driver\n", __func__);

	sm_state = devm_kzalloc(&pdev->dev, sizeof(*sm_state), GFP_KERNEL);
	if (!sm_state)
		return -ENOMEM;
	sm_state->pdev = pdev;
	mutex_init(&sm_state->map_lock);

	spin_lock_init(&sm_state->kernelid_map_lock);
	idr_init_base(&sm_state->kernelid_map, 1);

	pdev->dev.dma_parms = devm_kzalloc(&pdev->dev,
					   sizeof(*pdev->dev.dma_parms),
					   GFP_KERNEL);
	/* dma_set_max_seg_size checks if dma_parms is NULL. */
	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	vchiq_add_connected_callback(vc_sm_connected_init);
	return 0;
}
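
/*
 * Note: probe only sets up state that does not depend on the VPU. Everything
 * that talks over VCHIQ (service init, debugfs, the misc device) is deferred
 * to vc_sm_connected_init(), which vchiq_add_connected_callback() invokes
 * once the VPU connection is up.
 */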

/* Driver unloading. */
static int bcm2835_vc_sm_cma_remove(struct platform_device *pdev)
{
	pr_debug("[%s]: start\n", __func__);
	if (sm_inited) {
		misc_deregister(&sm_state->misc_dev);

		/* Remove all proc entries. */
		debugfs_remove_recursive(sm_state->dir_root);

		/* Stop the videocore shared memory service. */
		vc_sm_cma_vchi_stop(&sm_state->sm_handle);
	}

	if (sm_state) {
		idr_destroy(&sm_state->kernelid_map);

		/* Free the memory for the state structure. */
		mutex_destroy(&sm_state->map_lock);
	}

	pr_debug("[%s]: end\n", __func__);
	return 0;
}

/* Kernel API calls. */
/* Get an internal resource handle mapped from the external one. */
int vc_sm_cma_int_handle(void *handle)
{
	struct dma_buf *dma_buf = (struct dma_buf *)handle;
	struct vc_sm_buffer *buf;

	/* Validate we can work with this device. */
	if (!sm_state || !handle) {
		pr_err("[%s]: invalid input\n", __func__);
		return 0;
	}

	buf = (struct vc_sm_buffer *)dma_buf->priv;
	return buf->vc_handle;
}
EXPORT_SYMBOL_GPL(vc_sm_cma_int_handle);

/* Free a previously allocated shared memory handle and block. */
int vc_sm_cma_free(void *handle)
{
	struct dma_buf *dma_buf = (struct dma_buf *)handle;

	/* Validate we can work with this device. */
	if (!sm_state || !handle) {
		pr_err("[%s]: invalid input\n", __func__);
		return -EPERM;
	}

	pr_debug("%s: handle %p/dmabuf %p\n", __func__, handle, dma_buf);

	dma_buf_put(dma_buf);

	return 0;
}
EXPORT_SYMBOL_GPL(vc_sm_cma_free);

/* Import a dmabuf to be shared with VC. */
int vc_sm_cma_import_dmabuf(struct dma_buf *src_dmabuf, void **handle)
{
	struct dma_buf *new_dma_buf;
	int ret;

	/* Validate we can work with this device. */
	if (!sm_state || !src_dmabuf || !handle) {
		pr_err("[%s]: invalid input\n", __func__);
		return -EPERM;
	}

	ret = vc_sm_cma_import_dmabuf_internal(sm_state->data_knl, src_dmabuf,
					       -1, &new_dma_buf);
	if (!ret) {
		pr_debug("%s: imported to ptr %p\n", __func__, new_dma_buf);

		/* Assign valid handle at this time. */
		*handle = new_dma_buf;
	} else {
		/* The import failed; nothing has been handed out, so just
		 * report the error to the caller.
		 */
		pr_err("%s: vc_sm_cma_import_dmabuf_internal failed %d\n",
		       __func__, ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vc_sm_cma_import_dmabuf);

static struct platform_driver bcm2835_vcsm_cma_driver = {
	.probe = bcm2835_vc_sm_cma_probe,
	.remove = bcm2835_vc_sm_cma_remove,
	.driver = {
		   .name = DEVICE_NAME,
		   .owner = THIS_MODULE,
		   },
};

module_platform_driver(bcm2835_vcsm_cma_driver);

MODULE_AUTHOR("Dave Stevenson");
MODULE_DESCRIPTION("VideoCore CMA Shared Memory Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:vcsm-cma");