1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * VideoCore Shared Memory driver using CMA.
4  *
5  * Copyright: 2018, Raspberry Pi (Trading) Ltd
6  * Dave Stevenson <dave.stevenson@raspberrypi.org>
7  *
8  * Based on vmcs_sm driver from Broadcom Corporation for some API,
9  * and taking some code for buffer allocation and dmabuf handling from
10  * videobuf2.
11  *
12  *
13  * This driver has 3 main uses:
14  * 1) Allocating buffers for the kernel or userspace that can be shared with the
15  *    VPU.
16  * 2) Importing dmabufs from elsewhere for sharing with the VPU.
17  * 3) Allocating buffers for use by the VPU.
18  *
19  * In the first and second cases the native handle is a dmabuf. Releasing the
20  * resource inherently comes from releasing the dmabuf, and this will trigger
21  * unmapping on the VPU. The underlying allocation and our buffer structure are
22  * retained until the VPU has confirmed that it has finished with it.
23  *
24  * For the VPU allocations the VPU is responsible for triggering the release,
25  * and therefore the released message decrements the dma_buf refcount (with the
26  * VPU mapping having already been marked as released).
27  */
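
/*
 * Illustrative userspace sketch for use case 1 (illustrative only; the exact
 * ioctl request macros are defined in linux/broadcom/vc_sm_cma_ioctl.h):
 *
 *   int dev = open("/dev/vcsm-cma", O_RDWR);
 *   struct vc_sm_cma_ioctl_alloc alloc = { .size = len, .name = "example" };
 *   ioctl(dev, <request built from VC_SM_CMA_CMD_ALLOC>, &alloc);
 *   void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                  alloc.handle, 0);   // alloc.handle is a dmabuf fd
 *   ...
 *   munmap(p, len);
 *   close(alloc.handle);               // dmabuf release unmaps on the VPU
 *   close(dev);
 */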
28
29 /* ---- Include Files ----------------------------------------------------- */
30 #include <linux/cdev.h>
31 #include <linux/device.h>
32 #include <linux/debugfs.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/dma-buf.h>
35 #include <linux/errno.h>
36 #include <linux/fs.h>
37 #include <linux/kernel.h>
38 #include <linux/list.h>
39 #include <linux/miscdevice.h>
40 #include <linux/module.h>
41 #include <linux/mm.h>
42 #include <linux/of_device.h>
43 #include <linux/platform_device.h>
44 #include <linux/proc_fs.h>
45 #include <linux/slab.h>
46 #include <linux/seq_file.h>
47 #include <linux/syscalls.h>
48 #include <linux/types.h>
49 #include <asm/cacheflush.h>
50
51 #include "vchiq_connected.h"
52 #include "vc_sm_cma_vchi.h"
53
54 #include "vc_sm.h"
55 #include "vc_sm_knl.h"
56 #include <linux/broadcom/vc_sm_cma_ioctl.h>
57
58 /* ---- Private Constants and Types --------------------------------------- */
59
60 #define DEVICE_NAME             "vcsm-cma"
61 #define DEVICE_MINOR            0
62
63 #define VC_SM_RESOURCE_NAME_DEFAULT       "sm-host-resource"
64
65 #define VC_SM_DIR_ROOT_NAME     "vcsm-cma"
66 #define VC_SM_STATE             "state"
67
68 /* Private file data associated with each opened device. */
69 struct vc_sm_privdata_t {
70         pid_t pid;                      /* PID of creator. */
71
72         int restart_sys;                /* Tracks restart on interrupt. */
73         enum vc_sm_msg_type int_action; /* Interrupted action. */
74         u32 int_trans_id;               /* Interrupted transaction. */
75 };
76
77 typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
78 struct sm_pde_t {
79         VC_SM_SHOW show;          /* Debug fs function hookup. */
80         struct dentry *dir_entry; /* Debug fs directory entry. */
81         void *priv_data;          /* Private data */
82 };
83
84 /* Global state information. */
85 struct sm_state_t {
86         struct platform_device *pdev;
87
88         struct miscdevice misc_dev;
89
90         struct sm_instance *sm_handle;  /* Handle for videocore service. */
91
92         spinlock_t kernelid_map_lock;   /* Spinlock protecting kernelid_map */
93         struct idr kernelid_map;
94
95         struct mutex map_lock;          /* Global map lock. */
96         struct list_head buffer_list;   /* List of buffers. */
97
98         struct vc_sm_privdata_t *data_knl;  /* Kernel internal data tracking. */
99         struct vc_sm_privdata_t *vpu_allocs; /* All allocations from the VPU */
100         struct dentry *dir_root;        /* Debug fs entries root. */
101         struct sm_pde_t dir_state;      /* Debug fs entries state sub-tree. */
102
103         bool require_released_callback; /* VPU will send a released msg when it
104                                          * has finished with a resource.
105                                          */
106         u32 int_trans_id;               /* Interrupted transaction. */
107 };
108
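/*
 * Per-attachment state for dmabufs we export ourselves: each attachment gets
 * its own copy of the buffer's scatter list, plus the direction it is
 * currently mapped in (DMA_NONE while unmapped).
 */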
109 struct vc_sm_dma_buf_attachment {
110         struct device *dev;
111         struct sg_table sg_table;
112         struct list_head list;
113         enum dma_data_direction dma_dir;
114 };
115
116 /* ---- Private Variables ----------------------------------------------- */
117
118 static struct sm_state_t *sm_state;
119 static int sm_inited;
120
121 /* ---- Private Function Prototypes -------------------------------------- */
122
123 /* ---- Private Functions ------------------------------------------------ */
124
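/*
 * kernel_id handling: each buffer gets an idr handle that is passed to the
 * VPU with allocate/import messages, so the asynchronous RELEASED message can
 * be matched back to the owning vc_sm_buffer.
 */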
125 static int get_kernel_id(struct vc_sm_buffer *buffer)
126 {
127         int handle;
128
        /* idr_alloc() must not sleep inside the spinlock, so preload first. */
        idr_preload(GFP_KERNEL);
129         spin_lock(&sm_state->kernelid_map_lock);
130         handle = idr_alloc(&sm_state->kernelid_map, buffer, 0, 0, GFP_NOWAIT);
131         spin_unlock(&sm_state->kernelid_map_lock);
        idr_preload_end();
132
133         return handle;
134 }
135
136 static struct vc_sm_buffer *lookup_kernel_id(int handle)
137 {
138         return idr_find(&sm_state->kernelid_map, handle);
139 }
140
141 static void free_kernel_id(int handle)
142 {
143         spin_lock(&sm_state->kernelid_map_lock);
144         idr_remove(&sm_state->kernelid_map, handle);
145         spin_unlock(&sm_state->kernelid_map_lock);
146 }
147
148 static int vc_sm_cma_seq_file_show(struct seq_file *s, void *v)
149 {
150         struct sm_pde_t *sm_pde;
151
152         sm_pde = (struct sm_pde_t *)(s->private);
153
154         if (sm_pde && sm_pde->show)
155                 sm_pde->show(s, v);
156
157         return 0;
158 }
159
160 static int vc_sm_cma_single_open(struct inode *inode, struct file *file)
161 {
162         return single_open(file, vc_sm_cma_seq_file_show, inode->i_private);
163 }
164
165 static const struct file_operations vc_sm_cma_debug_fs_fops = {
166         .open = vc_sm_cma_single_open,
167         .read = seq_read,
168         .llseek = seq_lseek,
169         .release = single_release,
170 };
171
172 static int vc_sm_cma_global_state_show(struct seq_file *s, void *v)
173 {
174         struct vc_sm_buffer *resource = NULL;
175         int resource_count = 0;
176
177         if (!sm_state)
178                 return 0;
179
180         seq_printf(s, "\nVC-ServiceHandle     %p\n", sm_state->sm_handle);
181
182         /* Log all applicable mapping(s). */
183
184         mutex_lock(&sm_state->map_lock);
185         seq_puts(s, "\nResources\n");
186         if (!list_empty(&sm_state->buffer_list)) {
187                 list_for_each_entry(resource, &sm_state->buffer_list,
188                                     global_buffer_list) {
189                         resource_count++;
190
191                         seq_printf(s, "\nResource                %p\n",
192                                    resource);
193                         seq_printf(s, "           NAME         %s\n",
194                                    resource->name);
195                         seq_printf(s, "           SIZE         %zu\n",
196                                    resource->size);
197                         seq_printf(s, "           DMABUF       %p\n",
198                                    resource->dma_buf);
199                         if (resource->imported) {
200                                 seq_printf(s, "           ATTACH       %p\n",
201                                            resource->import.attach);
202                                 seq_printf(s, "           SGT          %p\n",
203                                            resource->import.sgt);
204                         } else {
205                                 seq_printf(s, "           SGT          %p\n",
206                                            resource->alloc.sg_table);
207                         }
208                         seq_printf(s, "           DMA_ADDR     %pad\n",
209                                    &resource->dma_addr);
210                         seq_printf(s, "           VC_HANDLE     %08x\n",
211                                    resource->vc_handle);
212                         seq_printf(s, "           VC_MAPPING    %d\n",
213                                    resource->vpu_state);
214                 }
215         }
216         seq_printf(s, "\n\nTotal resource count:   %d\n\n", resource_count);
217
218         mutex_unlock(&sm_state->map_lock);
219
220         return 0;
221 }
222
223 /*
224  * Adds a buffer to the global list that tracks all allocations made
225  * through this driver.
226  */
227 static void vc_sm_add_resource(struct vc_sm_privdata_t *privdata,
228                                struct vc_sm_buffer *buffer)
229 {
230         mutex_lock(&sm_state->map_lock);
231         list_add(&buffer->global_buffer_list, &sm_state->buffer_list);
232         mutex_unlock(&sm_state->map_lock);
233
234         pr_debug("[%s]: added buffer %p (name %s, size %zu)\n",
235                  __func__, buffer, buffer->name, buffer->size);
236 }
237
238 /*
239  * Cleans up imported dmabuf.
240  * Should be called with mutex held.
241  */
242 static void vc_sm_clean_up_dmabuf(struct vc_sm_buffer *buffer)
243 {
244         if (!buffer->imported)
245                 return;
246
247         /* Handle cleaning up imported dmabufs */
248         if (buffer->import.sgt) {
249                 dma_buf_unmap_attachment(buffer->import.attach,
250                                          buffer->import.sgt,
251                                          DMA_BIDIRECTIONAL);
252                 buffer->import.sgt = NULL;
253         }
254         if (buffer->import.attach) {
255                 dma_buf_detach(buffer->dma_buf, buffer->import.attach);
256                 buffer->import.attach = NULL;
257         }
258 }
259
260 /*
261  * Instructs VPU to decrement the refcount on a buffer.
262  */
263 static void vc_sm_vpu_free(struct vc_sm_buffer *buffer)
264 {
265         if (buffer->vc_handle && buffer->vpu_state == VPU_MAPPED) {
266                 struct vc_sm_free_t free = { buffer->vc_handle, 0 };
267                 int status = vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
268                                              &sm_state->int_trans_id);
269                 if (status != 0 && status != -EINTR) {
270                         pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
271                                __func__, status, sm_state->int_trans_id);
272                 }
273
274                 if (sm_state->require_released_callback) {
275                         /* Need to wait for the VPU to confirm the free. */
276
277                         /* Retain a reference on this until the VPU has
278                          * released it
279                          */
280                         buffer->vpu_state = VPU_UNMAPPING;
281                 } else {
282                         buffer->vpu_state = VPU_NOT_MAPPED;
283                         buffer->vc_handle = 0;
284                 }
285         }
286 }
287
288 /*
289  * Release an allocation.
290  * All refcounting is done via the dma buf object.
291  *
292  * Must be called with the mutex held. The function will either release the
293  * mutex (if deferring the release) or destroy it. The caller must therefore not
294  * reuse the buffer on return.
295  */
296 static void vc_sm_release_resource(struct vc_sm_buffer *buffer)
297 {
298         pr_debug("[%s]: buffer %p (name %s, size %zu), imported %u\n",
299                  __func__, buffer, buffer->name, buffer->size,
300                  buffer->imported);
301
302         if (buffer->vc_handle) {
303                 /* We've sent the unmap request but not had the response. */
304                 pr_debug("[%s]: Waiting for VPU unmap response on %p\n",
305                          __func__, buffer);
306                 goto defer;
307         }
308         if (buffer->in_use) {
309                 /* dmabuf still in use - we await the release */
310                 pr_debug("[%s]: buffer %p is still in use\n", __func__, buffer);
311                 goto defer;
312         }
313
314         /* Release the allocation (whether imported dmabuf or CMA allocation) */
315         if (buffer->imported) {
316                 if (buffer->import.dma_buf)
317                         dma_buf_put(buffer->import.dma_buf);
318                 else
319                         pr_err("%s: Imported dmabuf already been put for buf %p\n",
320                                __func__, buffer);
321                 buffer->import.dma_buf = NULL;
322         } else {
323                 dma_free_coherent(&sm_state->pdev->dev, buffer->size,
324                                   buffer->cookie, buffer->dma_addr);
325         }
326
327         /* Free our buffer. Start by removing it from the list */
328         mutex_lock(&sm_state->map_lock);
329         list_del(&buffer->global_buffer_list);
330         mutex_unlock(&sm_state->map_lock);
331
332         pr_debug("%s: Release our allocation - done\n", __func__);
333         mutex_unlock(&buffer->lock);
334
335         mutex_destroy(&buffer->lock);
336
337         kfree(buffer);
338         return;
339
340 defer:
341         mutex_unlock(&buffer->lock);
342 }
343
344 /* Create support for private data tracking. */
345 static struct vc_sm_privdata_t *vc_sm_cma_create_priv_data(pid_t id)
346 {
348         struct vc_sm_privdata_t *file_data = NULL;
349
350         /* Allocate private structure. */
351         file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
352
353         if (!file_data)
354                 return NULL;
355
358         file_data->pid = id;
359
360         return file_data;
361 }
362
363 /* Dma buf operations for use with our own allocations */
364
365 static int vc_sm_dma_buf_attach(struct dma_buf *dmabuf,
366                                 struct dma_buf_attachment *attachment)
367
368 {
369         struct vc_sm_dma_buf_attachment *a;
370         struct sg_table *sgt;
371         struct vc_sm_buffer *buf = dmabuf->priv;
372         struct scatterlist *rd, *wr;
373         int ret, i;
374
375         a = kzalloc(sizeof(*a), GFP_KERNEL);
376         if (!a)
377                 return -ENOMEM;
378
379         pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
380
381         mutex_lock(&buf->lock);
382
383         INIT_LIST_HEAD(&a->list);
384
385         sgt = &a->sg_table;
386
387         /* Copy the buf->alloc.sg_table scatter list to the attachment, as we
388          * can't map the same scatter list to multiple attachments at the same time.
389          */
390         ret = sg_alloc_table(sgt, buf->alloc.sg_table->orig_nents, GFP_KERNEL);
391         if (ret) {
                mutex_unlock(&buf->lock);
392                 kfree(a);
393                 return -ENOMEM;
394         }
395
396         rd = buf->alloc.sg_table->sgl;
397         wr = sgt->sgl;
398         for (i = 0; i < sgt->orig_nents; ++i) {
399                 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
400                 rd = sg_next(rd);
401                 wr = sg_next(wr);
402         }
403
404         a->dma_dir = DMA_NONE;
405         attachment->priv = a;
406
407         list_add(&a->list, &buf->attachments);
408         mutex_unlock(&buf->lock);
409
410         return 0;
411 }
412
413 static void vc_sm_dma_buf_detach(struct dma_buf *dmabuf,
414                                  struct dma_buf_attachment *attachment)
415 {
416         struct vc_sm_dma_buf_attachment *a = attachment->priv;
417         struct vc_sm_buffer *buf = dmabuf->priv;
418         struct sg_table *sgt;
419
420         pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
421         if (!a)
422                 return;
423
424         sgt = &a->sg_table;
425
426         /* release the scatterlist cache */
427         if (a->dma_dir != DMA_NONE)
428                 dma_unmap_sg(attachment->dev, sgt->sgl, sgt->orig_nents,
429                              a->dma_dir);
430         sg_free_table(sgt);
431
432         mutex_lock(&buf->lock);
433         list_del(&a->list);
434         mutex_unlock(&buf->lock);
435
436         kfree(a);
437 }
438
439 static struct sg_table *vc_sm_map_dma_buf(struct dma_buf_attachment *attachment,
440                                           enum dma_data_direction direction)
441 {
442         struct vc_sm_dma_buf_attachment *a = attachment->priv;
443         /* stealing dmabuf mutex to serialize map/unmap operations */
444         struct mutex *lock = &attachment->dmabuf->lock;
445         struct sg_table *table;
446
447         mutex_lock(lock);
448         pr_debug("%s attachment %p\n", __func__, attachment);
449         table = &a->sg_table;
450
451         /* return previously mapped sg table */
452         if (a->dma_dir == direction) {
453                 mutex_unlock(lock);
454                 return table;
455         }
456
457         /* release any previous cache */
458         if (a->dma_dir != DMA_NONE) {
459                 dma_unmap_sg(attachment->dev, table->sgl, table->orig_nents,
460                              a->dma_dir);
461                 a->dma_dir = DMA_NONE;
462         }
463
464         /* mapping to the client with new direction */
465         table->nents = dma_map_sg(attachment->dev, table->sgl,
466                                   table->orig_nents, direction);
467         if (!table->nents) {
468                 pr_err("failed to map scatterlist\n");
469                 mutex_unlock(lock);
470                 return ERR_PTR(-EIO);
471         }
472
473         a->dma_dir = direction;
474         mutex_unlock(lock);
475
476         pr_debug("%s attachment %p\n", __func__, attachment);
477         return table;
478 }
479
480 static void vc_sm_unmap_dma_buf(struct dma_buf_attachment *attachment,
481                                 struct sg_table *table,
482                                 enum dma_data_direction direction)
483 {
484         pr_debug("%s attachment %p\n", __func__, attachment);
485         dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
486 }
487
488 static int vc_sm_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
489 {
490         struct vc_sm_buffer *buf = dmabuf->priv;
491         int ret;
492
493         pr_debug("%s dmabuf %p, buf %p, vm_start %08lX\n", __func__, dmabuf,
494                  buf, vma->vm_start);
495
496         mutex_lock(&buf->lock);
497
498         /* now map it to userspace */
499         vma->vm_pgoff = 0;
500
501         ret = dma_mmap_coherent(&sm_state->pdev->dev, vma, buf->cookie,
502                                 buf->dma_addr, buf->size);
503
504         if (ret) {
505                 pr_err("Remapping memory failed, error: %d\n", ret);
                mutex_unlock(&buf->lock);
506                 return ret;
507         }
508
509         vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
510
511         mutex_unlock(&buf->lock);
512
517         return ret;
518 }
519
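/*
 * Called when the last reference to one of our exported dmabufs goes away.
 * Ask the VPU to drop its mapping; the vc_sm_buffer itself is freed either
 * here or, if a released callback is required, once the VPU confirms.
 */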
520 static void vc_sm_dma_buf_release(struct dma_buf *dmabuf)
521 {
522         struct vc_sm_buffer *buffer;
523
524         if (!dmabuf)
525                 return;
526
527         buffer = (struct vc_sm_buffer *)dmabuf->priv;
528
529         mutex_lock(&buffer->lock);
530
531         pr_debug("%s dmabuf %p, buffer %p\n", __func__, dmabuf, buffer);
532
533         buffer->in_use = 0;
534
535         /* Unmap on the VPU */
536         vc_sm_vpu_free(buffer);
537         pr_debug("%s vpu_free done\n", __func__);
538
539         /* Unmap our dma_buf object (the vc_sm_buffer remains until released
540          * on the VPU).
541          */
542         vc_sm_clean_up_dmabuf(buffer);
543         pr_debug("%s clean_up dmabuf done\n", __func__);
544
545         /* buffer->lock will be destroyed by vc_sm_release_resource if finished
546          * with, otherwise unlocked. Do NOT unlock here.
547          */
548         vc_sm_release_resource(buffer);
549         pr_debug("%s done\n", __func__);
550 }
551
552 static int vc_sm_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
553                                           enum dma_data_direction direction)
554 {
555         struct vc_sm_buffer *buf;
556         struct vc_sm_dma_buf_attachment *a;
557
558         if (!dmabuf)
559                 return -EFAULT;
560
561         buf = dmabuf->priv;
562         if (!buf)
563                 return -EFAULT;
564
565         mutex_lock(&buf->lock);
566
567         list_for_each_entry(a, &buf->attachments, list) {
568                 dma_sync_sg_for_cpu(a->dev, a->sg_table.sgl,
569                                     a->sg_table.nents, direction);
570         }
571         mutex_unlock(&buf->lock);
572
573         return 0;
574 }
575
576 static int vc_sm_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
577                                         enum dma_data_direction direction)
578 {
579         struct vc_sm_buffer *buf;
580         struct vc_sm_dma_buf_attachment *a;
581
582         if (!dmabuf)
583                 return -EFAULT;
584         buf = dmabuf->priv;
585         if (!buf)
586                 return -EFAULT;
587
588         mutex_lock(&buf->lock);
589
590         list_for_each_entry(a, &buf->attachments, list) {
591                 dma_sync_sg_for_device(a->dev, a->sg_table.sgl,
592                                        a->sg_table.nents, direction);
593         }
594         mutex_unlock(&buf->lock);
595
596         return 0;
597 }
598
599 static const struct dma_buf_ops dma_buf_ops = {
600         .map_dma_buf = vc_sm_map_dma_buf,
601         .unmap_dma_buf = vc_sm_unmap_dma_buf,
602         .mmap = vc_sm_dmabuf_mmap,
603         .release = vc_sm_dma_buf_release,
604         .attach = vc_sm_dma_buf_attach,
605         .detach = vc_sm_dma_buf_detach,
606         .begin_cpu_access = vc_sm_dma_buf_begin_cpu_access,
607         .end_cpu_access = vc_sm_dma_buf_end_cpu_access,
608 };
609
610 /* Dma_buf operations for chaining through to an imported dma_buf */
611
612 static
613 int vc_sm_import_dma_buf_attach(struct dma_buf *dmabuf,
614                                 struct dma_buf_attachment *attachment)
615 {
616         struct vc_sm_buffer *buf = dmabuf->priv;
617
618         if (!buf->imported)
619                 return -EINVAL;
620         return buf->import.dma_buf->ops->attach(buf->import.dma_buf,
621                                                 attachment);
622 }
623
624 static
625 void vc_sm_import_dma_buf_detatch(struct dma_buf *dmabuf,
626                                   struct dma_buf_attachment *attachment)
627 {
628         struct vc_sm_buffer *buf = dmabuf->priv;
629
630         if (!buf->imported)
631                 return;
632         buf->import.dma_buf->ops->detach(buf->import.dma_buf, attachment);
633 }
634
635 static
636 struct sg_table *vc_sm_import_map_dma_buf(struct dma_buf_attachment *attachment,
637                                           enum dma_data_direction direction)
638 {
639         struct vc_sm_buffer *buf = attachment->dmabuf->priv;
640
641         if (!buf->imported)
642                 return NULL;
643         return buf->import.dma_buf->ops->map_dma_buf(attachment,
644                                                      direction);
645 }
646
647 static
648 void vc_sm_import_unmap_dma_buf(struct dma_buf_attachment *attachment,
649                                 struct sg_table *table,
650                                 enum dma_data_direction direction)
651 {
652         struct vc_sm_buffer *buf = attachment->dmabuf->priv;
653
654         if (!buf->imported)
655                 return;
656         buf->import.dma_buf->ops->unmap_dma_buf(attachment, table, direction);
657 }
658
659 static
660 int vc_sm_import_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
661 {
662         struct vc_sm_buffer *buf = dmabuf->priv;
663
664         pr_debug("%s: mmap dma_buf %p, buf %p, imported db %p\n", __func__,
665                  dmabuf, buf, buf->import.dma_buf);
666         if (!buf->imported) {
667                 pr_err("%s: mmap dma_buf %p- not an imported buffer\n",
668                        __func__, dmabuf);
669                 return -EINVAL;
670         }
671         return buf->import.dma_buf->ops->mmap(buf->import.dma_buf, vma);
672 }
673
674 static
675 int vc_sm_import_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
676                                           enum dma_data_direction direction)
677 {
678         struct vc_sm_buffer *buf = dmabuf->priv;
679
680         if (!buf->imported)
681                 return -EINVAL;
682         return buf->import.dma_buf->ops->begin_cpu_access(buf->import.dma_buf,
683                                                           direction);
684 }
685
686 static
687 int vc_sm_import_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
688                                         enum dma_data_direction direction)
689 {
690         struct vc_sm_buffer *buf = dmabuf->priv;
691
692         if (!buf->imported)
693                 return -EINVAL;
694         return buf->import.dma_buf->ops->end_cpu_access(buf->import.dma_buf,
695                                                           direction);
696 }
697
698 static const struct dma_buf_ops dma_buf_import_ops = {
699         .map_dma_buf = vc_sm_import_map_dma_buf,
700         .unmap_dma_buf = vc_sm_import_unmap_dma_buf,
701         .mmap = vc_sm_import_dmabuf_mmap,
702         .release = vc_sm_dma_buf_release,
703         .attach = vc_sm_import_dma_buf_attach,
704         .detach = vc_sm_import_dma_buf_detatch,
705         .begin_cpu_access = vc_sm_import_dma_buf_begin_cpu_access,
706         .end_cpu_access = vc_sm_import_dma_buf_end_cpu_access,
707 };
708
709 /* Import a dma_buf to be shared with VC. */
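/*
 * Flow: take a reference on the source dmabuf (by fd or pointer), attach and
 * map it for our device, require a single contiguous entry, hand the bus
 * address to the VPU via an IMPORT message, then export a new dmabuf whose
 * ops chain through to the original so our internal references are dropped
 * when the importer releases it.
 */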
710 int
711 vc_sm_cma_import_dmabuf_internal(struct vc_sm_privdata_t *private,
712                                  struct dma_buf *dma_buf,
713                                  int fd,
714                                  struct dma_buf **imported_buf)
715 {
716         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
717         struct vc_sm_buffer *buffer = NULL;
718         struct vc_sm_import import = { };
719         struct vc_sm_import_result result = { };
720         struct dma_buf_attachment *attach = NULL;
721         struct sg_table *sgt = NULL;
722         dma_addr_t dma_addr;
723         u32 cache_alias;
724         int ret = 0;
725         int status;
726
727         /* Setup our allocation parameters */
728         pr_debug("%s: importing dma_buf %p/fd %d\n", __func__, dma_buf, fd);
729
730         if (fd < 0)
731                 get_dma_buf(dma_buf);
732         else
733                 dma_buf = dma_buf_get(fd);
734
735         if (!dma_buf)
736                 return -EINVAL;
737
738         attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
739         if (IS_ERR(attach)) {
740                 ret = PTR_ERR(attach);
741                 goto error;
742         }
743
744         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
745         if (IS_ERR(sgt)) {
746                 ret = PTR_ERR(sgt);
747                 goto error;
748         }
749
750         /* Verify that the address block is contiguous */
751         if (sgt->nents != 1) {
752                 ret = -ENOMEM;
753                 goto error;
754         }
755
756         /* Allocate local buffer to track this allocation. */
757         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
758         if (!buffer) {
759                 ret = -ENOMEM;
760                 goto error;
761         }
762
763         import.type = VC_SM_ALLOC_NON_CACHED;
764         dma_addr = sg_dma_address(sgt->sgl);
765         import.addr = (u32)dma_addr;
766         cache_alias = import.addr & 0xC0000000;
767         if (cache_alias != 0xC0000000 && cache_alias != 0x80000000) {
768                 pr_err("%s: Expecting an uncached alias for dma_addr %pad\n",
769                        __func__, &dma_addr);
770                 /* Note that this assumes we're on >= Pi2, but it implies a
771                  * DT configuration error.
772                  */
773                 import.addr |= 0xC0000000;
774         }
775         import.size = sg_dma_len(sgt->sgl);
776         import.allocator = current->tgid;
777         import.kernel_id = get_kernel_id(buffer);
778
779         memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
780                sizeof(VC_SM_RESOURCE_NAME_DEFAULT));
781
782         pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %pad, size %u.\n",
783                  __func__, import.name, import.type, &dma_addr, import.size);
784
785         /* Allocate the videocore buffer. */
786         status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
787                                        &sm_state->int_trans_id);
788         if (status == -EINTR) {
789                 pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
790                          __func__, sm_state->int_trans_id);
791                 ret = -ERESTARTSYS;
792                 private->restart_sys = -EINTR;
793                 private->int_action = VC_SM_MSG_TYPE_IMPORT;
794                 goto error;
795         } else if (status || !result.res_handle) {
796                 pr_debug("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
797                          __func__, status, sm_state->int_trans_id);
798                 ret = -ENOMEM;
799                 goto error;
800         }
801
802         mutex_init(&buffer->lock);
803         INIT_LIST_HEAD(&buffer->attachments);
804         memcpy(buffer->name, import.name,
805                min(sizeof(buffer->name), sizeof(import.name) - 1));
806
807         /* Keep track of the buffer we created. */
808         buffer->private = private;
809         buffer->vc_handle = result.res_handle;
810         buffer->size = import.size;
811         buffer->vpu_state = VPU_MAPPED;
812
813         buffer->imported = 1;
814         buffer->import.dma_buf = dma_buf;
815
816         buffer->import.attach = attach;
817         buffer->import.sgt = sgt;
818         buffer->dma_addr = dma_addr;
819         buffer->in_use = 1;
820         buffer->kernel_id = import.kernel_id;
821
822         /*
823          * We're done - we need to export a new dmabuf chaining through most
824          * functions, but enabling us to release our own internal references
825          * here.
826          */
827         exp_info.ops = &dma_buf_import_ops;
828         exp_info.size = import.size;
829         exp_info.flags = O_RDWR;
830         exp_info.priv = buffer;
831
832         buffer->dma_buf = dma_buf_export(&exp_info);
833         if (IS_ERR(buffer->dma_buf)) {
834                 ret = PTR_ERR(buffer->dma_buf);
835                 goto error;
836         }
837
838         vc_sm_add_resource(private, buffer);
839
840         *imported_buf = buffer->dma_buf;
841
842         return 0;
843
844 error:
845         if (result.res_handle) {
846                 struct vc_sm_free_t free = { result.res_handle, 0 };
847
848                 vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
849                                     &sm_state->int_trans_id);
850         }
851         free_kernel_id(import.kernel_id);
852         kfree(buffer);
853         if (sgt)
854                 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
855         if (attach)
856                 dma_buf_detach(dma_buf, attach);
857         dma_buf_put(dma_buf);
858         return ret;
859 }
860
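/*
 * Allocate CMA memory on behalf of the VPU (VC_SM_MSG_TYPE_VC_MEM_REQUEST).
 * The VPU owns the buffer; it is wrapped in a dmabuf whose final put happens
 * when the VPU sends the matching RELEASED message.
 */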
861 static int vc_sm_cma_vpu_alloc(u32 size, u32 align, const char *name,
862                                u32 mem_handle, struct vc_sm_buffer **ret_buffer)
863 {
864         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
865         struct vc_sm_buffer *buffer = NULL;
866         struct sg_table *sgt;
867         int aligned_size;
868         int ret = 0;
869
870         /* Align to the user requested align */
871         aligned_size = ALIGN(size, align);
872         /* and then to a page boundary */
873         aligned_size = PAGE_ALIGN(aligned_size);
874
875         if (!aligned_size)
876                 return -EINVAL;
877
878         /* Allocate local buffer to track this allocation. */
879         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
880         if (!buffer)
881                 return -ENOMEM;
882
883         mutex_init(&buffer->lock);
884         /* Acquire the mutex as vc_sm_release_resource will release it in the
885          * error path.
886          */
887         mutex_lock(&buffer->lock);
888
889         buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
890                                             aligned_size, &buffer->dma_addr,
891                                             GFP_KERNEL);
892         if (!buffer->cookie) {
893                 pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
894                        __func__, aligned_size);
895                 ret = -ENOMEM;
896                 goto error;
897         }
898
899         pr_debug("[%s]: alloc of %d bytes success\n",
900                  __func__, aligned_size);
901
902         sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
903         if (!sgt) {
904                 ret = -ENOMEM;
905                 goto error;
906         }
907
        /* buffer->size has not been set at this point, so use aligned_size. */
908         ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
909                               buffer->dma_addr, aligned_size);
910         if (ret < 0) {
911                 pr_err("failed to get scatterlist from DMA API\n");
912                 kfree(sgt);
913                 ret = -ENOMEM;
914                 goto error;
915         }
916         buffer->alloc.sg_table = sgt;
917
918         INIT_LIST_HEAD(&buffer->attachments);
919
920         memcpy(buffer->name, name,
921                min(sizeof(buffer->name), strlen(name)));
922
923         exp_info.ops = &dma_buf_ops;
924         exp_info.size = aligned_size;
925         exp_info.flags = O_RDWR;
926         exp_info.priv = buffer;
927
928         buffer->dma_buf = dma_buf_export(&exp_info);
929         if (IS_ERR(buffer->dma_buf)) {
930                 ret = PTR_ERR(buffer->dma_buf);
931                 goto error;
932         }
933         buffer->dma_addr = (u32)sg_dma_address(buffer->alloc.sg_table->sgl);
934         if ((buffer->dma_addr & 0xC0000000) != 0xC0000000) {
935                 pr_warn_once("%s: Expecting an uncached alias for dma_addr %pad\n",
936                              __func__, &buffer->dma_addr);
937                 buffer->dma_addr |= 0xC0000000;
938         }
939         buffer->private = sm_state->vpu_allocs;
940
941         buffer->vc_handle = mem_handle;
942         buffer->vpu_state = VPU_MAPPED;
943         buffer->vpu_allocated = 1;
944         buffer->size = size;
945         /*
946          * Create an ID that will be passed along with our message so
947          * that when we service the release reply, we can look up which
948          * resource is being released.
949          */
950         buffer->kernel_id = get_kernel_id(buffer);
951
952         vc_sm_add_resource(sm_state->vpu_allocs, buffer);
953
954         mutex_unlock(&buffer->lock);
955
956         *ret_buffer = buffer;
957         return 0;
958 error:
959         if (buffer)
960                 vc_sm_release_resource(buffer);
961         return ret;
962 }
963
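/*
 * Handler for asynchronous messages from the VPU service: CLIENT_VERSION
 * acks, RELEASED notifications, and VC_MEM_REQUEST allocation requests.
 */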
964 static void
965 vc_sm_vpu_event(struct sm_instance *instance, struct vc_sm_result_t *reply,
966                 int reply_len)
967 {
968         switch (reply->trans_id & ~0x80000000) {
969         case VC_SM_MSG_TYPE_CLIENT_VERSION:
970         {
971                 /* Acknowledge that the firmware supports the version command */
972                 pr_debug("%s: firmware acked version msg. Require release cb\n",
973                          __func__);
974                 sm_state->require_released_callback = true;
975         }
976         break;
977         case VC_SM_MSG_TYPE_RELEASED:
978         {
979                 struct vc_sm_released *release = (struct vc_sm_released *)reply;
980                 struct vc_sm_buffer *buffer =
981                                         lookup_kernel_id(release->kernel_id);
982                 if (!buffer) {
983                         pr_err("%s: VC released a buffer that is already released, kernel_id %d\n",
984                                __func__, release->kernel_id);
985                         break;
986                 }
987                 mutex_lock(&buffer->lock);
988
989                 pr_debug("%s: Released addr %08x, size %u, id %08x, mem_handle %08x\n",
990                          __func__, release->addr, release->size,
991                          release->kernel_id, release->vc_handle);
992
993                 buffer->vc_handle = 0;
994                 buffer->vpu_state = VPU_NOT_MAPPED;
995                 free_kernel_id(release->kernel_id);
996
997                 if (buffer->vpu_allocated) {
998                         /* VPU allocation, so release the dmabuf which will
999                          * trigger the clean up.
1000                          */
1001                         mutex_unlock(&buffer->lock);
1002                         dma_buf_put(buffer->dma_buf);
1003                 } else {
1004                         vc_sm_release_resource(buffer);
1005                 }
1006         }
1007         break;
1008         case VC_SM_MSG_TYPE_VC_MEM_REQUEST:
1009         {
1010                 struct vc_sm_buffer *buffer = NULL;
1011                 struct vc_sm_vc_mem_request *req =
1012                                         (struct vc_sm_vc_mem_request *)reply;
1013                 struct vc_sm_vc_mem_request_result reply;
1014                 int ret;
1015
1016                 pr_debug("%s: Request %u bytes of memory, align %d name %s, trans_id %08x\n",
1017                          __func__, req->size, req->align, req->name,
1018                          req->trans_id);
1019                 ret = vc_sm_cma_vpu_alloc(req->size, req->align, req->name,
1020                                           req->vc_handle, &buffer);
1021
1022                 reply.trans_id = req->trans_id;
1023                 if (!ret) {
1024                         reply.addr = buffer->dma_addr;
1025                         reply.kernel_id = buffer->kernel_id;
1026                         pr_debug("%s: Allocated resource buffer %p, addr %pad\n",
1027                                  __func__, buffer, &buffer->dma_addr);
1028                 } else {
1029                         pr_err("%s: Allocation failed size %u, name %s, vc_handle %u\n",
1030                                __func__, req->size, req->name, req->vc_handle);
1031                         reply.addr = 0;
1032                         reply.kernel_id = 0;
1033                 }
1034                 vc_sm_vchi_client_vc_mem_req_reply(sm_state->sm_handle, &reply,
1035                                                    &sm_state->int_trans_id);
1036                 break;
1037         }
1038         break;
1039         default:
1040                 pr_err("%s: Unknown vpu cmd %x\n", __func__, reply->trans_id);
1041                 break;
1042         }
1043 }
1044
1045 /* Userspace handling */
1046 /*
1047  * Open the device. Creates a private state to help track all allocations
1048  * associated with this device.
1049  */
1050 static int vc_sm_cma_open(struct inode *inode, struct file *file)
1051 {
1052         /* Make sure the device was started properly. */
1053         if (!sm_state) {
1054                 pr_err("[%s]: invalid device\n", __func__);
1055                 return -EPERM;
1056         }
1057
1058         file->private_data = vc_sm_cma_create_priv_data(current->tgid);
1059         if (!file->private_data) {
1060                 pr_err("[%s]: failed to create data tracker\n", __func__);
1061
1062                 return -ENOMEM;
1063         }
1064
1065         return 0;
1066 }
1067
1068 /*
1069  * Close the vcsm-cma device.
1070  * All allocations are handed out as dmabuf file descriptors, so the
1071  * clean-up requests arrive when those dmabufs are released.
1072  */
1073 static int vc_sm_cma_release(struct inode *inode, struct file *file)
1074 {
1075         struct vc_sm_privdata_t *file_data =
1076             (struct vc_sm_privdata_t *)file->private_data;
1077         int ret = 0;
1078
1079         /* Make sure the device was started properly. */
1080         if (!sm_state || !file_data) {
1081                 pr_err("[%s]: invalid device\n", __func__);
1082                 ret = -EPERM;
1083                 goto out;
1084         }
1085
1086         pr_debug("[%s]: using private data %p\n", __func__, file_data);
1087
1088         /* Terminate the private data. */
1089         kfree(file_data);
1090
1091 out:
1092         return ret;
1093 }
1094
1095 /*
1096  * Allocate a shared memory handle and block.
1097  * Allocation is from CMA, and then imported into the VPU mappings.
1098  */
1099 int vc_sm_cma_ioctl_alloc(struct vc_sm_privdata_t *private,
1100                           struct vc_sm_cma_ioctl_alloc *ioparam)
1101 {
1102         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1103         struct vc_sm_buffer *buffer = NULL;
1104         struct vc_sm_import import = { 0 };
1105         struct vc_sm_import_result result = { 0 };
1106         struct dma_buf *dmabuf = NULL;
1107         struct sg_table *sgt;
1108         int aligned_size;
1109         int ret = 0;
1110         int status;
1111         int fd = -1;
1112
1113         aligned_size = PAGE_ALIGN(ioparam->size);
1114
1115         if (!aligned_size)
1116                 return -EINVAL;
1117
1118         /* Allocate local buffer to track this allocation. */
1119         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1120         if (!buffer) {
1121                 ret = -ENOMEM;
1122                 goto error;
1123         }
1124
1125         buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
1126                                             aligned_size,
1127                                             &buffer->dma_addr,
1128                                             GFP_KERNEL);
1129         if (!buffer->cookie) {
1130                 pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
1131                        __func__, aligned_size);
1132                 ret = -ENOMEM;
1133                 goto error;
1134         }
1135
1136         import.type = VC_SM_ALLOC_NON_CACHED;
1137         import.allocator = current->tgid;
1138
1139         if (*ioparam->name)
1140                 memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
1141         else
1142                 memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
1143                        sizeof(VC_SM_RESOURCE_NAME_DEFAULT));
1144
1145         mutex_init(&buffer->lock);
1146         INIT_LIST_HEAD(&buffer->attachments);
1147         memcpy(buffer->name, import.name,
1148                min(sizeof(buffer->name), sizeof(import.name) - 1));
1149
1150         exp_info.ops = &dma_buf_ops;
1151         exp_info.size = aligned_size;
1152         exp_info.flags = O_RDWR;
1153         exp_info.priv = buffer;
1154
1155         dmabuf = dma_buf_export(&exp_info);
1156         if (IS_ERR(dmabuf)) {
1157                 ret = PTR_ERR(dmabuf);
1158                 goto error;
1159         }
1160         buffer->dma_buf = dmabuf;
1161
1162         import.addr = buffer->dma_addr;
1163         import.size = aligned_size;
1164         import.kernel_id = get_kernel_id(buffer);
1165
1166         /* Wrap it into a videocore buffer. */
1167         status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
1168                                        &sm_state->int_trans_id);
1169         if (status == -EINTR) {
1170                 pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
1171                          __func__, sm_state->int_trans_id);
1172                 ret = -ERESTARTSYS;
1173                 private->restart_sys = -EINTR;
1174                 private->int_action = VC_SM_MSG_TYPE_IMPORT;
1175                 goto error;
1176         } else if (status || !result.res_handle) {
1177                 pr_err("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
1178                        __func__, status, sm_state->int_trans_id);
1179                 ret = -ENOMEM;
1180                 goto error;
1181         }
1182
1183         /* Keep track of the buffer we created. */
1184         buffer->private = private;
1185         buffer->vc_handle = result.res_handle;
1186         buffer->size = import.size;
1187         buffer->vpu_state = VPU_MAPPED;
1188         buffer->kernel_id = import.kernel_id;
1189
1190         sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
1191         if (!sgt) {
1192                 ret = -ENOMEM;
1193                 goto error;
1194         }
1195
1196         ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
1197                               buffer->dma_addr, buffer->size);
1198         if (ret < 0) {
1199                 /* FIXME: error handling */
1200                 pr_err("failed to get scatterlist from DMA API\n");
1201                 kfree(sgt);
1202                 ret = -ENOMEM;
1203                 goto error;
1204         }
1205         buffer->alloc.sg_table = sgt;
1206
1207         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1208         if (fd < 0)
1209                 goto error;
1210
1211         vc_sm_add_resource(private, buffer);
1212
1213         pr_debug("[%s]: Added resource as fd %d, buffer %p, private %p, dma_addr %pad\n",
1214                  __func__, fd, buffer, private, &buffer->dma_addr);
1215
1216         /* We're done */
1217         ioparam->handle = fd;
1218         ioparam->vc_handle = buffer->vc_handle;
1219         ioparam->dma_addr = buffer->dma_addr;
1220         return 0;
1221
1222 error:
1223         pr_err("[%s]: something failed - cleanup. ret %d\n", __func__, ret);
1224
1225         if (!IS_ERR_OR_NULL(dmabuf)) {
1226                 /* dmabuf has been exported, therefore allow dmabuf cleanup to
1227                  * deal with this
1228                  */
1229                 dma_buf_put(dmabuf);
1230         } else {
1231                 /* No dmabuf, therefore just free the buffer here */
1232                 if (buffer && buffer->cookie)
1233                         dma_free_coherent(&sm_state->pdev->dev, aligned_size,
1234                                           buffer->cookie, buffer->dma_addr);
1235                 kfree(buffer);
1236         }
1237         return ret;
1238 }
1239
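/*
 * The CLEAN_INVALID2 ioctl does cache maintenance by user virtual address via
 * the 32-bit ARM dmac_{inv,clean,flush}_range helpers, hence the
 * !CONFIG_ARM64 guard below.
 */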
1240 #ifndef CONFIG_ARM64
1241 /* Converts VCSM_CACHE_OP_* to an operating function. */
1242 static void (*cache_op_to_func(const unsigned int cache_op))
1243                                                 (const void*, const void*)
1244 {
1245         switch (cache_op) {
1246         case VC_SM_CACHE_OP_NOP:
1247                 return NULL;
1248
1249         case VC_SM_CACHE_OP_INV:
1250                 return dmac_inv_range;
1251         case VC_SM_CACHE_OP_CLEAN:
1252                 return dmac_clean_range;
1253         case VC_SM_CACHE_OP_FLUSH:
1254                 return dmac_flush_range;
1255
1256         default:
1257                 pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
1258                 return NULL;
1259         }
1260 }
1261
1262 /*
1263  * Clean/invalidate/flush the cache for a buffer that is already pinned (i.e. accessed).
1264  */
1265 static int clean_invalid_contig_2d(const void __user *addr,
1266                                    const size_t block_count,
1267                                    const size_t block_size,
1268                                    const size_t stride,
1269                                    const unsigned int cache_op)
1270 {
1271         size_t i;
1272         void (*op_fn)(const void *start, const void *end);
1273
1274         if (!block_size) {
1275                 pr_err("[%s]: size cannot be 0\n", __func__);
1276                 return -EINVAL;
1277         }
1278
1279         op_fn = cache_op_to_func(cache_op);
1280         if (!op_fn)
1281                 return -EINVAL;
1282
1283         for (i = 0; i < block_count; i++, addr += stride)
1284                 op_fn(addr, addr + block_size);
1285
1286         return 0;
1287 }
1288
1289 static int vc_sm_cma_clean_invalid2(unsigned int cmdnr, unsigned long arg)
1290 {
1291         struct vc_sm_cma_ioctl_clean_invalid2 ioparam;
1292         struct vc_sm_cma_ioctl_clean_invalid_block *block = NULL;
1293         int i, ret = 0;
1294
1295         /* Get parameter data. */
1296         if (copy_from_user(&ioparam, (void *)arg, sizeof(ioparam))) {
1297                 pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
1298                        __func__, cmdnr);
1299                 return -EFAULT;
1300         }
1301         block = kmalloc_array(ioparam.op_count, sizeof(*block), GFP_KERNEL);
1302         if (!block)
1303                 return -ENOMEM;
1304
1305         if (copy_from_user(block, (void *)(arg + sizeof(ioparam)),
1306                            ioparam.op_count * sizeof(*block)) != 0) {
1307                 pr_err("[%s]: failed to copy-from-user payload for cmd %x\n",
1308                        __func__, cmdnr);
1309                 ret = -EFAULT;
1310                 goto out;
1311         }
1312
1313         for (i = 0; i < ioparam.op_count; i++) {
1314                 const struct vc_sm_cma_ioctl_clean_invalid_block * const op =
1315                                                                 block + i;
1316
1317                 if (op->invalidate_mode == VC_SM_CACHE_OP_NOP)
1318                         continue;
1319
1320                 ret = clean_invalid_contig_2d((void __user *)op->start_address,
1321                                               op->block_count, op->block_size,
1322                                               op->inter_block_stride,
1323                                               op->invalidate_mode);
1324                 if (ret)
1325                         break;
1326         }
1327 out:
1328         kfree(block);
1329
1330         return ret;
1331 }
1332 #endif
1333
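/*
 * ioctl dispatcher for /dev/vcsm-cma: ALLOC, IMPORT_DMABUF and, on 32-bit ARM
 * only, CLEAN_INVALID2.
 */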
1334 static long vc_sm_cma_ioctl(struct file *file, unsigned int cmd,
1335                             unsigned long arg)
1336 {
1337         int ret = 0;
1338         unsigned int cmdnr = _IOC_NR(cmd);
1339         struct vc_sm_privdata_t *file_data =
1340             (struct vc_sm_privdata_t *)file->private_data;
1341
1342         /* Validate we can work with this device. */
1343         if (!sm_state || !file_data) {
1344                 pr_err("[%s]: invalid device\n", __func__);
1345                 return -EPERM;
1346         }
1347
1348         /* Action is a re-post of a previously interrupted action? */
1349         if (file_data->restart_sys == -EINTR) {
1350                 pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
1351                          __func__, file_data->int_action,
1352                          file_data->int_trans_id);
1353
1354                 file_data->restart_sys = 0;
1355         }
1356
1357         /* Now process the command. */
1358         switch (cmdnr) {
1359                 /* New memory allocation.
1360                  */
1361         case VC_SM_CMA_CMD_ALLOC:
1362         {
1363                 struct vc_sm_cma_ioctl_alloc ioparam;
1364
1365                 /* Get the parameter data. */
1366                 if (copy_from_user
1367                     (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
1368                         pr_err("[%s]: failed to copy-from-user for cmd %x\n",
1369                                __func__, cmdnr);
1370                         ret = -EFAULT;
1371                         break;
1372                 }
1373
1374                 ret = vc_sm_cma_ioctl_alloc(file_data, &ioparam);
1375                 if (!ret &&
1376                     (copy_to_user((void *)arg, &ioparam,
1377                                   sizeof(ioparam)) != 0)) {
1378                         /* FIXME: Release allocation */
1379                         pr_err("[%s]: failed to copy-to-user for cmd %x\n",
1380                                __func__, cmdnr);
1381                         ret = -EFAULT;
1382                 }
1383                 break;
1384         }
1385
1386         case VC_SM_CMA_CMD_IMPORT_DMABUF:
1387         {
1388                 struct vc_sm_cma_ioctl_import_dmabuf ioparam;
1389                 struct dma_buf *new_dmabuf;
1390
1391                 /* Get the parameter data. */
1392                 if (copy_from_user
1393                     (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
1394                         pr_err("[%s]: failed to copy-from-user for cmd %x\n",
1395                                __func__, cmdnr);
1396                         ret = -EFAULT;
1397                         break;
1398                 }
1399
1400                 ret = vc_sm_cma_import_dmabuf_internal(file_data,
1401                                                        NULL,
1402                                                        ioparam.dmabuf_fd,
1403                                                        &new_dmabuf);
1404
1405                 if (!ret) {
1406                         struct vc_sm_buffer *buf = new_dmabuf->priv;
1407
1408                         ioparam.size = buf->size;
1409                         ioparam.handle = dma_buf_fd(new_dmabuf,
1410                                                     O_CLOEXEC);
1411                         ioparam.vc_handle = buf->vc_handle;
1412                         ioparam.dma_addr = buf->dma_addr;
1413
1414                         if (ioparam.handle < 0 ||
1415                             (copy_to_user((void *)arg, &ioparam,
1416                                           sizeof(ioparam)) != 0)) {
1417                                 dma_buf_put(new_dmabuf);
1418                                 /* FIXME: Release allocation */
1419                                 ret = -EFAULT;
1420                         }
1421                 }
1422                 break;
1423         }
1424
1425 #ifndef CONFIG_ARM64
1426         /*
1427          * Flush/Invalidate the cache for a given mapping.
1428          * Blocks must be pinned (i.e. accessed) before this call.
1429          */
1430         case VC_SM_CMA_CMD_CLEAN_INVALID2:
1431                 ret = vc_sm_cma_clean_invalid2(cmdnr, arg);
1432                 break;
1433 #endif
1434
1435         default:
1436                 pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
1437                          current->tgid, file_data->pid);
1438
1439                 ret = -EINVAL;
1440                 break;
1441         }
1442
1443         return ret;
1444 }
1445
1446 #ifdef CONFIG_COMPAT
1447 struct vc_sm_cma_ioctl_clean_invalid2_32 {
1448         u32 op_count;
1449         struct vc_sm_cma_ioctl_clean_invalid_block_32 {
1450                 u16 invalidate_mode;
1451                 u16 block_count;
1452                 compat_uptr_t start_address;
1453                 u32 block_size;
1454                 u32 inter_block_stride;
1455         } s[0];
1456 };
1457
1458 #define VC_SM_CMA_CMD_CLEAN_INVALID2_32\
1459         _IOR(VC_SM_CMA_MAGIC_TYPE, VC_SM_CMA_CMD_CLEAN_INVALID2,\
1460          struct vc_sm_cma_ioctl_clean_invalid2_32)
1461
1462 static long vc_sm_cma_compat_ioctl(struct file *file, unsigned int cmd,
1463                                    unsigned long arg)
1464 {
1465         switch (cmd) {
1466         case VC_SM_CMA_CMD_CLEAN_INVALID2_32:
1467                 /* FIXME */
1468                 return -EINVAL;
1469
1470         default:
1471                 return vc_sm_cma_ioctl(file, cmd, arg);
1472         }
1473 }
1474 #endif
1475
1476 /* Device operations that we manage in this driver. */
1477 static const struct file_operations vc_sm_ops = {
1478         .owner = THIS_MODULE,
1479         .unlocked_ioctl = vc_sm_cma_ioctl,
1480 #ifdef CONFIG_COMPAT
1481         .compat_ioctl = vc_sm_cma_compat_ioctl,
1482 #endif
1483         .open = vc_sm_cma_open,
1484         .release = vc_sm_cma_release,
1485 };
1486
1487 /* Driver load/unload functions */
1488 /* Videocore connected.  */
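/*
 * Runs once the VideoCore firmware connection is up: opens the VCHI
 * shared-memory service, creates the debugfs entries, registers the
 * /dev/vcsm-cma misc device, and sends a client-version message (version 2);
 * if the firmware acks it, released callbacks are expected.
 */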
1489 static void vc_sm_connected_init(void)
1490 {
1491         int ret;
1492         struct vchiq_instance *vchiq_instance;
1493         struct vc_sm_version version;
1494         struct vc_sm_result_t version_result;
1495
1496         pr_info("[%s]: start\n", __func__);
1497
1498         /*
1499          * Initialize and create a VCHI connection for the shared memory service
1500          * running on videocore.
1501          */
1502         ret = vchiq_initialise(&vchiq_instance);
1503         if (ret) {
1504                 pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
1505                        __func__, ret);
1506
1507                 return;
1508         }
1509
1510         ret = vchiq_connect(vchiq_instance);
1511         if (ret) {
1512                 pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
1513                        __func__, ret);
1514
1515                 return;
1516         }
1517
1518         /* Initialize an instance of the shared memory service. */
1519         sm_state->sm_handle = vc_sm_cma_vchi_init(vchiq_instance, 1,
1520                                                   vc_sm_vpu_event);
1521         if (!sm_state->sm_handle) {
1522                 pr_err("[%s]: failed to initialize shared memory service\n",
1523                        __func__);
1524
1525                 return;
1526         }
1527
1528         /* Create a debug fs directory entry (root). */
1529         sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
1530
1531         sm_state->dir_state.show = &vc_sm_cma_global_state_show;
1532         sm_state->dir_state.dir_entry =
1533                 debugfs_create_file(VC_SM_STATE, 0444, sm_state->dir_root,
1534                                     &sm_state->dir_state,
1535                                     &vc_sm_cma_debug_fs_fops);
1536
1537         INIT_LIST_HEAD(&sm_state->buffer_list);
1538
1539         /* Create a shared memory device. */
1540         sm_state->misc_dev.minor = MISC_DYNAMIC_MINOR;
1541         sm_state->misc_dev.name = DEVICE_NAME;
1542         sm_state->misc_dev.fops = &vc_sm_ops;
1543         sm_state->misc_dev.parent = NULL;
1544         /* Temporarily set to 0666 until udev rules have been sorted. */
1545         sm_state->misc_dev.mode = 0666;
1546         ret = misc_register(&sm_state->misc_dev);
1547         if (ret) {
1548                 pr_err("vcsm-cma: failed to register misc device.\n");
1549                 goto err_remove_debugfs;
1550         }
1551
1552         sm_state->data_knl = vc_sm_cma_create_priv_data(0);
1553         if (!sm_state->data_knl) {
1554                 pr_err("[%s]: failed to create kernel private data tracker\n",
1555                        __func__);
1556                 goto err_remove_misc_dev;
1557         }
1558
1559         version.version = 2;
1560         ret = vc_sm_cma_vchi_client_version(sm_state->sm_handle, &version,
1561                                             &version_result,
1562                                             &sm_state->int_trans_id);
1563         if (ret) {
1564                 pr_err("[%s]: Failed to send version request %d\n", __func__,
1565                        ret);
1566         }
1567
1568         /* Done! */
1569         sm_inited = 1;
1570         pr_info("[%s]: installed successfully\n", __func__);
1571         return;
1572
1573 err_remove_misc_dev:
1574         misc_deregister(&sm_state->misc_dev);
1575 err_remove_debugfs:
1576         debugfs_remove_recursive(sm_state->dir_root);
1577         vc_sm_cma_vchi_stop(&sm_state->sm_handle);
1578 }
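
/*
 * Once the service is up, the global state file registered above can be
 * read through debugfs (assuming debugfs is mounted in the usual place):
 *
 *	cat /sys/kernel/debug/vcsm-cma/state
 */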
1579
1580 /* Driver loading. */
1581 static int bcm2835_vc_sm_cma_probe(struct platform_device *pdev)
1582 {
1583         pr_info("%s: Videocore shared memory driver\n", __func__);
1584
1585         sm_state = devm_kzalloc(&pdev->dev, sizeof(*sm_state), GFP_KERNEL);
1586         if (!sm_state)
1587                 return -ENOMEM;
1588         sm_state->pdev = pdev;
1589         mutex_init(&sm_state->map_lock);
1590
1591         spin_lock_init(&sm_state->kernelid_map_lock);
1592         idr_init_base(&sm_state->kernelid_map, 1);
1593
1594         pdev->dev.dma_parms = devm_kzalloc(&pdev->dev,
1595                                            sizeof(*pdev->dev.dma_parms),
1596                                            GFP_KERNEL);
1597         /* dma_set_max_seg_size checks if dma_parms is NULL. */
1598         dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
1599
1600         vchiq_add_connected_callback(vc_sm_connected_init);
1601         return 0;
1602 }
1603
1604 /* Driver unloading. */
1605 static int bcm2835_vc_sm_cma_remove(struct platform_device *pdev)
1606 {
1607         pr_debug("[%s]: start\n", __func__);
1608         if (sm_inited) {
1609                 misc_deregister(&sm_state->misc_dev);
1610
1611                 /* Remove all debugfs entries. */
1612                 debugfs_remove_recursive(sm_state->dir_root);
1613
1614                 /* Stop the videocore shared memory service. */
1615                 vc_sm_cma_vchi_stop(&sm_state->sm_handle);
1616         }
1617
1618         if (sm_state) {
1619                 idr_destroy(&sm_state->kernelid_map);
1620
1621                 /* The devm-allocated state structure is freed automatically. */
1622                 mutex_destroy(&sm_state->map_lock);
1623         }
1624
1625         pr_debug("[%s]: end\n", __func__);
1626         return 0;
1627 }
1628
1629 /* Kernel API calls */
1630 /* Get an internal resource handle mapped from the external one. */
1631 int vc_sm_cma_int_handle(void *handle)
1632 {
1633         struct dma_buf *dma_buf = (struct dma_buf *)handle;
1634         struct vc_sm_buffer *buf;
1635
1636         /* Validate we can work with this device. */
1637         if (!sm_state || !handle) {
1638                 pr_err("[%s]: invalid input\n", __func__);
1639                 return 0;
1640         }
1641
1642         buf = (struct vc_sm_buffer *)dma_buf->priv;
1643         return buf->vc_handle;
1644 }
1645 EXPORT_SYMBOL_GPL(vc_sm_cma_int_handle);
1646
1647 /* Free a previously allocated shared memory handle and block. */
1648 int vc_sm_cma_free(void *handle)
1649 {
1650         struct dma_buf *dma_buf = (struct dma_buf *)handle;
1651
1652         /* Validate we can work with this device. */
1653         if (!sm_state || !handle) {
1654                 pr_err("[%s]: invalid input\n", __func__);
1655                 return -EPERM;
1656         }
1657
1658         pr_debug("%s: handle %p/dmabuf %p\n", __func__, handle, dma_buf);
1659
1660         dma_buf_put(dma_buf);
1661
1662         return 0;
1663 }
1664 EXPORT_SYMBOL_GPL(vc_sm_cma_free);
1665
1666 /* Import a dmabuf to be shared with VC. */
1667 int vc_sm_cma_import_dmabuf(struct dma_buf *src_dmabuf, void **handle)
1668 {
1669         struct dma_buf *new_dma_buf;
1670         int ret;
1671
1672         /* Validate we can work with this device. */
1673         if (!sm_state || !src_dmabuf || !handle) {
1674                 pr_err("[%s]: invalid input\n", __func__);
1675                 return -EPERM;
1676         }
1677
1678         ret = vc_sm_cma_import_dmabuf_internal(sm_state->data_knl, src_dmabuf,
1679                                                -1, &new_dma_buf);
1680
1681         if (!ret) {
1682                 pr_debug("%s: imported to ptr %p\n", __func__, new_dma_buf);
1683
1684                 /* Assign a valid handle at this time. */
1685                 *handle = new_dma_buf;
1686         } else {
1687                 /*
1688                  * The import itself failed, so there is nothing to
1689                  * release here; simply report the error back to the
1690                  * caller.
1691                  */
1692                 pr_err("%s: vc_sm_cma_import_dmabuf_internal failed %d\n",
1693                        __func__, ret);
1694         }
1695
1696         return ret;
1697 }
1698 EXPORT_SYMBOL_GPL(vc_sm_cma_import_dmabuf);
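
/*
 * Minimal in-kernel sketch (not part of this driver) tying the three
 * exports above together; "client_dmabuf" stands for a dma_buf the caller
 * already holds a reference to, and error handling is trimmed:
 *
 *	void *vcsm_handle;
 *	int vc_handle;
 *
 *	if (!vc_sm_cma_import_dmabuf(client_dmabuf, &vcsm_handle)) {
 *		vc_handle = vc_sm_cma_int_handle(vcsm_handle);
 *		... hand vc_handle to the VPU, then when finished ...
 *		vc_sm_cma_free(vcsm_handle);
 *	}
 */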
1699
1700 static struct platform_driver bcm2835_vcsm_cma_driver = {
1701         .probe = bcm2835_vc_sm_cma_probe,
1702         .remove = bcm2835_vc_sm_cma_remove,
1703         .driver = {
1704                    .name = DEVICE_NAME,
1705                    .owner = THIS_MODULE,
1706                    },
1707 };
1708
1709 module_platform_driver(bcm2835_vcsm_cma_driver);
1710
1711 MODULE_AUTHOR("Dave Stevenson");
1712 MODULE_DESCRIPTION("VideoCore CMA Shared Memory Driver");
1713 MODULE_LICENSE("GPL v2");
1714 MODULE_ALIAS("platform:vcsm-cma");