// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
#include "vchiq_connected.h"
#include "vchiq_pagelist.h"

#define DEVICE_NAME "vchiq"

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1

#define BELL0   0x00
#define BELL2   0x08

#define ARM_DS_ACTIVE   BIT(2)

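/*
 * Pagelist structures no larger than VCHIQ_DMA_POOL_SIZE are allocated from
 * a dma_pool in create_pagelist(), avoiding a full dma_alloc_coherent() for
 * the common small-transfer case; larger ones fall back to a dedicated
 * coherent allocation.
 */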
#define VCHIQ_DMA_POOL_SIZE PAGE_SIZE

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
module_param_named(arm_log_level, vchiq_arm_log_level, int, 0644);
module_param_named(susp_log_level, vchiq_susp_log_level, int, 0644);
module_param_named(core_log_level, vchiq_core_log_level, int, 0644);
module_param_named(core_msg_log_level, vchiq_core_msg_log_level, int, 0644);
module_param_named(sync_log_level, vchiq_sync_log_level, int, 0644);

DEFINE_SPINLOCK(msg_queue_spinlock);
struct vchiq_state g_state;

static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;
static struct platform_device *bcm2835_codec;
static struct platform_device *vcsm_cma;
static struct platform_device *bcm2835_isp;

static struct vchiq_drvdata bcm2835_drvdata = {
        .cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
        .cache_line_size = 64,
};

static struct vchiq_drvdata bcm2711_drvdata = {
        .cache_line_size = 64,
        .use_36bit_addrs = true,
};

struct vchiq_2835_state {
        int inited;
        struct vchiq_arm_state arm_state;
};

struct vchiq_pagelist_info {
        struct pagelist *pagelist;
        size_t pagelist_buffer_size;
        dma_addr_t dma_addr;
        bool is_from_pool;
        enum dma_data_direction dma_dir;
        unsigned int num_pages;
        unsigned int pages_need_release;
        struct page **pages;
        struct scatterlist *scatterlist;
        unsigned int scatterlist_mapped;
};

static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static struct dma_pool *g_dma_pool;
static unsigned int g_use_36bit_addrs = 0;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;
static struct device *g_dma_dev;

static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
        unsigned int size, enum vchiq_bulk_dir dir);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
        struct vchiq_state *state = dev_id;
        irqreturn_t ret = IRQ_NONE;
        unsigned int status;

        /* Read (and clear) the doorbell */
        status = readl(g_regs + BELL0);

        if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
                remote_event_pollall(state);
                ret = IRQ_HANDLED;
        }

        return ret;
}

static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
        if (pagelistinfo->scatterlist_mapped) {
                dma_unmap_sg(g_dma_dev, pagelistinfo->scatterlist,
                             pagelistinfo->num_pages, pagelistinfo->dma_dir);
        }

        if (pagelistinfo->pages_need_release)
                unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

        if (pagelistinfo->is_from_pool) {
                dma_pool_free(g_dma_pool, pagelistinfo->pagelist,
                              pagelistinfo->dma_addr);
        } else {
                dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
                                  pagelistinfo->pagelist,
                                  pagelistinfo->dma_addr);
        }
}

/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */
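/* For example (assuming a 32-byte cache line and a read at offset 10 of
 * length 100): bytes 10..31 (head) and 96..109 (tail) share cache lines
 * with other data, so the VPU deposits them in a fragment buffer and the
 * ARM memcpy()s them into place in free_pagelist(); only the aligned
 * middle section is DMAed directly.
 */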

static struct vchiq_pagelist_info *
create_pagelist(char *buf, char __user *ubuf,
                size_t count, unsigned short type)
{
        struct pagelist *pagelist;
        struct vchiq_pagelist_info *pagelistinfo;
        struct page **pages;
        u32 *addrs;
        unsigned int num_pages, offset, i, k;
        int actual_pages;
        bool is_from_pool;
        size_t pagelist_size;
        struct scatterlist *scatterlist, *sg;
        int dma_buffers;
        dma_addr_t dma_addr;

        if (count >= INT_MAX - PAGE_SIZE)
                return NULL;

        if (buf)
                offset = (uintptr_t)buf & (PAGE_SIZE - 1);
        else
                offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
        num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

        if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
                         sizeof(struct vchiq_pagelist_info)) /
                        (sizeof(u32) + sizeof(pages[0]) +
                         sizeof(struct scatterlist)))
                return NULL;

        pagelist_size = sizeof(struct pagelist) +
                        (num_pages * sizeof(u32)) +
                        (num_pages * sizeof(pages[0]) +
                        (num_pages * sizeof(struct scatterlist))) +
                        sizeof(struct vchiq_pagelist_info);

        /* Allocate enough storage to hold the page pointers and the page
         * list
         */
        if (pagelist_size > VCHIQ_DMA_POOL_SIZE) {
                pagelist = dma_alloc_coherent(g_dev,
                                               pagelist_size,
                                               &dma_addr,
                                               GFP_KERNEL);
                is_from_pool = false;
        } else {
                pagelist = dma_pool_alloc(g_dma_pool, GFP_KERNEL, &dma_addr);
                is_from_pool = true;
        }

        vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

        if (!pagelist)
                return NULL;

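        /*
         * Carve the single allocation into its sub-arrays; the layout is
         *   struct pagelist | u32 addrs[num_pages] | struct page *pages[num_pages]
         *   | struct scatterlist[num_pages] | struct vchiq_pagelist_info
         * matching the pagelist_size computation above.
         */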
        addrs           = pagelist->addrs;
        pages           = (struct page **)(addrs + num_pages);
        scatterlist     = (struct scatterlist *)(pages + num_pages);
        pagelistinfo    = (struct vchiq_pagelist_info *)
                          (scatterlist + num_pages);

        pagelist->length = count;
        pagelist->type = type;
        pagelist->offset = offset;

        /* Populate the fields of the pagelistinfo structure */
        pagelistinfo->pagelist = pagelist;
        pagelistinfo->pagelist_buffer_size = pagelist_size;
        pagelistinfo->dma_addr = dma_addr;
        pagelistinfo->is_from_pool = is_from_pool;
        pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
                                  DMA_TO_DEVICE : DMA_FROM_DEVICE;
        pagelistinfo->num_pages = num_pages;
        pagelistinfo->pages_need_release = 0;
        pagelistinfo->pages = pages;
        pagelistinfo->scatterlist = scatterlist;
        pagelistinfo->scatterlist_mapped = 0;

        if (buf) {
                unsigned long length = count;
                unsigned int off = offset;

                for (actual_pages = 0; actual_pages < num_pages;
                     actual_pages++) {
                        struct page *pg =
                                vmalloc_to_page((buf +
                                                 (actual_pages * PAGE_SIZE)));
                        size_t bytes = PAGE_SIZE - off;

                        if (!pg) {
                                cleanup_pagelistinfo(pagelistinfo);
                                return NULL;
                        }

                        if (bytes > length)
                                bytes = length;
                        pages[actual_pages] = pg;
                        length -= bytes;
                        off = 0;
                }
                /* do not try and release vmalloc pages */
        } else {
                actual_pages = pin_user_pages_fast(
                                          (unsigned long)ubuf & PAGE_MASK,
                                          num_pages,
                                          type == PAGELIST_READ,
                                          pages);

                if (actual_pages != num_pages) {
                        vchiq_log_info(vchiq_arm_log_level,
                                       "%s - only %d/%d pages locked",
                                       __func__, actual_pages, num_pages);

                        /* This is probably due to the process being killed */
                        if (actual_pages > 0)
                                unpin_user_pages(pages, actual_pages);
                        cleanup_pagelistinfo(pagelistinfo);
                        return NULL;
                }
                /* release user pages */
                pagelistinfo->pages_need_release = 1;
        }

        /*
         * Initialize the scatterlist so that the magic cookie
         *  is filled if debugging is enabled
         */
        sg_init_table(scatterlist, num_pages);
        /* Now set the pages for each scatterlist */
        for (i = 0; i < num_pages; i++) {
                unsigned int len = PAGE_SIZE - offset;

                if (len > count)
                        len = count;
                sg_set_page(scatterlist + i, pages[i], len, offset);
                offset = 0;
                count -= len;
        }

        dma_buffers = dma_map_sg(g_dma_dev,
                                 scatterlist,
                                 num_pages,
                                 pagelistinfo->dma_dir);

        if (dma_buffers == 0) {
                cleanup_pagelistinfo(pagelistinfo);
                return NULL;
        }

        pagelistinfo->scatterlist_mapped = 1;

        /* Combine adjacent blocks for performance */
        k = 0;
        if (g_use_36bit_addrs) {
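                /*
                 * In the 36-bit format each addrs[] entry packs bits 35:12
                 * of the bus address into bits 31:8, with (page run length
                 * - 1) in bits 7:0, so one entry can describe a run of up
                 * to 256 contiguous pages (this layout assumes 4 KiB pages,
                 * i.e. PAGE_SHIFT == 12).
                 */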
                for_each_sg(scatterlist, sg, dma_buffers, i) {
                        u32 len = sg_dma_len(sg);
                        u64 addr = sg_dma_address(sg);
                        u32 page_id = (u32)((addr >> 4) & ~0xff);
                        u32 sg_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

                        /* Note: addrs is the address + page_count - 1
                         * The firmware expects blocks after the first to be page-
                         * aligned and a multiple of the page size
                         */
                        WARN_ON(len == 0);
                        WARN_ON(i &&
                                (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
                        WARN_ON(i && (addr & ~PAGE_MASK));
                        WARN_ON(upper_32_bits(addr) > 0xf);
                        if (k > 0 &&
                            ((addrs[k - 1] & ~0xff) +
                             (((addrs[k - 1] & 0xff) + 1) << 8)
                             == page_id)) {
                                u32 inc_pages = min(sg_pages,
                                                    0xff - (addrs[k - 1] & 0xff));
                                addrs[k - 1] += inc_pages;
                                page_id += inc_pages << 8;
                                sg_pages -= inc_pages;
                        }
                        while (sg_pages) {
                                u32 inc_pages = min(sg_pages, 0x100u);
                                addrs[k++] = page_id | (inc_pages - 1);
                                page_id += inc_pages << 8;
                                sg_pages -= inc_pages;
                        }
                }
        } else {
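                /*
                 * In the 32-bit format each addrs[] entry is the
                 * page-aligned bus address of a run, OR'd with
                 * (page count - 1) in the low bits.
                 */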
                for_each_sg(scatterlist, sg, dma_buffers, i) {
                        u32 len = sg_dma_len(sg);
                        u32 addr = sg_dma_address(sg);
                        u32 new_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

                        /* Note: addrs is the address + page_count - 1
                         * The firmware expects blocks after the first to be page-
                         * aligned and a multiple of the page size
                         */
                        WARN_ON(len == 0);
                        WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
                        WARN_ON(i && (addr & ~PAGE_MASK));
                        if (k > 0 &&
                            ((addrs[k - 1] & PAGE_MASK) +
                             (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
                            == (addr & PAGE_MASK))
                                addrs[k - 1] += new_pages;
                        else
                                addrs[k++] = (addr & PAGE_MASK) | (new_pages - 1);
                }
        }

        /* Partial cache lines (fragments) require special measures */
        if ((type == PAGELIST_READ) &&
                ((pagelist->offset & (g_cache_line_size - 1)) ||
                ((pagelist->offset + pagelist->length) &
                (g_cache_line_size - 1)))) {
                char *fragments;

                if (down_interruptible(&g_free_fragments_sema)) {
                        cleanup_pagelistinfo(pagelistinfo);
                        return NULL;
                }

                WARN_ON(!g_free_fragments);

                down(&g_free_fragments_mutex);
                fragments = g_free_fragments;
                WARN_ON(!fragments);
                g_free_fragments = *(char **) g_free_fragments;
                up(&g_free_fragments_mutex);
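                /*
                 * Encode the index of the claimed fragment pair in the type
                 * field; values >= PAGELIST_READ_WITH_FRAGMENTS tell the
                 * VPU (and free_pagelist()) which fragment buffer to use.
                 */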
                pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
                        (fragments - g_fragments_base) / g_fragments_size;
        }

        return pagelistinfo;
}

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
              int actual)
{
        struct pagelist *pagelist = pagelistinfo->pagelist;
        struct page **pages = pagelistinfo->pages;
        unsigned int num_pages = pagelistinfo->num_pages;

        vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
                        __func__, pagelistinfo->pagelist, actual);

        /*
         * NOTE: dma_unmap_sg must be called before the
         * cpu can touch any of the data/pages.
         */
        dma_unmap_sg(g_dma_dev, pagelistinfo->scatterlist,
                     pagelistinfo->num_pages, pagelistinfo->dma_dir);
        pagelistinfo->scatterlist_mapped = 0;

        /* Deal with any partial cache lines (fragments) */
        if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
                char *fragments = g_fragments_base +
                        (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
                        g_fragments_size;
                int head_bytes, tail_bytes;

                head_bytes = (g_cache_line_size - pagelist->offset) &
                        (g_cache_line_size - 1);
                tail_bytes = (pagelist->offset + actual) &
                        (g_cache_line_size - 1);

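                /*
                 * The VPU wrote the head partial line at fragments[0] and
                 * the tail partial line one cache line further on; copy
                 * each into the user's pages if that part of the transfer
                 * actually completed.
                 */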
                if ((actual >= 0) && (head_bytes != 0)) {
                        if (head_bytes > actual)
                                head_bytes = actual;

                        memcpy((char *)kmap(pages[0]) +
                                pagelist->offset,
                                fragments,
                                head_bytes);
                        kunmap(pages[0]);
                }
                if ((actual >= 0) && (head_bytes < actual) &&
                        (tail_bytes != 0)) {
                        memcpy((char *)kmap(pages[num_pages - 1]) +
                                ((pagelist->offset + actual) &
                                (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
                                fragments + g_cache_line_size,
                                tail_bytes);
                        kunmap(pages[num_pages - 1]);
                }

                down(&g_free_fragments_mutex);
                *(char **)fragments = g_free_fragments;
                g_free_fragments = fragments;
                up(&g_free_fragments_mutex);
                up(&g_free_fragments_sema);
        }

        /* Need to mark all the pages dirty. */
        if (pagelist->type != PAGELIST_WRITE &&
            pagelistinfo->pages_need_release) {
                unsigned int i;

                for (i = 0; i < num_pages; i++)
                        set_page_dirty(pages[i]);
        }

        cleanup_pagelistinfo(pagelistinfo);
}

int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
        struct device *dev = &pdev->dev;
        struct device *dma_dev = NULL;
        struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
        struct rpi_firmware *fw = drvdata->fw;
        struct vchiq_slot_zero *vchiq_slot_zero;
        void *slot_mem;
        dma_addr_t slot_phys;
        u32 channelbase;
        int slot_mem_size, frag_mem_size;
        int err, irq, i;

        /*
         * VCHI messages between the CPU and firmware use
         * 32-bit bus addresses.
         */
        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        if (err < 0)
                return err;

        g_cache_line_size = drvdata->cache_line_size;
        g_fragments_size = 2 * g_cache_line_size;

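        /*
         * On BCM2711 bulk buffers are mapped through the 40-bit DMA
         * controller's device so the resulting bus addresses can use the
         * wider range; otherwise g_dma_dev falls back to our own 32-bit
         * device (see the g_dma_dev assignment below).
         */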
        if (drvdata->use_36bit_addrs) {
                struct device_node *dma_node =
                        of_find_compatible_node(NULL, NULL, "brcm,bcm2711-dma");

                if (dma_node) {
                        struct platform_device *pdev;

                        pdev = of_find_device_by_node(dma_node);
                        if (pdev)
                                dma_dev = &pdev->dev;
                        of_node_put(dma_node);
                        g_use_36bit_addrs = true;
                } else {
                        dev_err(dev, "40-bit DMA controller not found\n");
                        return -EINVAL;
                }
        }

        /* Allocate space for the channels in coherent memory */
        slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
        frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

        slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
                                       &slot_phys, GFP_KERNEL);
        if (!slot_mem) {
                dev_err(dev, "could not allocate DMA memory\n");
                return -ENOMEM;
        }

        WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
        channelbase = slot_phys;

        vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
        if (!vchiq_slot_zero)
                return -EINVAL;

        vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
                channelbase + slot_mem_size;
        vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
                MAX_FRAGMENTS;

        g_fragments_base = (char *)slot_mem + slot_mem_size;

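        /*
         * Build the free list of fragment buffers; each free fragment's
         * first bytes hold the pointer to the next free fragment, so no
         * separate list nodes are needed.
         */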
        g_free_fragments = g_fragments_base;
        for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
                *(char **)&g_fragments_base[i * g_fragments_size] =
                        &g_fragments_base[(i + 1) * g_fragments_size];
        }
        *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
        sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

        err = vchiq_init_state(state, vchiq_slot_zero);
        if (err)
                return err;

        g_regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(g_regs))
                return PTR_ERR(g_regs);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0)
                return irq;

        err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
                               "VCHIQ doorbell", state);
        if (err) {
                dev_err(dev, "failed to register irq=%d\n", irq);
                return err;
        }

        /* Send the base address of the slots to VideoCore */
        err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
                                    &channelbase, sizeof(channelbase));
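        /*
         * The firmware is expected to write zero back into channelbase on
         * success; a non-zero value here means it rejected the address
         * (an assumption inferred from this error check).
         */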
        if (err || channelbase) {
                dev_err(dev, "failed to set channelbase\n");
                return err ? : -ENXIO;
        }

        g_dev = dev;
        g_dma_dev = dma_dev ?: dev;
        g_dma_pool = dmam_pool_create("vchiq_scatter_pool", dev,
                                      VCHIQ_DMA_POOL_SIZE, g_cache_line_size,
                                      0);
        if (!g_dma_pool) {
                dev_err(dev, "failed to create dma pool");
                return -ENOMEM;
        }

        vchiq_log_info(vchiq_arm_log_level,
                "vchiq_init - done (slots %pK, phys %pad)",
                vchiq_slot_zero, &slot_phys);

        vchiq_call_connected_callbacks();

        return 0;
}

int
vchiq_platform_init_state(struct vchiq_state *state)
{
        struct vchiq_2835_state *platform_state;

        state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
        if (!state->platform_state)
                return -ENOMEM;

        platform_state = (struct vchiq_2835_state *)state->platform_state;

        platform_state->inited = 1;
        vchiq_arm_init_state(state, &platform_state->arm_state);

        return 0;
}

struct vchiq_arm_state*
vchiq_platform_get_arm_state(struct vchiq_state *state)
{
        struct vchiq_2835_state *platform_state;

        platform_state   = (struct vchiq_2835_state *)state->platform_state;

        WARN_ON_ONCE(!platform_state->inited);

        return &platform_state->arm_state;
}

void
remote_event_signal(struct remote_event *event)
{
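        /*
         * Ensure writes to shared memory are visible to the VPU before
         * the event is marked as fired.
         */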
        wmb();

        event->fired = 1;

        dsb(sy);         /* data sync barrier - complete the write before ringing the doorbell */

        if (event->armed)
                writel(0, g_regs + BELL2); /* trigger vc interrupt */
}

int
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
                        void __user *uoffset, int size, int dir)
{
        struct vchiq_pagelist_info *pagelistinfo;

        pagelistinfo = create_pagelist(offset, uoffset, size,
                                       (dir == VCHIQ_BULK_RECEIVE)
                                       ? PAGELIST_READ
                                       : PAGELIST_WRITE);

        if (!pagelistinfo)
                return -ENOMEM;

        bulk->data = pagelistinfo->dma_addr;

        /*
         * Store the pagelistinfo address in remote_data,
         * which isn't used by the slave.
         */
        bulk->remote_data = pagelistinfo;

        return 0;
}

void
vchiq_complete_bulk(struct vchiq_bulk *bulk)
{
        if (bulk && bulk->remote_data && bulk->actual)
                free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
                              bulk->actual);
}

int vchiq_dump_platform_state(void *dump_context)
{
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf),
                "  Platform: 2835 (VC master)");
        return vchiq_dump(dump_context, buf, len + 1);
}

#define VCHIQ_INIT_RETRIES 10
int vchiq_initialise(struct vchiq_instance **instance_out)
{
        struct vchiq_state *state;
        struct vchiq_instance *instance = NULL;
        int i, ret;

        /*
         * VideoCore may not be ready due to boot up timing.
         * It may never be ready if kernel and firmware are mismatched, so don't
         * block forever.
         */
        for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
                state = vchiq_get_state();
                if (state)
                        break;
                usleep_range(500, 600);
        }
        if (i == VCHIQ_INIT_RETRIES) {
                vchiq_log_error(vchiq_core_log_level,
                        "%s: videocore not initialized\n", __func__);
                ret = -ENOTCONN;
                goto failed;
        } else if (i > 0) {
                vchiq_log_warning(vchiq_core_log_level,
                        "%s: videocore initialized after %d retries\n",
                        __func__, i);
        }

        instance = kzalloc(sizeof(*instance), GFP_KERNEL);
        if (!instance) {
                vchiq_log_error(vchiq_core_log_level,
                        "%s: error allocating vchiq instance\n", __func__);
                ret = -ENOMEM;
                goto failed;
        }

        instance->connected = 0;
        instance->state = state;
        mutex_init(&instance->bulk_waiter_list_mutex);
        INIT_LIST_HEAD(&instance->bulk_waiter_list);

        *instance_out = instance;

        ret = 0;

failed:
        vchiq_log_trace(vchiq_core_log_level,
                "%s(%p): returning %d", __func__, instance, ret);

        return ret;
}
EXPORT_SYMBOL(vchiq_initialise);

void free_bulk_waiter(struct vchiq_instance *instance)
{
        struct bulk_waiter_node *waiter, *next;

        list_for_each_entry_safe(waiter, next,
                                 &instance->bulk_waiter_list, list) {
                list_del(&waiter->list);
                vchiq_log_info(vchiq_arm_log_level,
                                "bulk_waiter - cleaned up %pK for pid %d",
                                waiter, waiter->pid);
                kfree(waiter);
        }
}

enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
        enum vchiq_status status = VCHIQ_SUCCESS;
        struct vchiq_state *state = instance->state;

        if (mutex_lock_killable(&state->mutex))
                return VCHIQ_RETRY;

        /* Remove all services */
        vchiq_shutdown_internal(state, instance);

        mutex_unlock(&state->mutex);

        vchiq_log_trace(vchiq_core_log_level,
                "%s(%p): returning %d", __func__, instance, status);

        free_bulk_waiter(instance);
        kfree(instance);

        return status;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(struct vchiq_instance *instance)
{
        return instance->connected;
}

enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
        enum vchiq_status status;
        struct vchiq_state *state = instance->state;

        if (mutex_lock_killable(&state->mutex)) {
                vchiq_log_trace(vchiq_core_log_level,
                        "%s: call to mutex_lock failed", __func__);
                status = VCHIQ_RETRY;
                goto failed;
        }
        status = vchiq_connect_internal(state, instance);

        if (status == VCHIQ_SUCCESS)
                instance->connected = 1;

        mutex_unlock(&state->mutex);

failed:
        vchiq_log_trace(vchiq_core_log_level,
                "%s(%p): returning %d", __func__, instance, status);

        return status;
}
EXPORT_SYMBOL(vchiq_connect);

static enum vchiq_status
vchiq_add_service(struct vchiq_instance *instance,
                  const struct vchiq_service_params_kernel *params,
                  unsigned int *phandle)
{
        enum vchiq_status status;
        struct vchiq_state *state = instance->state;
        struct vchiq_service *service = NULL;
        int srvstate;

        *phandle = VCHIQ_SERVICE_HANDLE_INVALID;

        srvstate = vchiq_is_connected(instance)
                ? VCHIQ_SRVSTATE_LISTENING
                : VCHIQ_SRVSTATE_HIDDEN;

        service = vchiq_add_service_internal(
                state,
                params,
                srvstate,
                instance,
                NULL);

        if (service) {
                *phandle = service->handle;
                status = VCHIQ_SUCCESS;
        } else {
                status = VCHIQ_ERROR;
        }

        vchiq_log_trace(vchiq_core_log_level,
                "%s(%p): returning %d", __func__, instance, status);

        return status;
}

enum vchiq_status
vchiq_open_service(struct vchiq_instance *instance,
                   const struct vchiq_service_params_kernel *params,
                   unsigned int *phandle)
{
        enum vchiq_status   status = VCHIQ_ERROR;
        struct vchiq_state   *state = instance->state;
        struct vchiq_service *service = NULL;

        *phandle = VCHIQ_SERVICE_HANDLE_INVALID;

        if (!vchiq_is_connected(instance))
                goto failed;

        service = vchiq_add_service_internal(state,
                params,
                VCHIQ_SRVSTATE_OPENING,
                instance,
                NULL);

        if (service) {
                *phandle = service->handle;
                status = vchiq_open_service_internal(service, current->pid);
                if (status != VCHIQ_SUCCESS) {
                        vchiq_remove_service(service->handle);
                        *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
                }
        }

failed:
        vchiq_log_trace(vchiq_core_log_level,
                "%s(%p): returning %d", __func__, instance, status);

        return status;
}
EXPORT_SYMBOL(vchiq_open_service);

enum vchiq_status
vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
                    void *userdata, enum vchiq_bulk_mode mode)
{
        enum vchiq_status status;

        while (1) {
                switch (mode) {
                case VCHIQ_BULK_MODE_NOCALLBACK:
                case VCHIQ_BULK_MODE_CALLBACK:
                        status = vchiq_bulk_transfer(handle,
                                                     (void *)data, NULL,
                                                     size, userdata, mode,
                                                     VCHIQ_BULK_TRANSMIT);
                        break;
                case VCHIQ_BULK_MODE_BLOCKING:
                        status = vchiq_blocking_bulk_transfer(handle,
                                (void *)data, size, VCHIQ_BULK_TRANSMIT);
                        break;
                default:
                        return VCHIQ_ERROR;
                }

                /*
                 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
                 * to implement a retry mechanism since this function is
                 * supposed to block until queued
                 */
                if (status != VCHIQ_RETRY)
                        break;

                msleep(1);
        }

        return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
                                     unsigned int size, void *userdata,
                                     enum vchiq_bulk_mode mode)
{
        enum vchiq_status status;

        while (1) {
                switch (mode) {
                case VCHIQ_BULK_MODE_NOCALLBACK:
                case VCHIQ_BULK_MODE_CALLBACK:
                        status = vchiq_bulk_transfer(handle, data, NULL,
                                                     size, userdata,
                                                     mode, VCHIQ_BULK_RECEIVE);
                        break;
                case VCHIQ_BULK_MODE_BLOCKING:
                        status = vchiq_blocking_bulk_transfer(handle,
                                (void *)data, size, VCHIQ_BULK_RECEIVE);
                        break;
                default:
                        return VCHIQ_ERROR;
                }

                /*
                 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
                 * to implement a retry mechanism since this function is
                 * supposed to block until queued
                 */
                if (status != VCHIQ_RETRY)
                        break;

                msleep(1);
        }

        return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
                             enum vchiq_bulk_dir dir)
{
        struct vchiq_instance *instance;
        struct vchiq_service *service;
        enum vchiq_status status;
        struct bulk_waiter_node *waiter = NULL;
        bool found = false;

        service = find_service_by_handle(handle);
        if (!service)
                return VCHIQ_ERROR;

        instance = service->instance;

        vchiq_service_put(service);

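        /*
         * A bulk transfer interrupted by a signal leaves its waiter on
         * this per-instance list, keyed by pid; look for one so that a
         * retry by the same thread can pick up the outstanding transfer.
         */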
        mutex_lock(&instance->bulk_waiter_list_mutex);
        list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
                if (waiter->pid == current->pid) {
                        list_del(&waiter->list);
                        found = true;
                        break;
                }
        }
        mutex_unlock(&instance->bulk_waiter_list_mutex);

        if (found) {
                struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

                if (bulk) {
                        /* This thread has an outstanding bulk transfer. */
                        /* FIXME: why compare a dma address to a pointer? */
                        if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
                                (bulk->size != size)) {
                                /*
                                 * This is not a retry of the previous one.
                                 * Cancel the signal when the transfer completes.
                                 */
                                spin_lock(&bulk_waiter_spinlock);
                                bulk->userdata = NULL;
                                spin_unlock(&bulk_waiter_spinlock);
                        }
                }
        } else {
                waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
                if (!waiter) {
                        vchiq_log_error(vchiq_core_log_level,
                                "%s - out of memory", __func__);
                        return VCHIQ_ERROR;
                }
        }

        status = vchiq_bulk_transfer(handle, data, NULL, size,
                                     &waiter->bulk_waiter,
                                     VCHIQ_BULK_MODE_BLOCKING, dir);
        if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
                !waiter->bulk_waiter.bulk) {
                struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

                if (bulk) {
                        /* Cancel the signal when the transfer completes. */
                        spin_lock(&bulk_waiter_spinlock);
                        bulk->userdata = NULL;
                        spin_unlock(&bulk_waiter_spinlock);
                }
                kfree(waiter);
        } else {
                waiter->pid = current->pid;
                mutex_lock(&instance->bulk_waiter_list_mutex);
                list_add(&waiter->list, &instance->bulk_waiter_list);
                mutex_unlock(&instance->bulk_waiter_list_mutex);
                vchiq_log_info(vchiq_arm_log_level,
                                "saved bulk_waiter %pK for pid %d",
                                waiter, current->pid);
        }

        return status;
}

static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
               struct vchiq_header *header, struct user_service *user_service,
               void *bulk_userdata)
{
        struct vchiq_completion_data_kernel *completion;
        int insert;

        DEBUG_INITIALISE(g_state.local)

        insert = instance->completion_insert;
        while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
                /* Out of space - wait for the client */
                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                vchiq_log_trace(vchiq_arm_log_level,
                        "%s - completion queue full", __func__);
                DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
                if (wait_for_completion_interruptible(
                                        &instance->remove_event)) {
                        vchiq_log_info(vchiq_arm_log_level,
                                "service_callback interrupted");
                        return VCHIQ_RETRY;
                } else if (instance->closing) {
                        vchiq_log_info(vchiq_arm_log_level,
                                "service_callback closing");
                        return VCHIQ_SUCCESS;
                }
                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
        }

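        /*
         * The completion queue is a ring indexed by free-running counters;
         * masking with (MAX_COMPLETIONS - 1) assumes MAX_COMPLETIONS is a
         * power of two.
         */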
        completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

        completion->header = header;
        completion->reason = reason;
        /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
        completion->service_userdata = user_service->service;
        completion->bulk_userdata = bulk_userdata;

        if (reason == VCHIQ_SERVICE_CLOSED) {
                /*
                 * Take an extra reference, to be held until
                 * this CLOSED notification is delivered.
                 */
                vchiq_service_get(user_service->service);
                if (instance->use_close_delivered)
                        user_service->close_pending = 1;
        }

        /*
         * A write barrier is needed here to ensure that the entire completion
         * record is written out before the insert point.
         */
        wmb();

        if (reason == VCHIQ_MESSAGE_AVAILABLE)
                user_service->message_available_pos = insert;

        insert++;
        instance->completion_insert = insert;

        complete(&instance->insert_event);

        return VCHIQ_SUCCESS;
}

enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
                 unsigned int handle, void *bulk_userdata)
{
        /*
         * How do we ensure the callback goes to the right client?
         * The service_user data points to a user_service record
         * containing the original callback and the user state structure, which
         * contains a circular buffer for completion records.
         */
        struct user_service *user_service;
        struct vchiq_service *service;
        struct vchiq_instance *instance;
        bool skip_completion = false;

        DEBUG_INITIALISE(g_state.local)

        DEBUG_TRACE(SERVICE_CALLBACK_LINE);

        rcu_read_lock();
        service = handle_to_service(handle);
        if (WARN_ON(!service)) {
                rcu_read_unlock();
                return VCHIQ_SUCCESS;
        }

        user_service = (struct user_service *)service->base.userdata;
        instance = user_service->instance;

        if (!instance || instance->closing) {
                rcu_read_unlock();
                return VCHIQ_SUCCESS;
        }

        /*
         * Because this code hops between different synchronization
         * mechanisms, holding an extra reference to the service keeps the
         * implementation simpler.
         */
        vchiq_service_get(service);
        rcu_read_unlock();

        vchiq_log_trace(vchiq_arm_log_level,
                "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
                __func__, (unsigned long)user_service,
                service->localport, user_service->userdata,
                reason, (unsigned long)header,
                (unsigned long)instance, (unsigned long)bulk_userdata);

        if (header && user_service->is_vchi) {
                spin_lock(&msg_queue_spinlock);
                while (user_service->msg_insert ==
                        (user_service->msg_remove + MSG_QUEUE_SIZE)) {
                        spin_unlock(&msg_queue_spinlock);
                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                        DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
                        vchiq_log_trace(vchiq_arm_log_level,
                                "service_callback - msg queue full");
                        /*
                         * If there is no MESSAGE_AVAILABLE in the completion
                         * queue, add one
                         */
                        if ((user_service->message_available_pos -
                                instance->completion_remove) < 0) {
                                enum vchiq_status status;

                                vchiq_log_info(vchiq_arm_log_level,
                                        "Inserting extra MESSAGE_AVAILABLE");
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                                status = add_completion(instance, reason,
                                        NULL, user_service, bulk_userdata);
                                if (status != VCHIQ_SUCCESS) {
                                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                                        vchiq_service_put(service);
                                        return status;
                                }
                        }

                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                        if (wait_for_completion_interruptible(
                                                &user_service->remove_event)) {
                                vchiq_log_info(vchiq_arm_log_level,
                                        "%s interrupted", __func__);
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                                vchiq_service_put(service);
                                return VCHIQ_RETRY;
                        } else if (instance->closing) {
                                vchiq_log_info(vchiq_arm_log_level,
                                        "%s closing", __func__);
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                                vchiq_service_put(service);
                                return VCHIQ_ERROR;
                        }
                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                        spin_lock(&msg_queue_spinlock);
                }

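                /*
                 * Queue the message; the msg_queue ring is indexed by
                 * free-running counters, so the mask assumes MSG_QUEUE_SIZE
                 * is a power of two.
                 */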
                user_service->msg_queue[user_service->msg_insert &
                        (MSG_QUEUE_SIZE - 1)] = header;
                user_service->msg_insert++;

                /*
                 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
                 * there is a MESSAGE_AVAILABLE in the completion queue then
                 * bypass the completion queue.
                 */
                if (((user_service->message_available_pos -
                        instance->completion_remove) >= 0) ||
                        user_service->dequeue_pending) {
                        user_service->dequeue_pending = 0;
                        skip_completion = true;
                }

                spin_unlock(&msg_queue_spinlock);
                complete(&user_service->insert_event);

                header = NULL;
        }
        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
        vchiq_service_put(service);

        if (skip_completion)
                return VCHIQ_SUCCESS;

        return add_completion(instance, reason, header, user_service,
                bulk_userdata);
}

int vchiq_dump(void *dump_context, const char *str, int len)
{
        struct dump_context *context = (struct dump_context *)dump_context;
        int copy_bytes;

        if (context->actual >= context->space)
                return 0;

        if (context->offset > 0) {
                int skip_bytes = min_t(int, len, context->offset);

                str += skip_bytes;
                len -= skip_bytes;
                context->offset -= skip_bytes;
                if (context->offset > 0)
                        return 0;
        }
        copy_bytes = min_t(int, len, context->space - context->actual);
        if (copy_bytes == 0)
                return 0;
        if (copy_to_user(context->buf + context->actual, str,
                         copy_bytes))
                return -EFAULT;
        context->actual += copy_bytes;
        len -= copy_bytes;

        /*
         * If the terminating NUL is included in the length, then it
         * marks the end of a line and should be replaced with a
         * newline character.
         */
1274         if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1275                 char cr = '\n';
1276
1277                 if (copy_to_user(context->buf + context->actual - 1,
1278                                  &cr, 1))
1279                         return -EFAULT;
1280         }
1281         return 0;
1282 }
1283
1284 int vchiq_dump_platform_instances(void *dump_context)
1285 {
1286         struct vchiq_state *state = vchiq_get_state();
1287         char buf[80];
1288         int len;
1289         int i;
1290
1291         if (!state)
1292                 return -ENOTCONN;
1293
1294         /*
1295          * There is no list of instances, so instead scan all services,
1296          * marking those that have been dumped.
1297          */
1298
1299         rcu_read_lock();
1300         for (i = 0; i < state->unused_service; i++) {
1301                 struct vchiq_service *service;
1302                 struct vchiq_instance *instance;
1303
1304                 service = rcu_dereference(state->services[i]);
1305                 if (!service || service->base.callback != service_callback)
1306                         continue;
1307
1308                 instance = service->instance;
1309                 if (instance)
1310                         instance->mark = 0;
1311         }
1312         rcu_read_unlock();
1313
1314         for (i = 0; i < state->unused_service; i++) {
1315                 struct vchiq_service *service;
1316                 struct vchiq_instance *instance;
1317                 int err;
1318
1319                 rcu_read_lock();
1320                 service = rcu_dereference(state->services[i]);
1321                 if (!service || service->base.callback != service_callback) {
1322                         rcu_read_unlock();
1323                         continue;
1324                 }
1325
1326                 instance = service->instance;
1327                 if (!instance || instance->mark) {
1328                         rcu_read_unlock();
1329                         continue;
1330                 }
1331                 rcu_read_unlock();
1332
1333                 len = snprintf(buf, sizeof(buf),
1334                                "Instance %pK: pid %d,%s completions %d/%d",
1335                                instance, instance->pid,
1336                                instance->connected ? " connected, " :
1337                                "",
1338                                instance->completion_insert -
1339                                instance->completion_remove,
1340                                MAX_COMPLETIONS);
1341                 err = vchiq_dump(dump_context, buf, len + 1);
1342                 if (err)
1343                         return err;
1344                 instance->mark = 1;
1345         }
1346         return 0;
1347 }
1348
1349 int vchiq_dump_platform_service_state(void *dump_context,
1350                                       struct vchiq_service *service)
1351 {
1352         struct user_service *user_service =
1353                         (struct user_service *)service->base.userdata;
1354         char buf[80];
1355         int len;
1356
1357         len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1358
1359         if ((service->base.callback == service_callback) &&
1360                 user_service->is_vchi) {
1361                 len += scnprintf(buf + len, sizeof(buf) - len,
1362                         ", %d/%d messages",
1363                         user_service->msg_insert - user_service->msg_remove,
1364                         MSG_QUEUE_SIZE);
1365
1366                 if (user_service->dequeue_pending)
1367                         len += scnprintf(buf + len, sizeof(buf) - len,
1368                                 " (dequeue pending)");
1369         }
1370
1371         return vchiq_dump(dump_context, buf, len + 1);
1372 }
1373
1374 struct vchiq_state *
1375 vchiq_get_state(void)
1376 {
1377
1378         if (!g_state.remote)
1379                 pr_err("%s: g_state.remote == NULL\n", __func__);
1380         else if (g_state.remote->initialised != 1)
1381                 pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1382                           __func__, g_state.remote->initialised);
1383
1384         return (g_state.remote &&
1385                 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1386 }
1387
1388 /*
1389  * Autosuspend related functionality
1390  */
1391
1392 static enum vchiq_status
1393 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
1394                                struct vchiq_header *header,
1395                                unsigned int service_user, void *bulk_user)
1396 {
1397         vchiq_log_error(vchiq_susp_log_level,
1398                 "%s callback reason %d", __func__, reason);
1399         return 0;
1400 }

static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s vchiq_initialise failed %d", __func__, ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s vchiq_add_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level,
					"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * Read and clear the counters. Take release_count before
		 * use_count: a use/release pair that lands between the two
		 * reads is then seen as a use now and a release on the next
		 * pass, never as a release without its matching use, so the
		 * balance cannot go negative.
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative.
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d",
						__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d",
						__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}

void
vchiq_arm_init_state(struct vchiq_state *state,
		     struct vchiq_arm_state *arm_state)
{
	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		arm_state->state = state;
		arm_state->first_connect = 0;
	}
}

int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;
	int local_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ:   ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
		ret = -EINVAL;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	++(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	if (!ret) {
		enum vchiq_status status = VCHIQ_SUCCESS;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

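		/*
		 * Drain the acks queued by vchiq_keepalive_thread_func():
		 * each one tells the VPU that a remote-use request is now
		 * active. On failure the remainder is put back on
		 * ka_use_ack_count so a later use can retry.
		 */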
		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt,
					   &arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		sprintf(entity, "PEER:   ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc,
			arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

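/*
 * Handlers for the VPU's remote use/release requests. They are called
 * from the message-handling path and must not block, so they only count
 * the request and wake the keepalive thread, which performs the actual
 * use/release on its own vchiq connection.
 */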
void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}

int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}

struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}

int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int use_count = 0, i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		use_count += service->service_use_count;
	rcu_read_unlock();
	return use_count;
}

int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}

int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}

void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
	struct vchiq_service *service;
	int i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		service->trace = trace;
	rcu_read_unlock();
	instance->trace = (trace != 0);
}

enum vchiq_status
vchiq_use_service(unsigned int handle)
{
	enum vchiq_status ret = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	if (service) {
		ret = vchiq_use_internal(service->state, service,
					 USE_TYPE_SERVICE);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_use_service);

enum vchiq_status
vchiq_release_service(unsigned int handle)
{
	enum vchiq_status ret = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	if (service) {
		ret = vchiq_release_internal(service->state, service);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_release_service);
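
/*
 * A minimal sketch of how an in-kernel client is expected to pair these
 * exports (hypothetical caller; the handle comes from vchiq_open_service()
 * or vchiq_add_service()):
 *
 *	if (vchiq_use_service(handle) == VCHIQ_SUCCESS) {
 *		// ... exchange messages while the VPU is held active ...
 *		vchiq_release_service(handle);
 *	}
 *
 * Unbalanced calls trip the WARN_ON()s in vchiq_release_internal().
 */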

struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};

void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";
	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level,
				  "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
				  "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid,
				  service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
			  "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
			  "--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
}

enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	enum vchiq_status ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s ERROR - %c%c%c%c:%d service count %d, state count %d",
				__func__, VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				service->client_id, service->service_use_count,
				arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

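/*
 * Called on connection-state transitions. The only work done here is a
 * one-shot action: the first time the state reaches CONNECTED, the
 * keepalive thread is created and started; first_connect guards against
 * repeating this on later transitions.
 */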
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}

static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{ .compatible = "brcm,bcm2711-vchiq", .data = &bcm2711_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static struct platform_device *
vchiq_register_child(struct platform_device *pdev, const char *name)
{
	struct platform_device_info pdevinfo;
	struct platform_device *child;
	struct device_node *np;

	memset(&pdevinfo, 0, sizeof(pdevinfo));

	pdevinfo.parent = &pdev->dev;
	pdevinfo.name = name;
	pdevinfo.id = PLATFORM_DEVID_NONE;
	pdevinfo.dma_mask = DMA_BIT_MASK(32);

	np = of_get_child_by_name(pdev->dev.of_node, name);

	/* Skip the child if it is explicitly disabled */
	if (np && !of_device_is_available(np)) {
		of_node_put(np);
		return NULL;
	}

	child = platform_device_register_full(&pdevinfo);
	if (IS_ERR(child)) {
		dev_warn(&pdev->dev, "%s not registered\n", name);
		of_node_put(np);
		return NULL;
	}

	child->dev.of_node = np;

	/*
	 * We want the dma-ranges etc. from the parent VCHIQ device to be
	 * passed on to children that have no node of their own.
	 */
	if (!np)
		np = pdev->dev.of_node;

	of_dma_configure(&child->dev, np, true);

	if (np != pdev->dev.of_node)
		of_node_put(np);

	return child;
}
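
/*
 * A sketch of the devicetree shape this function assumes (the node and
 * child names here are illustrative; see vchiq_probe() for the names
 * actually registered). A child subnode is optional, and setting its
 * status to "disabled" makes vchiq_register_child() skip that device:
 *
 *	vchiq {
 *		compatible = "brcm,bcm2835-vchiq";
 *		...
 *		bcm2835-camera {
 *			status = "disabled";
 *		};
 *	};
 */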

static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	int err;

	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = of_id ? (struct vchiq_drvdata *)of_id->data : NULL;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq: platform initialised - version %d (min %d)",
		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since vchiq_register_chrdev() cleans up
	 * after itself on failure.
	 */
	err = vchiq_register_chrdev(&pdev->dev);
	if (err) {
		vchiq_log_warning(vchiq_arm_log_level,
				  "Failed to initialize vchiq cdev");
		goto error_exit;
	}

	vcsm_cma = vchiq_register_child(pdev, "vcsm-cma");
	bcm2835_codec = vchiq_register_child(pdev, "bcm2835-codec");
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
	bcm2835_isp = vchiq_register_child(pdev, "bcm2835-isp");

	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
error_exit:
	return err;
}
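
/*
 * Note: vchiq_register_child() may return NULL (disabled node or failed
 * registration). That is tolerated because platform_device_unregister()
 * treats a NULL argument as a no-op, so vchiq_remove() below can
 * unregister the children unconditionally.
 */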

static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_isp);
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	platform_device_unregister(bcm2835_codec);
	platform_device_unregister(vcsm_cma);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	return 0;
}

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};

static int __init vchiq_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&vchiq_driver);
	if (ret)
		pr_err("Failed to register vchiq driver\n");

	return ret;
}
module_init(vchiq_driver_init);

static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");