1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
14 #include <linux/device.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmapool.h>
26 #include <linux/rcupdate.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
31 #include <linux/uaccess.h>
32 #include <soc/bcm2835/raspberrypi-firmware.h>
34 #include "vchiq_core.h"
35 #include "vchiq_ioctl.h"
36 #include "vchiq_arm.h"
37 #include "vchiq_debugfs.h"
38 #include "vchiq_connected.h"
39 #include "vchiq_pagelist.h"
41 #define DEVICE_NAME "vchiq"
43 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
45 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
47 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
48 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
53 #define ARM_DS_ACTIVE BIT(2)
55 #define VCHIQ_DMA_POOL_SIZE PAGE_SIZE
57 /* Override the default prefix, which would be vchiq_arm (from the filename) */
58 #undef MODULE_PARAM_PREFIX
59 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
61 #define KEEPALIVE_VER 1
62 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
64 /* Run time control of log level, based on KERN_XXX level. */
65 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
66 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
67 module_param_named(arm_log_level, vchiq_arm_log_level, int, 0644);
68 module_param_named(susp_log_level, vchiq_susp_log_level, int, 0644);
69 module_param_named(core_log_level, vchiq_core_log_level, int, 0644);
70 module_param_named(core_msg_log_level, vchiq_core_msg_log_level, int, 0644);
71 module_param_named(sync_log_level, vchiq_sync_log_level, int, 0644);
73 DEFINE_SPINLOCK(msg_queue_spinlock);
74 struct vchiq_state g_state;
76 static struct platform_device *bcm2835_camera;
77 static struct platform_device *bcm2835_audio;
78 static struct platform_device *bcm2835_codec;
79 static struct platform_device *vcsm_cma;
80 static struct platform_device *bcm2835_isp;
/* Per-SoC configuration tables, selected via the platform match data.
 * cache_line_size is the VPU L2 cache line size used to align pagelist
 * fragments; use_36bit_addrs (bcm2711 only) switches the pagelist encoding
 * to the 40-bit DMA controller's packed 36-bit address format.
 */
82 static struct vchiq_drvdata bcm2835_drvdata = {
83 .cache_line_size = 32,
86 static struct vchiq_drvdata bcm2836_drvdata = {
87 .cache_line_size = 64,
90 static struct vchiq_drvdata bcm2711_drvdata = {
91 .cache_line_size = 64,
92 .use_36bit_addrs = true,
/* Platform-private state embedded in struct vchiq_state::platform_state;
 * wraps the generic ARM-side state.
 */
95 struct vchiq_2835_state {
97 struct vchiq_arm_state arm_state;
/* Bookkeeping for one in-flight bulk-transfer pagelist: the DMA-visible
 * pagelist itself, its allocation size/origin, the pinned pages and the
 * scatterlist built over them. Freed via cleanup_pagelistinfo().
 */
100 struct vchiq_pagelist_info {
101 struct pagelist *pagelist;
102 size_t pagelist_buffer_size;
105 enum dma_data_direction dma_dir;
106 unsigned int num_pages;
/* non-zero when pages were pinned with pin_user_pages_fast() and must be
 * released with unpin_user_pages() */
107 unsigned int pages_need_release;
109 struct scatterlist *scatterlist;
110 unsigned int scatterlist_mapped;
113 static void __iomem *g_regs;
114 /* This value is the size of the L2 cache lines as understood by the
115 * VPU firmware, which determines the required alignment of the
116 * offsets/sizes in pagelists.
118 * Modern VPU firmware looks for a DT "cache-line-size" property in
119 * the VCHIQ node and will overwrite it with the actual L2 cache size,
120 * which the kernel must then respect. That property was rejected
121 * upstream, so we have to use the VPU firmware's compatibility value
124 static unsigned int g_cache_line_size = 32;
125 static struct dma_pool *g_dma_pool;
126 static unsigned int g_use_36bit_addrs = 0;
127 static unsigned int g_fragments_size;
128 static char *g_fragments_base;
129 static char *g_free_fragments;
130 static struct semaphore g_free_fragments_sema;
131 static struct device *g_dev;
132 static struct device *g_dma_dev;
134 static DEFINE_SEMAPHORE(g_free_fragments_mutex);
136 static enum vchiq_status
137 vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
138 unsigned int size, enum vchiq_bulk_dir dir);
/* Interrupt handler for the VPU->ARM doorbell. Reading BELL0 both returns
 * and clears the doorbell status; if the ARM doorbell was rung, poll all
 * remote events on the shared state.
 */
141 vchiq_doorbell_irq(int irq, void *dev_id)
143 struct vchiq_state *state = dev_id;
144 irqreturn_t ret = IRQ_NONE;
147 /* Read (and clear) the doorbell */
148 status = readl(g_regs + BELL0);
150 if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? */
151 remote_event_pollall(state);
/* Tear down a pagelistinfo in reverse order of construction: unmap the
 * scatterlist if it was DMA-mapped, unpin user pages if they were pinned,
 * then return the backing buffer either to the DMA pool or to the coherent
 * allocator, depending on where create_pagelist() got it from.
 */
159 cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
161 if (pagelistinfo->scatterlist_mapped) {
162 dma_unmap_sg(g_dma_dev, pagelistinfo->scatterlist,
163 pagelistinfo->num_pages, pagelistinfo->dma_dir);
166 if (pagelistinfo->pages_need_release)
167 unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
169 if (pagelistinfo->is_from_pool) {
170 dma_pool_free(g_dma_pool, pagelistinfo->pagelist,
171 pagelistinfo->dma_addr);
173 dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
174 pagelistinfo->pagelist,
175 pagelistinfo->dma_addr);
179 /* There is a potential problem with partial cache lines (pages?)
180 * at the ends of the block when reading. If the CPU accessed anything in
181 * the same line (page?) then it may have pulled old data into the cache,
182 * obscuring the new data underneath. We can solve this by transferring the
183 * partial cache lines separately, and allowing the ARM to copy into the
/* Build a VPU-visible pagelist describing the buffer for a bulk transfer.
 * Exactly one of @buf (kernel/vmalloc buffer) or @ubuf (userspace buffer)
 * is used; @type is PAGELIST_READ or PAGELIST_WRITE. On success returns a
 * vchiq_pagelist_info carved out of the same DMA allocation as the
 * pagelist; returns NULL on failure (error paths call
 * cleanup_pagelistinfo()). NOTE(review): several lines of this function
 * are not visible in this listing, so the documentation below covers only
 * what is shown.
 */
187 static struct vchiq_pagelist_info *
188 create_pagelist(char *buf, char __user *ubuf,
189 size_t count, unsigned short type)
191 struct pagelist *pagelist;
192 struct vchiq_pagelist_info *pagelistinfo;
195 unsigned int num_pages, offset, i, k;
198 size_t pagelist_size;
199 struct scatterlist *scatterlist, *sg;
203 if (count >= INT_MAX - PAGE_SIZE)
/* Page offset of the start of the buffer, from whichever pointer is in use */
207 offset = (uintptr_t)buf & (PAGE_SIZE - 1);
209 offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
210 num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
/* Overflow check: ensure the combined allocation below cannot wrap SIZE_MAX */
212 if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
213 sizeof(struct vchiq_pagelist_info)) /
214 (sizeof(u32) + sizeof(pages[0]) +
215 sizeof(struct scatterlist)))
/* One allocation holds: pagelist + addrs[] + pages[] + scatterlist[] + info */
218 pagelist_size = sizeof(struct pagelist) +
219 (num_pages * sizeof(u32)) +
220 (num_pages * sizeof(pages[0]) +
221 (num_pages * sizeof(struct scatterlist))) +
222 sizeof(struct vchiq_pagelist_info);
224 /* Allocate enough storage to hold the page pointers and the page
/* Small requests come from the DMA pool; larger ones from dma_alloc_coherent */
227 if (pagelist_size > VCHIQ_DMA_POOL_SIZE) {
228 pagelist = dma_alloc_coherent(g_dev,
232 is_from_pool = false;
234 pagelist = dma_pool_alloc(g_dma_pool, GFP_KERNEL, &dma_addr);
238 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
/* Carve the sub-arrays out of the single allocation, in order */
243 addrs = pagelist->addrs;
244 pages = (struct page **)(addrs + num_pages);
245 scatterlist = (struct scatterlist *)(pages + num_pages);
246 pagelistinfo = (struct vchiq_pagelist_info *)
247 (scatterlist + num_pages);
249 pagelist->length = count;
250 pagelist->type = type;
251 pagelist->offset = offset;
253 /* Populate the fields of the pagelistinfo structure */
254 pagelistinfo->pagelist = pagelist;
255 pagelistinfo->pagelist_buffer_size = pagelist_size;
256 pagelistinfo->dma_addr = dma_addr;
257 pagelistinfo->is_from_pool = is_from_pool;
258 pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
259 DMA_TO_DEVICE : DMA_FROM_DEVICE;
260 pagelistinfo->num_pages = num_pages;
261 pagelistinfo->pages_need_release = 0;
262 pagelistinfo->pages = pages;
263 pagelistinfo->scatterlist = scatterlist;
264 pagelistinfo->scatterlist_mapped = 0;
/* Kernel (vmalloc) buffer: translate each page via vmalloc_to_page() */
267 unsigned long length = count;
268 unsigned int off = offset;
270 for (actual_pages = 0; actual_pages < num_pages;
273 vmalloc_to_page((buf +
274 (actual_pages * PAGE_SIZE)));
275 size_t bytes = PAGE_SIZE - off;
278 cleanup_pagelistinfo(pagelistinfo);
284 pages[actual_pages] = pg;
288 /* do not try and release vmalloc pages */
/* Userspace buffer: pin the pages (writable when this is a READ into them) */
290 actual_pages = pin_user_pages_fast(
291 (unsigned long)ubuf & PAGE_MASK,
293 type == PAGELIST_READ,
296 if (actual_pages != num_pages) {
297 vchiq_log_info(vchiq_arm_log_level,
298 "%s - only %d/%d pages locked",
299 __func__, actual_pages, num_pages);
301 /* This is probably due to the process being killed */
302 if (actual_pages > 0)
303 unpin_user_pages(pages, actual_pages);
304 cleanup_pagelistinfo(pagelistinfo);
307 /* release user pages */
308 pagelistinfo->pages_need_release = 1;
312 * Initialize the scatterlist so that the magic cookie
313 * is filled if debugging is enabled
315 sg_init_table(scatterlist, num_pages);
316 /* Now set the pages for each scatterlist */
317 for (i = 0; i < num_pages; i++) {
318 unsigned int len = PAGE_SIZE - offset;
322 sg_set_page(scatterlist + i, pages[i], len, offset);
327 dma_buffers = dma_map_sg(g_dma_dev,
330 pagelistinfo->dma_dir);
332 if (dma_buffers == 0) {
333 cleanup_pagelistinfo(pagelistinfo);
337 pagelistinfo->scatterlist_mapped = 1;
339 /* Combine adjacent blocks for performance */
/* 36-bit path (bcm2711): addrs[] entries pack (addr >> 4) with an 8-bit
 * page count in the low byte, so runs are capped at 0x100 pages */
341 if (g_use_36bit_addrs) {
342 for_each_sg(scatterlist, sg, dma_buffers, i) {
343 u32 len = sg_dma_len(sg);
344 u64 addr = sg_dma_address(sg);
345 u32 page_id = (u32)((addr >> 4) & ~0xff);
346 u32 sg_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
348 /* Note: addrs is the address + page_count - 1
349 * The firmware expects blocks after the first to be page-
350 * aligned and a multiple of the page size
354 (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
355 WARN_ON(i && (addr & ~PAGE_MASK));
356 WARN_ON(upper_32_bits(addr) > 0xf);
358 ((addrs[k - 1] & ~0xff) +
359 (((addrs[k - 1] & 0xff) + 1) << 8)
361 u32 inc_pages = min(sg_pages,
362 0xff - (addrs[k - 1] & 0xff));
363 addrs[k - 1] += inc_pages;
364 page_id += inc_pages << 8;
365 sg_pages -= inc_pages;
368 u32 inc_pages = min(sg_pages, 0x100u);
369 addrs[k++] = page_id | (inc_pages - 1);
370 page_id += inc_pages << 8;
371 sg_pages -= inc_pages;
/* 32-bit path: addrs[] entries are page-masked address | (pages - 1);
 * merge an entry into its predecessor when it is physically contiguous */
375 for_each_sg(scatterlist, sg, dma_buffers, i) {
376 u32 len = sg_dma_len(sg);
377 u32 addr = sg_dma_address(sg);
378 u32 new_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
380 /* Note: addrs is the address + page_count - 1
381 * The firmware expects blocks after the first to be page-
382 * aligned and a multiple of the page size
385 WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
386 WARN_ON(i && (addr & ~PAGE_MASK));
388 ((addrs[k - 1] & PAGE_MASK) +
389 (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
390 == (addr & PAGE_MASK))
391 addrs[k - 1] += new_pages;
393 addrs[k++] = (addr & PAGE_MASK) | (new_pages - 1);
397 /* Partial cache lines (fragments) require special measures */
398 if ((type == PAGELIST_READ) &&
399 ((pagelist->offset & (g_cache_line_size - 1)) ||
400 ((pagelist->offset + pagelist->length) &
401 (g_cache_line_size - 1)))) {
/* Claim a fragment buffer; may block until one is freed by free_pagelist() */
404 if (down_interruptible(&g_free_fragments_sema)) {
405 cleanup_pagelistinfo(pagelistinfo);
409 WARN_ON(!g_free_fragments);
411 down(&g_free_fragments_mutex);
412 fragments = g_free_fragments;
/* Free fragments form an intrusive singly-linked list through their first bytes */
414 g_free_fragments = *(char **) g_free_fragments;
415 up(&g_free_fragments_mutex);
/* Encode the fragment index into the pagelist type for the firmware */
416 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
417 (fragments - g_fragments_base) / g_fragments_size;
/* Complete a bulk transfer: unmap the scatterlist, copy any head/tail
 * partial-cache-line fragments back into the user pages (READ only),
 * return the fragment buffer to the free list, dirty the pages, and free
 * the pagelistinfo. @actual is the byte count actually transferred.
 */
424 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
427 struct pagelist *pagelist = pagelistinfo->pagelist;
428 struct page **pages = pagelistinfo->pages;
429 unsigned int num_pages = pagelistinfo->num_pages;
431 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
432 __func__, pagelistinfo->pagelist, actual);
435 * NOTE: dma_unmap_sg must be called before the
436 * cpu can touch any of the data/pages.
438 dma_unmap_sg(g_dma_dev, pagelistinfo->scatterlist,
439 pagelistinfo->num_pages, pagelistinfo->dma_dir);
440 pagelistinfo->scatterlist_mapped = 0;
442 /* Deal with any partial cache lines (fragments) */
443 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
/* Recover the fragment buffer address from the index encoded in type */
444 char *fragments = g_fragments_base +
445 (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
447 int head_bytes, tail_bytes;
449 head_bytes = (g_cache_line_size - pagelist->offset) &
450 (g_cache_line_size - 1);
451 tail_bytes = (pagelist->offset + actual) &
452 (g_cache_line_size - 1);
/* Copy the head fragment into the first page (clamped to actual) */
454 if ((actual >= 0) && (head_bytes != 0)) {
455 if (head_bytes > actual)
458 memcpy((char *)kmap(pages[0]) +
/* Copy the tail fragment into the last page */
464 if ((actual >= 0) && (head_bytes < actual) &&
466 memcpy((char *)kmap(pages[num_pages - 1]) +
467 ((pagelist->offset + actual) &
468 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
469 fragments + g_cache_line_size,
471 kunmap(pages[num_pages - 1]);
/* Push the fragment back on the free list and wake any waiter */
474 down(&g_free_fragments_mutex);
475 *(char **)fragments = g_free_fragments;
476 g_free_fragments = fragments;
477 up(&g_free_fragments_mutex);
478 up(&g_free_fragments_sema);
481 /* Need to mark all the pages dirty. */
482 if (pagelist->type != PAGELIST_WRITE &&
483 pagelistinfo->pages_need_release) {
486 for (i = 0; i < num_pages; i++)
487 set_page_dirty(pages[i]);
490 cleanup_pagelistinfo(pagelistinfo);
/* Platform probe-time initialisation: set the DMA mask, allocate the
 * shared slot memory and fragment pool, initialise the core state, map
 * the doorbell registers, request the doorbell IRQ, and hand the slot
 * base address to the VideoCore firmware via the mailbox property
 * interface. Returns 0 on success or a negative errno.
 */
493 int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
495 struct device *dev = &pdev->dev;
496 struct device *dma_dev = NULL;
497 struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
498 struct rpi_firmware *fw = drvdata->fw;
499 struct vchiq_slot_zero *vchiq_slot_zero;
501 dma_addr_t slot_phys;
503 int slot_mem_size, frag_mem_size;
507 * VCHI messages between the CPU and firmware use
508 * 32-bit bus addresses.
510 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
515 g_cache_line_size = drvdata->cache_line_size;
/* Each fragment holds one head and one tail partial cache line */
516 g_fragments_size = 2 * g_cache_line_size;
/* bcm2711: route DMA through the 40-bit DMA controller's device */
518 if (drvdata->use_36bit_addrs) {
519 struct device_node *dma_node =
520 of_find_compatible_node(NULL, NULL, "brcm,bcm2711-dma");
523 struct platform_device *pdev;
525 pdev = of_find_device_by_node(dma_node);
527 dma_dev = &pdev->dev;
528 of_node_put(dma_node);
529 g_use_36bit_addrs = true;
531 dev_err(dev, "40-bit DMA controller not found\n");
536 /* Allocate space for the channels in coherent memory */
537 slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
538 frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
540 slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
541 &slot_phys, GFP_KERNEL);
543 dev_err(dev, "could not allocate DMA memory\n");
547 WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
548 channelbase = slot_phys;
550 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
551 if (!vchiq_slot_zero)
/* Tell the firmware where the fragment region lives, via slot zero */
554 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
555 channelbase + slot_mem_size;
556 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
/* Thread the fragment region into a NULL-terminated free list */
559 g_fragments_base = (char *)slot_mem + slot_mem_size;
561 g_free_fragments = g_fragments_base;
562 for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
563 *(char **)&g_fragments_base[i*g_fragments_size] =
564 &g_fragments_base[(i + 1)*g_fragments_size];
566 *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
567 sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
569 err = vchiq_init_state(state, vchiq_slot_zero);
573 g_regs = devm_platform_ioremap_resource(pdev, 0);
575 return PTR_ERR(g_regs);
577 irq = platform_get_irq(pdev, 0);
581 err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
582 "VCHIQ doorbell", state);
584 dev_err(dev, "failed to register irq=%d\n", irq);
588 /* Send the base address of the slots to VideoCore */
589 err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
590 &channelbase, sizeof(channelbase));
/* Firmware zeroes channelbase on success; non-zero means it refused */
591 if (err || channelbase) {
592 dev_err(dev, "failed to set channelbase\n");
593 return err ? : -ENXIO;
597 g_dma_dev = dma_dev ?: dev;
598 g_dma_pool = dmam_pool_create("vchiq_scatter_pool", dev,
599 VCHIQ_DMA_POOL_SIZE, g_cache_line_size,
602 dev_err(dev, "failed to create dma pool");
606 vchiq_log_info(vchiq_arm_log_level,
607 "vchiq_init - done (slots %pK, phys %pad)",
608 vchiq_slot_zero, &slot_phys);
610 vchiq_call_connected_callbacks();
/* Allocate and initialise the platform-private (2835) state attached to
 * @state, and hook up the generic ARM-side state within it.
 */
616 vchiq_platform_init_state(struct vchiq_state *state)
618 struct vchiq_2835_state *platform_state;
620 state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
621 if (!state->platform_state)
624 platform_state = (struct vchiq_2835_state *)state->platform_state;
626 platform_state->inited = 1;
627 vchiq_arm_init_state(state, &platform_state->arm_state);
/* Return the ARM-side state embedded in the platform-private state.
 * Warns (once) if vchiq_platform_init_state() has not run yet.
 */
632 struct vchiq_arm_state*
633 vchiq_platform_get_arm_state(struct vchiq_state *state)
635 struct vchiq_2835_state *platform_state;
637 platform_state = (struct vchiq_2835_state *)state->platform_state;
639 WARN_ON_ONCE(!platform_state->inited);
641 return &platform_state->arm_state;
/* Signal a remote event to the VPU: barrier to ensure prior writes to
 * shared memory are visible, then ring the ARM->VC doorbell (BELL2).
 */
645 remote_event_signal(struct remote_event *event)
651 dsb(sy); /* data barrier operation */
654 writel(0, g_regs + BELL2); /* trigger vc interrupt */
/* Prepare a bulk transfer: build a pagelist for the kernel (@offset) or
 * user (@uoffset) buffer, store its DMA address in bulk->data, and stash
 * the pagelistinfo in bulk->remote_data so vchiq_complete_bulk() can free
 * it later.
 */
658 vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
659 void __user *uoffset, int size, int dir)
661 struct vchiq_pagelist_info *pagelistinfo;
663 pagelistinfo = create_pagelist(offset, uoffset, size,
664 (dir == VCHIQ_BULK_RECEIVE)
671 bulk->data = pagelistinfo->dma_addr;
674 * Store the pagelistinfo address in remote_data,
675 * which isn't used by the slave.
677 bulk->remote_data = pagelistinfo;
/* Finish a bulk transfer by releasing the pagelist stashed in
 * remote_data; no-op if there is no bulk, no pagelist, or nothing was
 * actually transferred.
 */
683 vchiq_complete_bulk(struct vchiq_bulk *bulk)
685 if (bulk && bulk->remote_data && bulk->actual)
686 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
/* Emit a one-line platform identification into the debugfs dump. */
690 int vchiq_dump_platform_state(void *dump_context)
695 len = snprintf(buf, sizeof(buf),
696 " Platform: 2835 (VC master)");
697 return vchiq_dump(dump_context, buf, len + 1);
/* Number of 500-600us polls to wait for the VideoCore state to come up */
700 #define VCHIQ_INIT_RETRIES 10
/* Allocate a vchiq instance bound to the global state. Polls
 * vchiq_get_state() up to VCHIQ_INIT_RETRIES times because the VideoCore
 * may still be booting; fails if it never becomes ready.
 */
701 int vchiq_initialise(struct vchiq_instance **instance_out)
703 struct vchiq_state *state;
704 struct vchiq_instance *instance = NULL;
708 * VideoCore may not be ready due to boot up timing.
709 * It may never be ready if kernel and firmware are mismatched,so don't
712 for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
713 state = vchiq_get_state();
716 usleep_range(500, 600);
718 if (i == VCHIQ_INIT_RETRIES) {
719 vchiq_log_error(vchiq_core_log_level,
720 "%s: videocore not initialized\n", __func__);
724 vchiq_log_warning(vchiq_core_log_level,
725 "%s: videocore initialized after %d retries\n",
729 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
731 vchiq_log_error(vchiq_core_log_level,
732 "%s: error allocating vchiq instance\n", __func__);
737 instance->connected = 0;
738 instance->state = state;
739 mutex_init(&instance->bulk_waiter_list_mutex);
740 INIT_LIST_HEAD(&instance->bulk_waiter_list);
742 *instance_out = instance;
747 vchiq_log_trace(vchiq_core_log_level,
748 "%s(%p): returning %d", __func__, instance, ret);
752 EXPORT_SYMBOL(vchiq_initialise);
/* Release every bulk_waiter_node still queued on the instance's
 * bulk-waiter list (left behind by blocking transfers that were retried
 * or abandoned).
 */
754 void free_bulk_waiter(struct vchiq_instance *instance)
756 struct bulk_waiter_node *waiter, *next;
758 list_for_each_entry_safe(waiter, next,
759 &instance->bulk_waiter_list, list) {
760 list_del(&waiter->list);
761 vchiq_log_info(vchiq_arm_log_level,
762 "bulk_waiter - cleaned up %pK for pid %d",
763 waiter, waiter->pid);
/* Shut down a vchiq instance: remove all of its services under the state
 * mutex, then free any leftover bulk waiters. The mutex acquisition is
 * killable, so a fatal signal can abort the shutdown.
 */
768 enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
770 enum vchiq_status status = VCHIQ_SUCCESS;
771 struct vchiq_state *state = instance->state;
773 if (mutex_lock_killable(&state->mutex))
776 /* Remove all services */
777 vchiq_shutdown_internal(state, instance);
779 mutex_unlock(&state->mutex);
781 vchiq_log_trace(vchiq_core_log_level,
782 "%s(%p): returning %d", __func__, instance, status);
784 free_bulk_waiter(instance);
789 EXPORT_SYMBOL(vchiq_shutdown);
/* True once vchiq_connect() has succeeded for this instance. */
791 static int vchiq_is_connected(struct vchiq_instance *instance)
793 return instance->connected;
/* Connect the instance to the VideoCore under the state mutex; marks the
 * instance connected on success. Returns VCHIQ_RETRY if the (killable)
 * mutex acquisition is interrupted.
 */
796 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
798 enum vchiq_status status;
799 struct vchiq_state *state = instance->state;
801 if (mutex_lock_killable(&state->mutex)) {
802 vchiq_log_trace(vchiq_core_log_level,
803 "%s: call to mutex_lock failed", __func__);
804 status = VCHIQ_RETRY;
807 status = vchiq_connect_internal(state, instance);
809 if (status == VCHIQ_SUCCESS)
810 instance->connected = 1;
812 mutex_unlock(&state->mutex);
815 vchiq_log_trace(vchiq_core_log_level,
816 "%s(%p): returning %d", __func__, instance, status);
820 EXPORT_SYMBOL(vchiq_connect);
/* Register a new service on the instance. The initial service state
 * depends on connection status: LISTENING when already connected, HIDDEN
 * otherwise. On success the service handle is returned via @phandle.
 */
822 static enum vchiq_status
823 vchiq_add_service(struct vchiq_instance *instance,
824 const struct vchiq_service_params_kernel *params,
825 unsigned int *phandle)
827 enum vchiq_status status;
828 struct vchiq_state *state = instance->state;
829 struct vchiq_service *service = NULL;
832 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
834 srvstate = vchiq_is_connected(instance)
835 ? VCHIQ_SRVSTATE_LISTENING
836 : VCHIQ_SRVSTATE_HIDDEN;
838 service = vchiq_add_service_internal(
846 *phandle = service->handle;
847 status = VCHIQ_SUCCESS;
849 status = VCHIQ_ERROR;
852 vchiq_log_trace(vchiq_core_log_level,
853 "%s(%p): returning %d", __func__, instance, status);
/* Open a service to the VideoCore. Requires the instance to be connected;
 * creates the service in OPENING state and completes the open handshake.
 * On failure after creation, the service is removed and @phandle is reset
 * to the invalid handle.
 */
859 vchiq_open_service(struct vchiq_instance *instance,
860 const struct vchiq_service_params_kernel *params,
861 unsigned int *phandle)
863 enum vchiq_status status = VCHIQ_ERROR;
864 struct vchiq_state *state = instance->state;
865 struct vchiq_service *service = NULL;
867 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
869 if (!vchiq_is_connected(instance))
872 service = vchiq_add_service_internal(state,
874 VCHIQ_SRVSTATE_OPENING,
879 *phandle = service->handle;
880 status = vchiq_open_service_internal(service, current->pid);
881 if (status != VCHIQ_SUCCESS) {
882 vchiq_remove_service(service->handle);
883 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
888 vchiq_log_trace(vchiq_core_log_level,
889 "%s(%p): returning %d", __func__, instance, status);
893 EXPORT_SYMBOL(vchiq_open_service);
/* Queue a bulk transmit to the peer. Callback modes go straight to
 * vchiq_bulk_transfer(); BLOCKING mode uses the blocking helper. The loop
 * retries on VCHIQ_RETRY because this API must block until the transfer
 * is queued.
 */
896 vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
897 void *userdata, enum vchiq_bulk_mode mode)
899 enum vchiq_status status;
903 case VCHIQ_BULK_MODE_NOCALLBACK:
904 case VCHIQ_BULK_MODE_CALLBACK:
905 status = vchiq_bulk_transfer(handle,
907 size, userdata, mode,
908 VCHIQ_BULK_TRANSMIT);
910 case VCHIQ_BULK_MODE_BLOCKING:
911 status = vchiq_blocking_bulk_transfer(handle,
912 (void *)data, size, VCHIQ_BULK_TRANSMIT);
919 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
920 * to implement a retry mechanism since this function is
921 * supposed to block until queued
923 if (status != VCHIQ_RETRY)
931 EXPORT_SYMBOL(vchiq_bulk_transmit);
/* Queue a bulk receive from the peer; mirror image of
 * vchiq_bulk_transmit(), with the same retry-until-queued behaviour for
 * VCHIQ_RETRY.
 */
933 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
934 unsigned int size, void *userdata,
935 enum vchiq_bulk_mode mode)
937 enum vchiq_status status;
941 case VCHIQ_BULK_MODE_NOCALLBACK:
942 case VCHIQ_BULK_MODE_CALLBACK:
943 status = vchiq_bulk_transfer(handle, data, NULL,
945 mode, VCHIQ_BULK_RECEIVE);
947 case VCHIQ_BULK_MODE_BLOCKING:
948 status = vchiq_blocking_bulk_transfer(handle,
949 (void *)data, size, VCHIQ_BULK_RECEIVE);
956 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
957 * to implement a retry mechanism since this function is
958 * supposed to block until queued
960 if (status != VCHIQ_RETRY)
968 EXPORT_SYMBOL(vchiq_bulk_receive);
/* Perform a blocking bulk transfer. A per-pid bulk_waiter_node is reused
 * across retries: if this thread already has one queued on the instance
 * (from a previous VCHIQ_RETRY), it is taken off the list and, unless the
 * parameters match an in-flight transfer, its completion signal is
 * cancelled. On VCHIQ_RETRY with the bulk still outstanding, the waiter
 * is re-queued so a later call can pick it up.
 */
970 static enum vchiq_status
971 vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
972 enum vchiq_bulk_dir dir)
974 struct vchiq_instance *instance;
975 struct vchiq_service *service;
976 enum vchiq_status status;
977 struct bulk_waiter_node *waiter = NULL;
980 service = find_service_by_handle(handle);
984 instance = service->instance;
986 vchiq_service_put(service);
/* Look for a waiter left behind by a previous retry from this thread */
988 mutex_lock(&instance->bulk_waiter_list_mutex);
989 list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
990 if (waiter->pid == current->pid) {
991 list_del(&waiter->list);
996 mutex_unlock(&instance->bulk_waiter_list_mutex);
999 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
1002 /* This thread has an outstanding bulk transfer. */
1003 /* FIXME: why compare a dma address to a pointer? */
1004 if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
1005 (bulk->size != size)) {
1007 * This is not a retry of the previous one.
1008 * Cancel the signal when the transfer completes.
1010 spin_lock(&bulk_waiter_spinlock);
1011 bulk->userdata = NULL;
1012 spin_unlock(&bulk_waiter_spinlock);
1016 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
1018 vchiq_log_error(vchiq_core_log_level,
1019 "%s - out of memory", __func__);
1024 status = vchiq_bulk_transfer(handle, data, NULL, size,
1025 &waiter->bulk_waiter,
1026 VCHIQ_BULK_MODE_BLOCKING, dir);
/* Done (success/failure/fatal signal, or no bulk outstanding): detach */
1027 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
1028 !waiter->bulk_waiter.bulk) {
1029 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
1032 /* Cancel the signal when the transfer completes. */
1033 spin_lock(&bulk_waiter_spinlock);
1034 bulk->userdata = NULL;
1035 spin_unlock(&bulk_waiter_spinlock);
/* VCHIQ_RETRY with bulk outstanding: park the waiter for the next call */
1039 waiter->pid = current->pid;
1040 mutex_lock(&instance->bulk_waiter_list_mutex);
1041 list_add(&waiter->list, &instance->bulk_waiter_list);
1042 mutex_unlock(&instance->bulk_waiter_list_mutex);
1043 vchiq_log_info(vchiq_arm_log_level,
1044 "saved bulk_waiter %pK for pid %d",
1045 waiter, current->pid);
/* Append a completion record to the instance's circular completion queue
 * for delivery to userspace. Blocks (interruptibly) while the queue is
 * full; bails out early if the instance is closing. For SERVICE_CLOSED an
 * extra service reference is taken so the service survives until the
 * notification is delivered.
 */
1051 static enum vchiq_status
1052 add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
1053 struct vchiq_header *header, struct user_service *user_service,
1054 void *bulk_userdata)
1056 struct vchiq_completion_data_kernel *completion;
1059 DEBUG_INITIALISE(g_state.local)
1061 insert = instance->completion_insert;
1062 while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
1063 /* Out of space - wait for the client */
1064 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1065 vchiq_log_trace(vchiq_arm_log_level,
1066 "%s - completion queue full", __func__);
1067 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
1068 if (wait_for_completion_interruptible(
1069 &instance->remove_event)) {
1070 vchiq_log_info(vchiq_arm_log_level,
1071 "service_callback interrupted");
1073 } else if (instance->closing) {
1074 vchiq_log_info(vchiq_arm_log_level,
1075 "service_callback closing");
1076 return VCHIQ_SUCCESS;
1078 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
/* Queue is a power-of-two ring indexed by the unbounded insert counter */
1081 completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
1083 completion->header = header;
1084 completion->reason = reason;
1085 /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
1086 completion->service_userdata = user_service->service;
1087 completion->bulk_userdata = bulk_userdata;
1089 if (reason == VCHIQ_SERVICE_CLOSED) {
1091 * Take an extra reference, to be held until
1092 * this CLOSED notification is delivered.
1094 vchiq_service_get(user_service->service);
1095 if (instance->use_close_delivered)
1096 user_service->close_pending = 1;
1100 * A write barrier is needed here to ensure that the entire completion
1101 * record is written out before the insert point.
1105 if (reason == VCHIQ_MESSAGE_AVAILABLE)
1106 user_service->message_available_pos = insert;
1109 instance->completion_insert = insert;
1111 complete(&instance->insert_event);
1113 return VCHIQ_SUCCESS;
/* Core-to-userspace service callback. Routes events to the owning
 * instance's completion queue; for VCHI services with a message header,
 * the message is first placed in the per-service message ring (waiting,
 * interruptibly, while that ring is full) and the completion may be
 * skipped if a dequeue is already pending or a MESSAGE_AVAILABLE is
 * already queued. An extra service reference is held for the duration.
 */
1117 service_callback(enum vchiq_reason reason, struct vchiq_header *header,
1118 unsigned int handle, void *bulk_userdata)
1121 * How do we ensure the callback goes to the right client?
1122 * The service_user data points to a user_service record
1123 * containing the original callback and the user state structure, which
1124 * contains a circular buffer for completion records.
1126 struct user_service *user_service;
1127 struct vchiq_service *service;
1128 struct vchiq_instance *instance;
1129 bool skip_completion = false;
1131 DEBUG_INITIALISE(g_state.local)
1133 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1136 service = handle_to_service(handle);
1137 if (WARN_ON(!service)) {
1139 return VCHIQ_SUCCESS;
1142 user_service = (struct user_service *)service->base.userdata;
1143 instance = user_service->instance;
/* Drop events for instances that are gone or shutting down */
1145 if (!instance || instance->closing) {
1147 return VCHIQ_SUCCESS;
1151 * As hopping around different synchronization mechanism,
1152 * taking an extra reference results in simpler implementation.
1154 vchiq_service_get(service);
1157 vchiq_log_trace(vchiq_arm_log_level,
1158 "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
1159 __func__, (unsigned long)user_service,
1160 service->localport, user_service->userdata,
1161 reason, (unsigned long)header,
1162 (unsigned long)instance, (unsigned long)bulk_userdata);
1164 if (header && user_service->is_vchi) {
1165 spin_lock(&msg_queue_spinlock)
1166 while (user_service->msg_insert ==
1167 (user_service->msg_remove + MSG_QUEUE_SIZE)) {
1168 spin_unlock(&msg_queue_spinlock);
1169 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1170 DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
1171 vchiq_log_trace(vchiq_arm_log_level,
1172 "service_callback - msg queue full");
1174 * If there is no MESSAGE_AVAILABLE in the completion
1177 if ((user_service->message_available_pos -
1178 instance->completion_remove) < 0) {
1179 enum vchiq_status status;
1181 vchiq_log_info(vchiq_arm_log_level,
1182 "Inserting extra MESSAGE_AVAILABLE");
1183 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1184 status = add_completion(instance, reason,
1185 NULL, user_service, bulk_userdata);
1186 if (status != VCHIQ_SUCCESS) {
1187 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1188 vchiq_service_put(service);
1193 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1194 if (wait_for_completion_interruptible(
1195 &user_service->remove_event)) {
1196 vchiq_log_info(vchiq_arm_log_level,
1197 "%s interrupted", __func__);
1198 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1199 vchiq_service_put(service);
1201 } else if (instance->closing) {
1202 vchiq_log_info(vchiq_arm_log_level,
1203 "%s closing", __func__);
1204 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1205 vchiq_service_put(service);
1208 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1209 spin_lock(&msg_queue_spinlock);
/* Message ring is a power-of-two buffer indexed by unbounded counters */
1212 user_service->msg_queue[user_service->msg_insert &
1213 (MSG_QUEUE_SIZE - 1)] = header;
1214 user_service->msg_insert++;
1217 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
1218 * there is a MESSAGE_AVAILABLE in the completion queue then
1219 * bypass the completion queue.
1221 if (((user_service->message_available_pos -
1222 instance->completion_remove) >= 0) ||
1223 user_service->dequeue_pending) {
1224 user_service->dequeue_pending = 0;
1225 skip_completion = true;
1228 spin_unlock(&msg_queue_spinlock);
1229 complete(&user_service->insert_event);
1233 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1234 vchiq_service_put(service);
1236 if (skip_completion)
1237 return VCHIQ_SUCCESS;
1239 return add_completion(instance, reason, header, user_service,
/* Copy one dump line into the userspace buffer described by
 * dump_context, honouring the caller's byte offset (for seek-style reads)
 * and remaining space; a trailing NUL that ends a line is rewritten (to a
 * newline, per the comment below) in the copied output.
 */
1243 int vchiq_dump(void *dump_context, const char *str, int len)
1245 struct dump_context *context = (struct dump_context *)dump_context;
1248 if (context->actual >= context->space)
/* Skip bytes until the requested read offset is consumed */
1251 if (context->offset > 0) {
1252 int skip_bytes = min_t(int, len, context->offset);
1256 context->offset -= skip_bytes;
1257 if (context->offset > 0)
1260 copy_bytes = min_t(int, len, context->space - context->actual);
1261 if (copy_bytes == 0)
1263 if (copy_to_user(context->buf + context->actual, str,
1266 context->actual += copy_bytes;
1270 * If the terminating NUL is included in the length, then it
1271 * marks the end of a line and should be replaced with a
1274 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1277 if (copy_to_user(context->buf + context->actual - 1,
/* Dump a summary line for each distinct instance. There is no instance
 * list, so the service table is scanned twice under RCU: first to clear
 * per-instance marks, then to print each unmarked instance once (marking
 * it as dumped).
 */
1284 int vchiq_dump_platform_instances(void *dump_context)
1286 struct vchiq_state *state = vchiq_get_state();
1295 * There is no list of instances, so instead scan all services,
1296 * marking those that have been dumped.
1300 for (i = 0; i < state->unused_service; i++) {
1301 struct vchiq_service *service;
1302 struct vchiq_instance *instance;
1304 service = rcu_dereference(state->services[i]);
/* Only services using our callback belong to a local instance */
1305 if (!service || service->base.callback != service_callback)
1308 instance = service->instance;
1314 for (i = 0; i < state->unused_service; i++) {
1315 struct vchiq_service *service;
1316 struct vchiq_instance *instance;
1320 service = rcu_dereference(state->services[i]);
1321 if (!service || service->base.callback != service_callback) {
1326 instance = service->instance;
1327 if (!instance || instance->mark) {
1333 len = snprintf(buf, sizeof(buf),
1334 "Instance %pK: pid %d,%s completions %d/%d",
1335 instance, instance->pid,
1336 instance->connected ? " connected, " :
1338 instance->completion_insert -
1339 instance->completion_remove,
1341 err = vchiq_dump(dump_context, buf, len + 1);
/* Dump per-service platform details: the owning instance pointer, plus
 * message-queue occupancy and dequeue-pending status for VCHI services.
 */
1349 int vchiq_dump_platform_service_state(void *dump_context,
1350 struct vchiq_service *service)
1352 struct user_service *user_service =
1353 (struct user_service *)service->base.userdata;
1357 len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
1359 if ((service->base.callback == service_callback) &&
1360 user_service->is_vchi) {
1361 len += scnprintf(buf + len, sizeof(buf) - len,
1363 user_service->msg_insert - user_service->msg_remove,
1366 if (user_service->dequeue_pending)
1367 len += scnprintf(buf + len, sizeof(buf) - len,
1368 " (dequeue pending)");
1371 return vchiq_dump(dump_context, buf, len + 1);
1374 struct vchiq_state *
1375 vchiq_get_state(void)
1378 if (!g_state.remote)
1379 pr_err("%s: g_state.remote == NULL\n", __func__);
1380 else if (g_state.remote->initialised != 1)
1381 pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1382 __func__, g_state.remote->initialised);
1384 return (g_state.remote &&
1385 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1389 * Autosuspend related functionality
/*
 * Service callback for the keep-alive ("KEEP") service.  No events are
 * expected on this service, so any invocation is logged as an error.
 * NOTE(review): the return statement is elided in this extract.
 */
static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user, void *bulk_user)
	vchiq_log_error(vchiq_susp_log_level,
			"%s callback reason %d", __func__, reason);
1403 vchiq_keepalive_thread_func(void *v)
1405 struct vchiq_state *state = (struct vchiq_state *)v;
1406 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1408 enum vchiq_status status;
1409 struct vchiq_instance *instance;
1410 unsigned int ka_handle;
1413 struct vchiq_service_params_kernel params = {
1414 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1415 .callback = vchiq_keepalive_vchiq_callback,
1416 .version = KEEPALIVE_VER,
1417 .version_min = KEEPALIVE_VER_MIN
1420 ret = vchiq_initialise(&instance);
1422 vchiq_log_error(vchiq_susp_log_level,
1423 "%s vchiq_initialise failed %d", __func__, ret);
1427 status = vchiq_connect(instance);
1428 if (status != VCHIQ_SUCCESS) {
1429 vchiq_log_error(vchiq_susp_log_level,
1430 "%s vchiq_connect failed %d", __func__, status);
1434 status = vchiq_add_service(instance, ¶ms, &ka_handle);
1435 if (status != VCHIQ_SUCCESS) {
1436 vchiq_log_error(vchiq_susp_log_level,
1437 "%s vchiq_open_service failed %d", __func__, status);
1442 long rc = 0, uc = 0;
1444 if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
1445 vchiq_log_error(vchiq_susp_log_level,
1446 "%s interrupted", __func__);
1447 flush_signals(current);
1452 * read and clear counters. Do release_count then use_count to
1453 * prevent getting more releases than uses
1455 rc = atomic_xchg(&arm_state->ka_release_count, 0);
1456 uc = atomic_xchg(&arm_state->ka_use_count, 0);
1459 * Call use/release service the requisite number of times.
1460 * Process use before release so use counts don't go negative
1463 atomic_inc(&arm_state->ka_use_ack_count);
1464 status = vchiq_use_service(ka_handle);
1465 if (status != VCHIQ_SUCCESS) {
1466 vchiq_log_error(vchiq_susp_log_level,
1467 "%s vchiq_use_service error %d",
1472 status = vchiq_release_service(ka_handle);
1473 if (status != VCHIQ_SUCCESS) {
1474 vchiq_log_error(vchiq_susp_log_level,
1475 "%s vchiq_release_service error %d",
1482 vchiq_shutdown(instance);
/*
 * Initialise the ARM-side bookkeeping attached to @state: the
 * suspend/resume rwlock, the keep-alive completion and its three
 * counters, the back-pointer to the owning state, and the one-shot
 * first_connect flag (consumed by vchiq_platform_conn_state_changed()
 * to start the keep-alive thread exactly once).
 * NOTE(review): the enclosing braces / NULL guard of the original body
 * are elided in this extract.
 */
vchiq_arm_init_state(struct vchiq_state *state,
		     struct vchiq_arm_state *arm_state)
	rwlock_init(&arm_state->susp_res_lock);

	init_completion(&arm_state->ka_evt);
	atomic_set(&arm_state->ka_use_count, 0);
	atomic_set(&arm_state->ka_use_ack_count, 0);
	atomic_set(&arm_state->ka_release_count, 0);

	arm_state->state = state;
	arm_state->first_connect = 0;
/*
 * Take one "use" reference on the VideoCore, on behalf of either the
 * VCHIQ peer (USE_TYPE_VCHIQ) or a specific service.  Increments the
 * global videocore_use_count and the per-entity count under
 * susp_res_lock, then acknowledges any outstanding remote-use
 * notifications by sending "use active" messages to VideoCore.
 * NOTE(review): declarations of ret/entity/entity_uc, several braces
 * and the error-return path are elided in this extract.
 */
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	/* Build a short label for trace output and pick the matching counter */
	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);

	/* Bump both counters atomically w.r.t. the release path */
	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;

	vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	/* Acknowledge remote-use notifications queued by the keep-alive thread */
	enum vchiq_status status = VCHIQ_SUCCESS;
	long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

	while (ack_cnt && (status == VCHIQ_SUCCESS)) {
		/* Send the use notify to videocore */
		status = vchiq_send_remote_use_active(state);
		if (status == VCHIQ_SUCCESS)
			&arm_state->ka_use_ack_count);

	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
/*
 * Drop one "use" reference on the VideoCore, for a specific service or
 * (when @service is NULL) for the peer.  Underflow of either counter is
 * reported with WARN_ON rather than BUG_ON so a misbehaving user thread
 * cannot crash the kernel.
 * NOTE(review): declarations of ret/entity/entity_uc, the entity
 * decrement and the error path are elided in this extract.
 */
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	/* Label + counter selection mirrors vchiq_use_internal() */
	sprintf(entity, "%c%c%c%c:%03d",
		VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
		service->client_id);
	entity_uc = &service->service_use_count;
	sprintf(entity, "PEER: ");
	entity_uc = &arm_state->peer_use_count;

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));

	--arm_state->videocore_use_count;

	vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc,
			arm_state->videocore_use_count);

	write_unlock_bh(&arm_state->susp_res_lock);

	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
/*
 * Called when VideoCore signals a remote "use": record it and wake the
 * keep-alive thread, which replays it as a local vchiq_use_service().
 */
vchiq_on_remote_use(struct vchiq_state *state)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
/*
 * Called when VideoCore signals a remote "release": record it and wake
 * the keep-alive thread, which replays it as vchiq_release_service().
 */
vchiq_on_remote_release(struct vchiq_state *state)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
/* Thin wrapper: take a service-scoped use reference on the VideoCore. */
vchiq_use_service_internal(struct vchiq_service *service)
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
/* Thin wrapper: drop a service-scoped use reference on the VideoCore. */
vchiq_release_service_internal(struct vchiq_service *service)
	return vchiq_release_internal(service->state, service);
/* Accessor for the instance's embedded debugfs node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
	return &instance->debugfs_node;
/*
 * Sum the service_use_count of every service belonging to @instance.
 * NOTE(review): the locking around the iteration (presumably RCU) and
 * the return statement are elided in this extract — confirm against the
 * full source.
 */
vchiq_instance_get_use_count(struct vchiq_instance *instance)
	struct vchiq_service *service;
	int use_count = 0, i;

	while ((service = __next_service_by_instance(instance->state,
		use_count += service->service_use_count;
1663 vchiq_instance_get_pid(struct vchiq_instance *instance)
1665 return instance->pid;
/* Accessor: current trace flag of this instance. */
vchiq_instance_get_trace(struct vchiq_instance *instance)
	return instance->trace;
/*
 * Set the trace flag on @instance and propagate it to every service it
 * owns (iteration loop partially elided in this extract).  The instance
 * flag is normalised to 0/1.
 */
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
	struct vchiq_service *service;

	while ((service = __next_service_by_instance(instance->state,
		service->trace = trace;
	instance->trace = (trace != 0);
/*
 * Public API: take a use reference on the service identified by
 * @handle.  Returns VCHIQ_ERROR when the handle does not resolve.
 * NOTE(review): the NULL-check branch around the call is elided in this
 * extract; vchiq_service_put() drops the lookup reference.
 */
vchiq_use_service(unsigned int handle)
	enum vchiq_status ret = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	ret = vchiq_use_internal(service->state, service,
	vchiq_service_put(service);

EXPORT_SYMBOL(vchiq_use_service);
/*
 * Public API: drop a use reference on the service identified by
 * @handle.  Returns VCHIQ_ERROR when the handle does not resolve.
 * NOTE(review): the NULL-check branch around the call is elided in this
 * extract; vchiq_service_put() drops the lookup reference.
 */
vchiq_release_service(unsigned int handle)
	enum vchiq_status ret = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	ret = vchiq_release_internal(service->state, service);
	vchiq_service_put(service);

EXPORT_SYMBOL(vchiq_release_service);
/* Snapshot of one service's identity and use count for dumping.
 * NOTE(review): the struct's fields (fourcc, clientid, use_count) are
 * elided in this extract — inferred from the uses below. */
struct service_data_struct {

/*
 * Log (via vchiq_log_warning) the use counts of up to MAX_SERVICES
 * services, plus the peer and overall VideoCore use counts.  Data is
 * snapshotted under susp_res_lock into a heap array so logging happens
 * outside the lock.
 */
vchiq_dump_service_use_state(struct vchiq_state *state)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;

	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts.
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int active_services;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),

	/* Snapshot counters and per-service data under the lock */
	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)

	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (only_nonzero && !service_ptr->service_use_count)

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;

		/* Array is full; stop collecting */
		if (found >= MAX_SERVICES)

	read_unlock_bh(&arm_state->susp_res_lock);

	vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			  "services (%d). Only dumping up to first %d services "
			  "with non-zero use-count", active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
				  "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid,
				  service_data[i].use_count,
				  service_data[i].use_count ? nz : "");

	vchiq_log_warning(vchiq_susp_log_level,
			  "----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
			  "--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
/*
 * Verify that @service currently holds at least one use reference.
 * Returns VCHIQ_SUCCESS if so; otherwise logs the counts, dumps the
 * whole use state for diagnosis, and returns VCHIQ_ERROR.
 */
vchiq_check_service(struct vchiq_service *service)
	struct vchiq_arm_state *arm_state;
	enum vchiq_status ret = VCHIQ_ERROR;

	if (!service || !service->state)

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				service->client_id, service->service_use_count,
				arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
/*
 * Connection-state hook.  On the first transition to CONNECTED (guarded
 * by first_connect under susp_res_lock so it happens exactly once),
 * create and start the per-state keep-alive thread.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	/* Only act once the state has actually reached CONNECTED */
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)

	write_lock_bh(&arm_state->susp_res_lock);
	/* Already started the keep-alive thread on an earlier connect */
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,

	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
	wake_up_process(arm_state->ka_thread);
/* Device-tree match table; .data selects the per-SoC drvdata used in probe. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{ .compatible = "brcm,bcm2711-vchiq", .data = &bcm2711_drvdata },
MODULE_DEVICE_TABLE(of, vchiq_of_match);
/*
 * Register a child platform device (camera, codec, audio, ...) under
 * the VCHIQ device.  If the device tree has a matching child node it is
 * attached; an explicitly disabled node skips registration entirely.
 * Returns the registered device, or NULL-ish on failure/skip (error
 * paths are elided in this extract).
 */
static struct platform_device *
vchiq_register_child(struct platform_device *pdev, const char *name)
	struct platform_device_info pdevinfo;
	struct platform_device *child;
	struct device_node *np;

	memset(&pdevinfo, 0, sizeof(pdevinfo));

	pdevinfo.parent = &pdev->dev;
	pdevinfo.name = name;
	pdevinfo.id = PLATFORM_DEVID_NONE;
	pdevinfo.dma_mask = DMA_BIT_MASK(32);

	np = of_get_child_by_name(pdev->dev.of_node, name);

	/* Skip the child if it is explicitly disabled */
	if (np && !of_device_is_available(np))

	child = platform_device_register_full(&pdevinfo);
	if (IS_ERR(child)) {
		dev_warn(&pdev->dev, "%s not registered\n", name);

	child->dev.of_node = np;

	/*
	 * We want the dma-ranges etc to be copied from the parent VCHIQ device
	 * to be passed on to the children without a node of their own.
	 */
	np = pdev->dev.of_node;

	of_dma_configure(&child->dev, np, true);

	/* Drop the child-node reference taken above, if one was found */
	if (np != pdev->dev.of_node)
/*
 * Platform probe: look up per-SoC drvdata, fetch the firmware
 * interface, initialise the VCHIQ platform layer and debugfs, register
 * the character device, then register the child media/audio devices.
 * NOTE(review): several error checks/returns are elided in this
 * extract.
 */
static int vchiq_probe(struct platform_device *pdev)
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;

	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	dev_err(&pdev->dev, "Missing firmware node\n");

	/* Firmware may not be ready yet; defer probing in that case */
	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	goto failed_platform_init;

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq: platform initialised - version %d (min %d)",
		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * all cases itself.
	 */
	err = vchiq_register_chrdev(&pdev->dev);
	vchiq_log_warning(vchiq_arm_log_level,
			  "Failed to initialize vchiq cdev");

	/* Child devices; each returns an error pointer/NULL tolerated at remove */
	vcsm_cma = vchiq_register_child(pdev, "vcsm-cma");
	bcm2835_codec = vchiq_register_child(pdev, "bcm2835-codec");
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
	bcm2835_isp = vchiq_register_child(pdev, "bcm2835-isp");

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
/*
 * Platform remove: unregister child devices in reverse order of
 * registration, then tear down debugfs and the character device.
 * NOTE(review): the return statement is elided in this extract.
 */
static int vchiq_remove(struct platform_device *pdev)
	platform_device_unregister(bcm2835_isp);
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	platform_device_unregister(bcm2835_codec);
	platform_device_unregister(vcsm_cma);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
/* Platform driver glue; matched against vchiq_of_match above. */
static struct platform_driver vchiq_driver = {
	.name = "bcm2835_vchiq",
	.of_match_table = vchiq_of_match,
	.probe = vchiq_probe,
	.remove = vchiq_remove,
/*
 * Module init: register the platform driver, logging on failure.
 * NOTE(review): the return statements are elided in this extract.
 */
static int __init vchiq_driver_init(void)
	ret = platform_driver_register(&vchiq_driver);
	pr_err("Failed to register vchiq driver\n");

module_init(vchiq_driver_init);
/* Module exit: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
	platform_driver_unregister(&vchiq_driver);
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");