static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
-static struct device *g_dev;
static DEFINE_SEMAPHORE(g_free_fragments_mutex);
static enum vchiq_status
-vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
unsigned int size, enum vchiq_bulk_dir dir);
static void
-cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
+cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
{
if (pagelistinfo->scatterlist_mapped) {
- dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
pagelistinfo->num_pages, pagelistinfo->dma_dir);
}
if (pagelistinfo->pages_need_release)
unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
- dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
+ dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
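The change in cleanup_pagelistinfo() is the pattern repeated through the rest of the patch: every user of the old file-scope g_dev pointer now reaches the struct device through the caller's vchiq_instance. A minimal sketch of that lookup, written as a hypothetical helper (the name vchiq_instance_dev is illustrative and not part of the driver; the hunks below simply open-code the dereference):

/* Illustrative only: the device now hangs off the shared state the
 * instance was created against, instead of a driver-wide global.
 */
static inline struct device *vchiq_instance_dev(struct vchiq_instance *instance)
{
	return instance->state->dev;
}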
static struct vchiq_pagelist_info *
-create_pagelist(char *buf, char __user *ubuf,
+create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
size_t count, unsigned short type)
{
struct pagelist *pagelist;
/* Allocate enough storage to hold the page pointers and the page
* list
*/
- pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
+ pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
GFP_KERNEL);
vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
size_t bytes = PAGE_SIZE - off;
if (!pg) {
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
/* This is probably due to the process being killed */
if (actual_pages > 0)
unpin_user_pages(pages, actual_pages);
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
/* release user pages */
count -= len;
}
- dma_buffers = dma_map_sg(g_dev,
+ dma_buffers = dma_map_sg(instance->state->dev,
scatterlist,
num_pages,
pagelistinfo->dma_dir);
if (dma_buffers == 0) {
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
char *fragments;
if (down_interruptible(&g_free_fragments_sema)) {
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
}
static void
-free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
int actual)
{
struct pagelist *pagelist = pagelistinfo->pagelist;
* NOTE: dma_unmap_sg must be called before the
* cpu can touch any of the data/pages.
*/
- dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
pagelistinfo->num_pages, pagelistinfo->dma_dir);
pagelistinfo->scatterlist_mapped = 0;
set_page_dirty(pages[i]);
}
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
}
int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
return err ? : -ENXIO;
}
- g_dev = dev;
vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
vchiq_slot_zero, &slot_phys);
}
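Dropping the g_dev assignment here relies on the device already being stored on the shared vchiq_state, which is what the new instance->state->dev lookups dereference. That assignment is not visible in this hunk; the sketch below only illustrates the assumed relationship, and vchiq_record_dev is a made-up name:

/* Hedged sketch, not part of the patch: vchiq_platform_init() is assumed to
 * record the platform device on the shared state before any bulk transfer
 * can run, so instance->state->dev is valid everywhere g_dev used to be.
 */
static void vchiq_record_dev(struct platform_device *pdev, struct vchiq_state *state)
{
	state->dev = &pdev->dev;
}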
int
-vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
+vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
void __user *uoffset, int size, int dir)
{
struct vchiq_pagelist_info *pagelistinfo;
- pagelistinfo = create_pagelist(offset, uoffset, size,
+ pagelistinfo = create_pagelist(instance, offset, uoffset, size,
(dir == VCHIQ_BULK_RECEIVE)
? PAGELIST_READ
: PAGELIST_WRITE);
}
void
-vchiq_complete_bulk(struct vchiq_bulk *bulk)
+vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
{
if (bulk && bulk->remote_data && bulk->actual)
- free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
+ free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
bulk->actual);
}
EXPORT_SYMBOL(vchiq_open_service);
enum vchiq_status
-vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
- void *userdata, enum vchiq_bulk_mode mode)
+vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- status = vchiq_bulk_transfer(handle,
+ status = vchiq_bulk_transfer(instance, handle,
(void *)data, NULL,
size, userdata, mode,
VCHIQ_BULK_TRANSMIT);
break;
case VCHIQ_BULK_MODE_BLOCKING:
- status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
+ status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
VCHIQ_BULK_TRANSMIT);
break;
default:
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
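With vchiq_bulk_transmit() now taking the instance explicitly, kernel-side service drivers have to thread the instance they obtained at initialisation through to every bulk call. A hedged caller sketch under that assumption; send_block and its parameters are illustrative, only the vchiq_bulk_transmit() signature comes from this patch:

/* Illustrative wrapper: instance/handle are whatever the service driver got
 * from its vchiq setup; buf/len are placeholders.
 */
static enum vchiq_status send_block(struct vchiq_instance *instance,
				    unsigned int handle,
				    const void *buf, unsigned int len)
{
	/* Blocking mode: vchiq supplies its own bulk_waiter internally
	 * (see vchiq_blocking_bulk_transfer below), so userdata stays NULL.
	 */
	return vchiq_bulk_transmit(instance, handle, buf, len, NULL,
				   VCHIQ_BULK_MODE_BLOCKING);
}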
-enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
- unsigned int size, void *userdata,
+enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
+ void *data, unsigned int size, void *userdata,
enum vchiq_bulk_mode mode)
{
enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- status = vchiq_bulk_transfer(handle, data, NULL,
+ status = vchiq_bulk_transfer(instance, handle, data, NULL,
size, userdata,
mode, VCHIQ_BULK_RECEIVE);
break;
case VCHIQ_BULK_MODE_BLOCKING:
- status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
+ status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
VCHIQ_BULK_RECEIVE);
break;
default:
EXPORT_SYMBOL(vchiq_bulk_receive);
static enum vchiq_status
-vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
- enum vchiq_bulk_dir dir)
+vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir)
{
- struct vchiq_instance *instance;
struct vchiq_service *service;
enum vchiq_status status;
struct bulk_waiter_node *waiter = NULL, *iter;
if (!service)
return VCHIQ_ERROR;
- instance = service->instance;
-
vchiq_service_put(service);
mutex_lock(&instance->bulk_waiter_list_mutex);
}
}
- status = vchiq_bulk_transfer(handle, data, NULL, size,
+ status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
&waiter->bulk_waiter,
VCHIQ_BULK_MODE_BLOCKING, dir);
if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
}
if (queue->process != queue->local_insert) {
- vchiq_complete_bulk(bulk);
+ vchiq_complete_bulk(service->instance, bulk);
vchiq_log_info(SRVTRACE_LEVEL(service),
"%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
DEBUG_TRACE(PARSE_LINE);
WARN_ON(queue->process == queue->local_insert);
- vchiq_complete_bulk(bulk);
+ vchiq_complete_bulk(service->instance, bulk);
queue->process++;
mutex_unlock(&service->bulk_mutex);
DEBUG_TRACE(PARSE_LINE);
* When called in blocking mode, the userdata field points to a bulk_waiter
* structure.
*/
-enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
- int size, void *userdata, enum vchiq_bulk_mode mode,
- enum vchiq_bulk_dir dir)
+enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size, void *userdata,
+ enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(handle);
struct vchiq_bulk_queue *queue;
bulk->size = size;
bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
- if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
+ if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
goto unlock_error_exit;
wmb();
unlock_both_error_exit:
mutex_unlock(&state->slot_mutex);
cancel_bulk_error_exit:
- vchiq_complete_bulk(bulk);
+ vchiq_complete_bulk(service->instance, bulk);
unlock_error_exit:
mutex_unlock(&service->bulk_mutex);