// SPDX-License-Identifier: GPL-2.0
/*
 * Broadcom BM2835 V4L2 driver
 *
 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
 *
 * Authors: Vincent Sanders @ Collabora
 *          Dave Stevenson @ Broadcom
 *		(now dave.stevenson@raspberrypi.org)
 *          Simon Mellor @ Broadcom
 *          Luke Diamand @ Broadcom
 *
 * V4L2 driver MMAL vchiq interface code
 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
28 #include "mmal-common.h"
29 #include "mmal-parameters.h"
30 #include "mmal-vchiq.h"
33 #include "vc-sm-cma/vc_sm_knl.h"
36 #include "interface/vchi/vchi.h"
38 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
39 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION("0.0.1");
44 * maximum number of components supported.
45 * This matches the maximum permitted by default on the VPU
47 #define VCHIQ_MMAL_MAX_COMPONENTS 64
50 * Timeout for synchronous msg responses in seconds.
51 * Helpful to increase this if stopping in the VPU debugger.
53 #define SYNC_MSG_TIMEOUT 3
55 /*#define FULL_MSG_DUMP 1*/
/*
 * Human-readable names for mmal message types, indexed by
 * mmal_msg_type, used only by DBG_DUMP_MSG diagnostics.
 * Order must match the mmal_msg_type enum in mmal-msg.h.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LCS_ID",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
/*
 * Human-readable names for port actions, indexed by
 * mmal_msg_port_action_type; used in debug logging.
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
/*
 * Debug dump of an mmal message: header summary always, full hex dump of
 * header and payload when FULL_MSG_DUMP is defined.  Compiles to nothing
 * in non-debug builds.  do-while(0) keeps the multi-statement form safe
 * inside unbraced if/else.
 */
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#elif defined(DEBUG)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	pr_debug(TITLE" type:%s(%d) length:%d\n",			\
		 msg_type_names[(MSG)->h.type],				\
		 (MSG)->h.type, (MSG_LEN))
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
126 struct vchiq_mmal_instance;
128 /* normal message context */
129 struct mmal_msg_context {
130 struct vchiq_mmal_instance *instance;
132 /* Index in the context_map idr so that we can find the
133 * mmal_msg_context again when servicing the VCHI reply.
139 /* work struct for buffer_cb callback */
140 struct work_struct work;
141 /* work struct for deferred callback */
142 struct work_struct buffer_to_host_work;
144 struct vchiq_mmal_instance *instance;
146 struct vchiq_mmal_port *port;
147 /* actual buffer used to store bulk reply */
148 struct mmal_buffer *buffer;
149 /* amount of buffer used */
150 unsigned long buffer_used;
151 /* MMAL buffer flags */
153 /* Presentation and Decode timestamps */
156 /* MMAL buffer command flag */
159 int status; /* context status */
161 } bulk; /* bulk data */
164 /* message handle to release */
165 struct vchi_held_msg msg_handle;
166 /* pointer to received message */
167 struct mmal_msg *msg;
168 /* received message length */
170 /* completion upon reply */
171 struct completion cmplt;
172 } sync; /* synchronous response */
177 struct vchiq_mmal_instance {
178 VCHI_SERVICE_HANDLE_T handle;
180 /* ensure serialised access to service */
181 struct mutex vchiq_mutex;
183 /* vmalloc page to receive scratch bulk xfers into */
186 struct idr context_map;
187 /* protect accesses to context_map */
188 struct mutex context_map_lock;
190 struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
192 /* ordered workqueue to process all bulk operations */
193 struct workqueue_struct *bulk_wq;
196 static struct mmal_msg_context *
197 get_msg_context(struct vchiq_mmal_instance *instance)
199 struct mmal_msg_context *msg_context;
202 /* todo: should this be allocated from a pool to avoid kzalloc */
203 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
206 return ERR_PTR(-ENOMEM);
208 /* Create an ID that will be passed along with our message so
209 * that when we service the VCHI reply, we can look up what
210 * message is being replied to.
212 mutex_lock(&instance->context_map_lock);
213 handle = idr_alloc(&instance->context_map, msg_context,
215 mutex_unlock(&instance->context_map_lock);
219 return ERR_PTR(handle);
222 msg_context->instance = instance;
223 msg_context->handle = handle;
228 static struct mmal_msg_context *
229 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
231 return idr_find(&instance->context_map, handle);
235 release_msg_context(struct mmal_msg_context *msg_context)
237 struct vchiq_mmal_instance *instance = msg_context->instance;
239 mutex_lock(&instance->context_map_lock);
240 idr_remove(&instance->context_map, msg_context->handle);
241 mutex_unlock(&instance->context_map_lock);
245 /* workqueue scheduled callback
247 * we do this because it is important we do not call any other vchiq
248 * sync calls from witin the message delivery thread
250 static void buffer_work_cb(struct work_struct *work)
252 struct mmal_msg_context *msg_context =
253 container_of(work, struct mmal_msg_context, u.bulk.work);
254 struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
257 pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
258 __func__, msg_context);
262 buffer->length = msg_context->u.bulk.buffer_used;
263 buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
264 buffer->dts = msg_context->u.bulk.dts;
265 buffer->pts = msg_context->u.bulk.pts;
266 buffer->cmd = msg_context->u.bulk.cmd;
269 atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
271 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
272 msg_context->u.bulk.port,
273 msg_context->u.bulk.status,
274 msg_context->u.bulk.buffer);
277 mutex_unlock(&msg_context->u.bulk.port->event_context_mutex);
280 /* workqueue scheduled callback to handle receiving buffers
282 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
283 * If we block in the service_callback context then we can't process the
284 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
285 * vchi_bulk_queue_receive() call to complete.
287 static void buffer_to_host_work_cb(struct work_struct *work)
289 struct mmal_msg_context *msg_context =
290 container_of(work, struct mmal_msg_context,
291 u.bulk.buffer_to_host_work);
292 struct vchiq_mmal_instance *instance = msg_context->instance;
293 unsigned long len = msg_context->u.bulk.buffer_used;
297 /* Dummy receive to ensure the buffers remain in order */
299 /* queue the bulk submission */
300 vchi_service_use(instance->handle);
301 ret = vchi_bulk_queue_receive(instance->handle,
302 msg_context->u.bulk.buffer->buffer,
303 /* Actual receive needs to be a multiple
307 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
308 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
311 vchi_service_release(instance->handle);
314 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
315 __func__, msg_context, ret);
318 /* enqueue a bulk receive for a given message context */
319 static int bulk_receive(struct vchiq_mmal_instance *instance,
320 struct mmal_msg *msg,
321 struct mmal_msg_context *msg_context)
323 unsigned long rd_len;
325 rd_len = msg->u.buffer_from_host.buffer_header.length;
327 if (!msg_context->u.bulk.buffer) {
328 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
330 /* todo: this is a serious error, we should never have
331 * committed a buffer_to_host operation to the mmal
332 * port without the buffer to back it up (underflow
333 * handling) and there is no obvious way to deal with
334 * this - how is the mmal servie going to react when
335 * we fail to do the xfer and reschedule a buffer when
336 * it arrives? perhaps a starved flag to indicate a
337 * waiting bulk receive?
343 /* ensure we do not overrun the available buffer */
344 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
345 rd_len = msg_context->u.bulk.buffer->buffer_size;
346 pr_warn("short read as not enough receive buffer space\n");
347 /* todo: is this the correct response, what happens to
348 * the rest of the message data?
353 msg_context->u.bulk.buffer_used = rd_len;
354 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
355 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
356 msg_context->u.bulk.cmd = msg->u.buffer_from_host.buffer_header.cmd;
358 queue_work(msg_context->instance->bulk_wq,
359 &msg_context->u.bulk.buffer_to_host_work);
364 /* data in message, memcpy from packet into output buffer */
365 static int inline_receive(struct vchiq_mmal_instance *instance,
366 struct mmal_msg *msg,
367 struct mmal_msg_context *msg_context)
369 memcpy(msg_context->u.bulk.buffer->buffer,
370 msg->u.buffer_from_host.short_data,
371 msg->u.buffer_from_host.payload_in_message);
373 msg_context->u.bulk.buffer_used =
374 msg->u.buffer_from_host.payload_in_message;
379 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
381 buffer_from_host(struct vchiq_mmal_instance *instance,
382 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
384 struct mmal_msg_context *msg_context;
391 pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
394 if (!buf->msg_context) {
395 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
399 msg_context = buf->msg_context;
401 /* store bulk message context for when data arrives */
402 msg_context->u.bulk.instance = instance;
403 msg_context->u.bulk.port = port;
404 msg_context->u.bulk.buffer = buf;
405 msg_context->u.bulk.buffer_used = 0;
407 /* initialise work structure ready to schedule callback */
408 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
409 INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
410 buffer_to_host_work_cb);
412 atomic_inc(&port->buffers_with_vpu);
414 /* prep the buffer from host message */
415 memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
417 m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
418 m.h.magic = MMAL_MAGIC;
419 m.h.context = msg_context->handle;
422 /* drvbuf is our private data passed back */
423 m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
424 m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
425 m.u.buffer_from_host.drvbuf.port_handle = port->handle;
426 m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
429 m.u.buffer_from_host.buffer_header.cmd = 0;
430 if (port->zero_copy) {
431 m.u.buffer_from_host.buffer_header.data = buf->vc_handle;
433 m.u.buffer_from_host.buffer_header.data =
434 (u32)(unsigned long)buf->buffer;
437 m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
438 if (port->type == MMAL_PORT_TYPE_OUTPUT) {
439 m.u.buffer_from_host.buffer_header.length = 0;
440 m.u.buffer_from_host.buffer_header.offset = 0;
441 m.u.buffer_from_host.buffer_header.flags = 0;
442 m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
443 m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
445 m.u.buffer_from_host.buffer_header.length = buf->length;
446 m.u.buffer_from_host.buffer_header.offset = 0;
447 m.u.buffer_from_host.buffer_header.flags = buf->mmal_flags;
448 m.u.buffer_from_host.buffer_header.pts = buf->pts;
449 m.u.buffer_from_host.buffer_header.dts = buf->dts;
452 /* clear buffer type sepecific data */
453 memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
454 sizeof(m.u.buffer_from_host.buffer_header_type_specific));
456 /* no payload in message */
457 m.u.buffer_from_host.payload_in_message = 0;
459 vchi_service_use(instance->handle);
461 ret = vchi_queue_kernel_message(instance->handle,
463 sizeof(struct mmal_msg_header) +
464 sizeof(m.u.buffer_from_host));
466 vchi_service_release(instance->handle);
471 /* deals with receipt of event to host message */
472 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
473 struct mmal_msg *msg, u32 msg_len)
475 int comp_idx = msg->u.event_to_host.client_component;
476 struct vchiq_mmal_component *component =
477 &instance->component[comp_idx];
478 struct vchiq_mmal_port *port = NULL;
479 struct mmal_msg_context *msg_context;
480 u32 port_num = msg->u.event_to_host.port_num;
482 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
483 pr_err("%s: MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n",
488 switch (msg->u.event_to_host.port_type) {
489 case MMAL_PORT_TYPE_CONTROL:
491 pr_err("%s: port_num of %u >= number of ports 1",
495 port = &component->control;
497 case MMAL_PORT_TYPE_INPUT:
498 if (port_num >= component->inputs) {
499 pr_err("%s: port_num of %u >= number of ports %u",
501 port_num >= component->inputs);
504 port = &component->input[port_num];
506 case MMAL_PORT_TYPE_OUTPUT:
507 if (port_num >= component->outputs) {
508 pr_err("%s: port_num of %u >= number of ports %u",
510 port_num >= component->outputs);
513 port = &component->output[port_num];
515 case MMAL_PORT_TYPE_CLOCK:
516 if (port_num >= component->clocks) {
517 pr_err("%s: port_num of %u >= number of ports %u",
519 port_num >= component->clocks);
522 port = &component->clock[port_num];
528 if (!mutex_trylock(&port->event_context_mutex)) {
529 pr_err("dropping event 0x%x\n", msg->u.event_to_host.cmd);
532 msg_context = port->event_context;
534 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
535 /* message reception had an error */
537 pr_err("%s: error %d in reply\n", __func__, msg->h.status);
539 msg_context->u.bulk.status = msg->h.status;
540 } else if (msg->u.event_to_host.length > MMAL_WORKER_EVENT_SPACE) {
541 /* data is not in message, queue a bulk receive */
542 pr_err("%s: payload not in message - bulk receive??! NOT SUPPORTED\n",
544 msg_context->u.bulk.status = -1;
546 memcpy(msg_context->u.bulk.buffer->buffer,
547 msg->u.event_to_host.data,
548 msg->u.event_to_host.length);
550 msg_context->u.bulk.buffer_used =
551 msg->u.event_to_host.length;
553 msg_context->u.bulk.mmal_flags = 0;
554 msg_context->u.bulk.dts = MMAL_TIME_UNKNOWN;
555 msg_context->u.bulk.pts = MMAL_TIME_UNKNOWN;
556 msg_context->u.bulk.cmd = msg->u.event_to_host.cmd;
558 pr_debug("event component:%u port type:%d num:%d cmd:0x%x length:%d\n",
559 msg->u.event_to_host.client_component,
560 msg->u.event_to_host.port_type,
561 msg->u.event_to_host.port_num,
562 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
565 schedule_work(&msg_context->u.bulk.work);
568 /* deals with receipt of buffer to host message */
569 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
570 struct mmal_msg *msg, u32 msg_len)
572 struct mmal_msg_context *msg_context;
575 pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
576 __func__, instance, msg, msg_len);
578 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
579 handle = msg->u.buffer_from_host.drvbuf.client_context;
580 msg_context = lookup_msg_context(instance, handle);
583 pr_err("drvbuf.client_context(%u) is invalid\n",
588 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
592 msg_context->u.bulk.mmal_flags =
593 msg->u.buffer_from_host.buffer_header.flags;
595 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
596 /* message reception had an error */
597 pr_warn("error %d in reply\n", msg->h.status);
599 msg_context->u.bulk.status = msg->h.status;
601 } else if (msg->u.buffer_from_host.is_zero_copy) {
603 * Zero copy buffer, so nothing to do.
604 * Copy buffer info and make callback.
606 msg_context->u.bulk.buffer_used =
607 msg->u.buffer_from_host.buffer_header.length;
608 msg_context->u.bulk.mmal_flags =
609 msg->u.buffer_from_host.buffer_header.flags;
610 msg_context->u.bulk.dts =
611 msg->u.buffer_from_host.buffer_header.dts;
612 msg_context->u.bulk.pts =
613 msg->u.buffer_from_host.buffer_header.pts;
614 msg_context->u.bulk.cmd =
615 msg->u.buffer_from_host.buffer_header.cmd;
617 } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
619 if (msg->u.buffer_from_host.buffer_header.flags &
620 MMAL_BUFFER_HEADER_FLAG_EOS) {
621 msg_context->u.bulk.status =
622 bulk_receive(instance, msg, msg_context);
623 if (msg_context->u.bulk.status == 0)
624 return; /* successful bulk submission, bulk
625 * completion will trigger callback
628 /* do callback with empty buffer - not EOS though */
629 msg_context->u.bulk.status = 0;
630 msg_context->u.bulk.buffer_used = 0;
632 } else if (msg->u.buffer_from_host.payload_in_message == 0) {
633 /* data is not in message, queue a bulk receive */
634 msg_context->u.bulk.status =
635 bulk_receive(instance, msg, msg_context);
636 if (msg_context->u.bulk.status == 0)
637 return; /* successful bulk submission, bulk
638 * completion will trigger callback
641 /* failed to submit buffer, this will end badly */
642 pr_err("error %d on bulk submission\n",
643 msg_context->u.bulk.status);
645 } else if (msg->u.buffer_from_host.payload_in_message <=
646 MMAL_VC_SHORT_DATA) {
647 /* data payload within message */
648 msg_context->u.bulk.status = inline_receive(instance, msg,
651 pr_err("message with invalid short payload\n");
654 msg_context->u.bulk.status = -EINVAL;
655 msg_context->u.bulk.buffer_used =
656 msg->u.buffer_from_host.payload_in_message;
659 /* schedule the port callback */
660 schedule_work(&msg_context->u.bulk.work);
663 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
664 struct mmal_msg_context *msg_context)
666 msg_context->u.bulk.status = 0;
668 /* schedule the port callback */
669 schedule_work(&msg_context->u.bulk.work);
672 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
673 struct mmal_msg_context *msg_context)
675 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
677 msg_context->u.bulk.status = -EINTR;
679 schedule_work(&msg_context->u.bulk.work);
682 /* incoming event service callback */
683 static void service_callback(void *param,
684 const VCHI_CALLBACK_REASON_T reason,
687 struct vchiq_mmal_instance *instance = param;
690 struct mmal_msg *msg;
691 struct vchi_held_msg msg_handle;
692 struct mmal_msg_context *msg_context;
695 pr_err("Message callback passed NULL instance\n");
700 case VCHI_CALLBACK_MSG_AVAILABLE:
701 status = vchi_msg_hold(instance->handle, (void **)&msg,
702 &msg_len, VCHI_FLAGS_NONE, &msg_handle);
704 pr_err("Unable to dequeue a message (%d)\n", status);
708 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
710 /* handling is different for buffer messages */
711 switch (msg->h.type) {
712 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
713 vchi_held_msg_release(&msg_handle);
716 case MMAL_MSG_TYPE_EVENT_TO_HOST:
717 event_to_host_cb(instance, msg, msg_len);
718 vchi_held_msg_release(&msg_handle);
722 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
723 buffer_to_host_cb(instance, msg, msg_len);
724 vchi_held_msg_release(&msg_handle);
728 /* messages dependent on header context to complete */
729 if (!msg->h.context) {
730 pr_err("received message context was null!\n");
731 vchi_held_msg_release(&msg_handle);
735 msg_context = lookup_msg_context(instance,
738 pr_err("received invalid message context %u!\n",
740 vchi_held_msg_release(&msg_handle);
744 /* fill in context values */
745 msg_context->u.sync.msg_handle = msg_handle;
746 msg_context->u.sync.msg = msg;
747 msg_context->u.sync.msg_len = msg_len;
749 /* todo: should this check (completion_done()
750 * == 1) for no one waiting? or do we need a
751 * flag to tell us the completion has been
752 * interrupted so we can free the message and
753 * its context. This probably also solves the
754 * message arriving after interruption todo
758 /* complete message so caller knows it happened */
759 complete(&msg_context->u.sync.cmplt);
765 case VCHI_CALLBACK_BULK_RECEIVED:
766 bulk_receive_cb(instance, bulk_ctx);
769 case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
770 bulk_abort_cb(instance, bulk_ctx);
773 case VCHI_CALLBACK_SERVICE_CLOSED:
774 /* TODO: consider if this requires action if received when
775 * driver is not explicitly closing the service
780 pr_err("Received unhandled message reason %d\n", reason);
785 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
786 struct mmal_msg *msg,
787 unsigned int payload_len,
788 struct mmal_msg **msg_out,
789 struct vchi_held_msg *msg_handle_out)
791 struct mmal_msg_context *msg_context;
793 unsigned long timeout;
795 /* payload size must not cause message to exceed max size */
797 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
798 pr_err("payload length %d exceeds max:%d\n", payload_len,
799 (int)(MMAL_MSG_MAX_SIZE -
800 sizeof(struct mmal_msg_header)));
804 msg_context = get_msg_context(instance);
805 if (IS_ERR(msg_context))
806 return PTR_ERR(msg_context);
808 init_completion(&msg_context->u.sync.cmplt);
810 msg->h.magic = MMAL_MAGIC;
811 msg->h.context = msg_context->handle;
814 DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
817 vchi_service_use(instance->handle);
819 ret = vchi_queue_kernel_message(instance->handle,
821 sizeof(struct mmal_msg_header) +
824 vchi_service_release(instance->handle);
827 pr_err("error %d queuing message\n", ret);
828 release_msg_context(msg_context);
832 timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
833 SYNC_MSG_TIMEOUT * HZ);
835 pr_err("timed out waiting for sync completion\n");
837 /* todo: what happens if the message arrives after aborting */
838 release_msg_context(msg_context);
842 *msg_out = msg_context->u.sync.msg;
843 *msg_handle_out = msg_context->u.sync.msg_handle;
844 release_msg_context(msg_context);
849 static void dump_port_info(struct vchiq_mmal_port *port)
851 pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
853 pr_debug("buffer minimum num:%d size:%d align:%d\n",
854 port->minimum_buffer.num,
855 port->minimum_buffer.size, port->minimum_buffer.alignment);
857 pr_debug("buffer recommended num:%d size:%d align:%d\n",
858 port->recommended_buffer.num,
859 port->recommended_buffer.size,
860 port->recommended_buffer.alignment);
862 pr_debug("buffer current values num:%d size:%d align:%d\n",
863 port->current_buffer.num,
864 port->current_buffer.size, port->current_buffer.alignment);
866 pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
868 port->format.encoding, port->format.encoding_variant);
870 pr_debug(" bitrate:%d flags:0x%x\n",
871 port->format.bitrate, port->format.flags);
873 if (port->format.type == MMAL_ES_TYPE_VIDEO) {
875 ("es video format: width:%d height:%d colourspace:0x%x\n",
876 port->es.video.width, port->es.video.height,
877 port->es.video.color_space);
879 pr_debug(" : crop xywh %d,%d,%d,%d\n",
880 port->es.video.crop.x,
881 port->es.video.crop.y,
882 port->es.video.crop.width, port->es.video.crop.height);
883 pr_debug(" : framerate %d/%d aspect %d/%d\n",
884 port->es.video.frame_rate.num,
885 port->es.video.frame_rate.den,
886 port->es.video.par.num, port->es.video.par.den);
890 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
892 /* todo do readonly fields need setting at all? */
893 p->type = port->type;
894 p->index = port->index;
896 p->is_enabled = port->enabled;
897 p->buffer_num_min = port->minimum_buffer.num;
898 p->buffer_size_min = port->minimum_buffer.size;
899 p->buffer_alignment_min = port->minimum_buffer.alignment;
900 p->buffer_num_recommended = port->recommended_buffer.num;
901 p->buffer_size_recommended = port->recommended_buffer.size;
903 /* only three writable fields in a port */
904 p->buffer_num = port->current_buffer.num;
905 p->buffer_size = port->current_buffer.size;
906 p->userdata = (u32)(unsigned long)port;
909 static int port_info_set(struct vchiq_mmal_instance *instance,
910 struct vchiq_mmal_port *port)
914 struct mmal_msg *rmsg;
915 struct vchi_held_msg rmsg_handle;
917 pr_debug("setting port info port %p\n", port);
920 dump_port_info(port);
922 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
924 m.u.port_info_set.component_handle = port->component->handle;
925 m.u.port_info_set.port_type = port->type;
926 m.u.port_info_set.port_index = port->index;
928 port_to_mmal_msg(port, &m.u.port_info_set.port);
930 /* elementary stream format setup */
931 m.u.port_info_set.format.type = port->format.type;
932 m.u.port_info_set.format.encoding = port->format.encoding;
933 m.u.port_info_set.format.encoding_variant =
934 port->format.encoding_variant;
935 m.u.port_info_set.format.bitrate = port->format.bitrate;
936 m.u.port_info_set.format.flags = port->format.flags;
938 memcpy(&m.u.port_info_set.es, &port->es,
939 sizeof(union mmal_es_specific_format));
941 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
942 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
943 port->format.extradata_size);
945 ret = send_synchronous_mmal_msg(instance, &m,
946 sizeof(m.u.port_info_set),
947 &rmsg, &rmsg_handle);
951 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
952 /* got an unexpected message type in reply */
957 /* return operation status */
958 ret = -rmsg->u.port_info_get_reply.status;
960 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
961 port->component->handle, port->handle);
964 vchi_held_msg_release(&rmsg_handle);
969 /* use port info get message to retrieve port information */
970 static int port_info_get(struct vchiq_mmal_instance *instance,
971 struct vchiq_mmal_port *port)
975 struct mmal_msg *rmsg;
976 struct vchi_held_msg rmsg_handle;
979 m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
980 m.u.port_info_get.component_handle = port->component->handle;
981 m.u.port_info_get.port_type = port->type;
982 m.u.port_info_get.index = port->index;
984 ret = send_synchronous_mmal_msg(instance, &m,
985 sizeof(m.u.port_info_get),
986 &rmsg, &rmsg_handle);
990 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
991 /* got an unexpected message type in reply */
996 /* return operation status */
997 ret = -rmsg->u.port_info_get_reply.status;
998 if (ret != MMAL_MSG_STATUS_SUCCESS)
1001 if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
1006 /* copy the values out of the message */
1007 port->handle = rmsg->u.port_info_get_reply.port_handle;
1009 /* port type and index cached to use on port info set because
1010 * it does not use a port handle
1012 port->type = rmsg->u.port_info_get_reply.port_type;
1013 port->index = rmsg->u.port_info_get_reply.port_index;
1015 port->minimum_buffer.num =
1016 rmsg->u.port_info_get_reply.port.buffer_num_min;
1017 port->minimum_buffer.size =
1018 rmsg->u.port_info_get_reply.port.buffer_size_min;
1019 port->minimum_buffer.alignment =
1020 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
1022 port->recommended_buffer.alignment =
1023 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
1024 port->recommended_buffer.num =
1025 rmsg->u.port_info_get_reply.port.buffer_num_recommended;
1027 port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
1028 port->current_buffer.size =
1029 rmsg->u.port_info_get_reply.port.buffer_size;
1032 port->format.type = rmsg->u.port_info_get_reply.format.type;
1033 port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
1034 port->format.encoding_variant =
1035 rmsg->u.port_info_get_reply.format.encoding_variant;
1036 port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
1037 port->format.flags = rmsg->u.port_info_get_reply.format.flags;
1039 /* elementary stream format */
1041 &rmsg->u.port_info_get_reply.es,
1042 sizeof(union mmal_es_specific_format));
1043 port->format.es = &port->es;
1045 port->format.extradata_size =
1046 rmsg->u.port_info_get_reply.format.extradata_size;
1047 memcpy(port->format.extradata,
1048 rmsg->u.port_info_get_reply.extradata,
1049 port->format.extradata_size);
1051 pr_debug("received port info\n");
1052 dump_port_info(port);
1056 pr_debug("%s:result:%d component:0x%x port:%d\n",
1057 __func__, ret, port->component->handle, port->handle);
1059 vchi_held_msg_release(&rmsg_handle);
1064 /* create comonent on vc */
1065 static int create_component(struct vchiq_mmal_instance *instance,
1066 struct vchiq_mmal_component *component,
1071 struct mmal_msg *rmsg;
1072 struct vchi_held_msg rmsg_handle;
1074 /* build component create message */
1075 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
1076 m.u.component_create.client_component = component->client_component;
1077 strncpy(m.u.component_create.name, name,
1078 sizeof(m.u.component_create.name));
1080 ret = send_synchronous_mmal_msg(instance, &m,
1081 sizeof(m.u.component_create),
1082 &rmsg, &rmsg_handle);
1086 if (rmsg->h.type != m.h.type) {
1087 /* got an unexpected message type in reply */
1092 ret = -rmsg->u.component_create_reply.status;
1093 if (ret != MMAL_MSG_STATUS_SUCCESS)
1096 /* a valid component response received */
1097 component->handle = rmsg->u.component_create_reply.component_handle;
1098 component->inputs = rmsg->u.component_create_reply.input_num;
1099 component->outputs = rmsg->u.component_create_reply.output_num;
1100 component->clocks = rmsg->u.component_create_reply.clock_num;
1102 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
1104 component->inputs, component->outputs, component->clocks);
1107 vchi_held_msg_release(&rmsg_handle);
1112 /* destroys a component on vc */
1113 static int destroy_component(struct vchiq_mmal_instance *instance,
1114 struct vchiq_mmal_component *component)
1118 struct mmal_msg *rmsg;
1119 struct vchi_held_msg rmsg_handle;
1121 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
1122 m.u.component_destroy.component_handle = component->handle;
1124 ret = send_synchronous_mmal_msg(instance, &m,
1125 sizeof(m.u.component_destroy),
1126 &rmsg, &rmsg_handle);
1130 if (rmsg->h.type != m.h.type) {
1131 /* got an unexpected message type in reply */
1136 ret = -rmsg->u.component_destroy_reply.status;
1140 vchi_held_msg_release(&rmsg_handle);
1145 /* enable a component on vc */
1146 static int enable_component(struct vchiq_mmal_instance *instance,
1147 struct vchiq_mmal_component *component)
1151 struct mmal_msg *rmsg;
1152 struct vchi_held_msg rmsg_handle;
1154 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1155 m.u.component_enable.component_handle = component->handle;
1157 ret = send_synchronous_mmal_msg(instance, &m,
1158 sizeof(m.u.component_enable),
1159 &rmsg, &rmsg_handle);
1163 if (rmsg->h.type != m.h.type) {
1164 /* got an unexpected message type in reply */
1169 ret = -rmsg->u.component_enable_reply.status;
1172 vchi_held_msg_release(&rmsg_handle);
1177 /* disable a component on vc */
1178 static int disable_component(struct vchiq_mmal_instance *instance,
1179 struct vchiq_mmal_component *component)
1183 struct mmal_msg *rmsg;
1184 struct vchi_held_msg rmsg_handle;
1186 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1187 m.u.component_disable.component_handle = component->handle;
1189 ret = send_synchronous_mmal_msg(instance, &m,
1190 sizeof(m.u.component_disable),
1191 &rmsg, &rmsg_handle);
1195 if (rmsg->h.type != m.h.type) {
1196 /* got an unexpected message type in reply */
1201 ret = -rmsg->u.component_disable_reply.status;
1205 vchi_held_msg_release(&rmsg_handle);
1210 /* get version of mmal implementation */
1211 static int get_version(struct vchiq_mmal_instance *instance,
1212 u32 *major_out, u32 *minor_out)
1216 struct mmal_msg *rmsg;
1217 struct vchi_held_msg rmsg_handle;
1219 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1221 ret = send_synchronous_mmal_msg(instance, &m,
1222 sizeof(m.u.version),
1223 &rmsg, &rmsg_handle);
1227 if (rmsg->h.type != m.h.type) {
1228 /* got an unexpected message type in reply */
1233 *major_out = rmsg->u.version.major;
1234 *minor_out = rmsg->u.version.minor;
1237 vchi_held_msg_release(&rmsg_handle);
1242 /* do a port action with a port as a parameter */
/*
 * Issues a PORT_ACTION (enable/disable/flush, per @action_type) that
 * carries the full serialised port description as its payload.
 */
1243 static int port_action_port(struct vchiq_mmal_instance *instance,
1244 struct vchiq_mmal_port *port,
1245 enum mmal_msg_port_action_type action_type)
1249 struct mmal_msg *rmsg;
1250 struct vchi_held_msg rmsg_handle;
1252 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1253 m.u.port_action_port.component_handle = port->component->handle;
1254 m.u.port_action_port.port_handle = port->handle;
1255 m.u.port_action_port.action = action_type;
/* serialise the kernel-side port state into the message payload */
1257 port_to_mmal_msg(port, &m.u.port_action_port.port);
1259 ret = send_synchronous_mmal_msg(instance, &m,
1260 sizeof(m.u.port_action_port),
1261 &rmsg, &rmsg_handle);
/* reply must be a PORT_ACTION, otherwise protocol error */
1265 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1266 /* got an unexpected message type in reply */
/* VC status is positive; negate so failure returns a negative value */
1271 ret = -rmsg->u.port_action_reply.status;
1273 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1275 ret, port->component->handle, port->handle,
1276 port_action_type_names[action_type], action_type);
1279 vchi_held_msg_release(&rmsg_handle);
1284 /* do a port action with handles as parameters */
/*
 * Issues a PORT_ACTION (connect/disconnect, per @action_type) that
 * references the peer by component/port handle rather than carrying a
 * full port description.
 */
1285 static int port_action_handle(struct vchiq_mmal_instance *instance,
1286 struct vchiq_mmal_port *port,
1287 enum mmal_msg_port_action_type action_type,
1288 u32 connect_component_handle,
1289 u32 connect_port_handle)
1293 struct mmal_msg *rmsg;
1294 struct vchi_held_msg rmsg_handle;
1296 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1298 m.u.port_action_handle.component_handle = port->component->handle;
1299 m.u.port_action_handle.port_handle = port->handle;
1300 m.u.port_action_handle.action = action_type;
/* peer port identified by its VC handles */
1302 m.u.port_action_handle.connect_component_handle =
1303 connect_component_handle;
1304 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1306 ret = send_synchronous_mmal_msg(instance, &m,
1307 sizeof(m.u.port_action_handle),
1308 &rmsg, &rmsg_handle);
/* reply must be a PORT_ACTION, otherwise protocol error */
1312 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1313 /* got an unexpected message type in reply */
/* VC status is positive; negate so failure returns a negative value */
1318 ret = -rmsg->u.port_action_reply.status;
1320 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1322 ret, port->component->handle, port->handle,
1323 port_action_type_names[action_type],
1324 action_type, connect_component_handle, connect_port_handle);
1327 vchi_held_msg_release(&rmsg_handle);
/*
 * Sets a single MMAL parameter on @port.  @value/@value_size is the raw
 * parameter payload; the on-wire size fields include the two-u32
 * parameter header (id + size) in front of the value.
 */
1332 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1333 struct vchiq_mmal_port *port,
1334 u32 parameter_id, void *value, u32 value_size)
1338 struct mmal_msg *rmsg;
1339 struct vchi_held_msg rmsg_handle;
1341 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1343 m.u.port_parameter_set.component_handle = port->component->handle;
1344 m.u.port_parameter_set.port_handle = port->handle;
1345 m.u.port_parameter_set.id = parameter_id;
/* parameter size = 2-u32 parameter header (id, size) + payload */
1346 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1347 memcpy(&m.u.port_parameter_set.value, value, value_size);
/* message length = 4 fixed u32s ahead of the value + payload */
1349 ret = send_synchronous_mmal_msg(instance, &m,
1350 (4 * sizeof(u32)) + value_size,
1351 &rmsg, &rmsg_handle);
/* reply must echo the request type, otherwise protocol error */
1355 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1356 /* got an unexpected message type in reply */
/* VC status is positive; negate so failure returns a negative value */
1361 ret = -rmsg->u.port_parameter_set_reply.status;
1363 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1365 ret, port->component->handle, port->handle, parameter_id);
1368 vchi_held_msg_release(&rmsg_handle);
/*
 * Reads a single MMAL parameter from @port into @value.
 * On entry *value_size is the caller's buffer size (excluding the
 * parameter header); on return it is updated to the true parameter
 * size reported by the VPU, which may exceed the buffer (in which case
 * the copy is truncated to the buffer size).
 */
1373 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1374 struct vchiq_mmal_port *port,
1375 u32 parameter_id, void *value, u32 *value_size)
1379 struct mmal_msg *rmsg;
1380 struct vchi_held_msg rmsg_handle;
1382 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1384 m.u.port_parameter_get.component_handle = port->component->handle;
1385 m.u.port_parameter_get.port_handle = port->handle;
1386 m.u.port_parameter_get.id = parameter_id;
/* advertise buffer capacity including the 2-u32 parameter header */
1387 m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1389 ret = send_synchronous_mmal_msg(instance, &m,
1391 mmal_msg_port_parameter_get),
1392 &rmsg, &rmsg_handle);
1396 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1397 /* got an unexpected message type in reply */
1398 pr_err("Incorrect reply type %d\n", rmsg->h.type);
/* NOTE(review): status is NOT negated here, unlike the siblings —
 * presumably so MMAL_MSG_STATUS_* (e.g. E_NOSPC for truncation) is
 * visible to the caller; confirm against callers. */
1403 ret = rmsg->u.port_parameter_get_reply.status;
1405 /* port_parameter_get_reply.size includes the header,
1406 * whilst *value_size doesn't.
1408 rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
/* error or over-size reply: copy only what fits in the caller's buffer */
1410 if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1411 /* Copy only as much as we have space for
1412 * but report true size of parameter
1414 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1417 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1418 rmsg->u.port_parameter_get_reply.size);
1420 /* Always report the size of the returned parameter to the caller */
1421 *value_size = rmsg->u.port_parameter_get_reply.size;
1423 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1424 ret, port->component->handle, port->handle, parameter_id);
1427 vchi_held_msg_release(&rmsg_handle);
1432 /* disables a port and drains buffers from it */
/*
 * Sends the DISABLE port action, then returns any still-queued buffers
 * to the client via port->buffer_cb (zero-length, unknown timestamps)
 * and refreshes the cached port info from the VPU.
 */
1433 static int port_disable(struct vchiq_mmal_instance *instance,
1434 struct vchiq_mmal_port *port)
1437 struct list_head *q, *buf_head;
1438 unsigned long flags = 0;
1445 ret = port_action_port(instance, port,
1446 MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1449 * Drain all queued buffers on port. This should only
1450 * apply to buffers that have been queued before the port
1451 * has been enabled. If the port has been enabled and buffers
1452 * passed, then the buffers should have been removed from this
1453 * list, and we should get the relevant callbacks via VCHIQ
1454 * to release the buffers.
/* slock protects port->buffers against the VCHIQ service callback */
1456 spin_lock_irqsave(&port->slock, flags);
1458 list_for_each_safe(buf_head, q, &port->buffers) {
1459 struct mmal_buffer *mmalbuf;
1461 mmalbuf = list_entry(buf_head, struct mmal_buffer,
/* hand each drained buffer back as empty with no timestamps */
1464 if (port->buffer_cb) {
1465 mmalbuf->length = 0;
1466 mmalbuf->mmal_flags = 0;
1467 mmalbuf->dts = MMAL_TIME_UNKNOWN;
1468 mmalbuf->pts = MMAL_TIME_UNKNOWN;
1470 port->buffer_cb(instance,
1475 spin_unlock_irqrestore(&port->slock, flags);
/* re-read the port state so the cached info reflects the disable */
1477 ret = port_info_get(instance, port);
/*
 * Sends the ENABLE port action, then (if a buffer callback is set)
 * submits all buffers already queued on the port to the videocore and
 * refreshes the cached port info.
 */
1484 static int port_enable(struct vchiq_mmal_instance *instance,
1485 struct vchiq_mmal_port *port)
1487 unsigned int hdr_count;
1488 struct list_head *q, *buf_head;
1494 ret = port_action_port(instance, port,
1495 MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1501 if (port->buffer_cb) {
1502 /* send buffer headers to videocore */
1504 list_for_each_safe(buf_head, q, &port->buffers) {
1505 struct mmal_buffer *mmalbuf;
1507 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1509 ret = buffer_from_host(instance, port, mmalbuf);
/* cap submissions at the port's negotiated buffer count */
1515 if (hdr_count > port->current_buffer.num)
1520 ret = port_info_get(instance, port);
1526 /* ------------------------------------------------------------------
1528 *------------------------------------------------------------------
/*
 * Pushes the locally-modified port format to the VPU, then reads back
 * what was actually applied (the VPU may adjust the requested format).
 * Returns -EINTR-style failure if the mutex wait is interrupted.
 */
1531 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1532 struct vchiq_mmal_port *port)
1536 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1539 ret = port_info_set(instance, port);
1541 goto release_unlock;
1543 /* read what has actually been set */
1544 ret = port_info_get(instance, port);
1547 mutex_unlock(&instance->vchiq_mutex);
1551 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
/*
 * Exported wrapper around port_parameter_set() taking vchiq_mutex.
 * On a successful ZERO_COPY set, mirrors the flag into port->zero_copy
 * so buffer submission knows to import dmabufs via vcsm.
 */
1553 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1554 struct vchiq_mmal_port *port,
1555 u32 parameter, void *value, u32 value_size)
1559 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1562 ret = port_parameter_set(instance, port, parameter, value, value_size);
1564 mutex_unlock(&instance->vchiq_mutex);
/* cache the zero-copy state locally for vchiq_mmal_submit_buffer() */
1566 if (parameter == MMAL_PARAMETER_ZERO_COPY && !ret)
1567 port->zero_copy = !!(*(bool *)value);
1571 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
/*
 * Exported wrapper around port_parameter_get() taking vchiq_mutex.
 * *value_size is in/out: buffer capacity on entry, true parameter size
 * on return (see port_parameter_get()).
 */
1573 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1574 struct vchiq_mmal_port *port,
1575 u32 parameter, void *value, u32 *value_size)
1579 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1582 ret = port_parameter_get(instance, port, parameter, value, value_size);
1584 mutex_unlock(&instance->vchiq_mutex);
1588 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1592 * enables a port and queues buffers for satisfying callbacks if we
1593 * provide a callback handler
/* No-op (success) if the port is already enabled. */
1595 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1596 struct vchiq_mmal_port *port,
1597 vchiq_mmal_buffer_cb buffer_cb)
1601 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1604 /* already enabled - noop */
1605 if (port->enabled) {
/* record the completion callback before submitting queued buffers */
1610 port->buffer_cb = buffer_cb;
1612 ret = port_enable(instance, port);
1615 mutex_unlock(&instance->vchiq_mutex);
1619 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
/*
 * Exported wrapper that disables @port (draining queued buffers) under
 * vchiq_mutex.  No-op if the port is not currently enabled.
 */
1621 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1622 struct vchiq_mmal_port *port)
1626 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1629 if (!port->enabled) {
1630 mutex_unlock(&instance->vchiq_mutex);
1634 ret = port_disable(instance, port);
1636 mutex_unlock(&instance->vchiq_mutex);
1640 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1642 /* ports will be connected in a tunneled manner so data buffers
1643 * are not handled by client.
/*
 * Connects @src to @dst as a VPU-side tunnel: tears down any existing
 * connection on @src first, copies src's video format to dst, applies
 * it, then issues the CONNECT action.  Passing a NULL/absent @dst
 * (see the "not making new connection" path) just disconnects.
 */
1645 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1646 struct vchiq_mmal_port *src,
1647 struct vchiq_mmal_port *dst)
1651 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1654 /* disconnect ports if connected */
1655 if (src->connected) {
1656 ret = port_disable(instance, src);
1658 pr_err("failed disabling src port(%d)\n", ret);
1659 goto release_unlock;
1662 /* do not need to disable the destination port as they
1663 * are connected and it is done automatically
1666 ret = port_action_handle(instance, src,
1667 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1668 src->connected->component->handle,
1669 src->connected->handle);
1671 pr_err("failed disconnecting src port\n");
1672 goto release_unlock;
/* the old peer was implicitly disabled by the disconnect */
1674 src->connected->enabled = 0;
1675 src->connected = NULL;
1679 /* do not make new connection */
1681 pr_debug("not making new connection\n");
1682 goto release_unlock;
1685 /* copy src port format to dst */
1686 dst->format.encoding = src->format.encoding;
1687 dst->es.video.width = src->es.video.width;
1688 dst->es.video.height = src->es.video.height;
1689 dst->es.video.crop.x = src->es.video.crop.x;
1690 dst->es.video.crop.y = src->es.video.crop.y;
1691 dst->es.video.crop.width = src->es.video.crop.width;
1692 dst->es.video.crop.height = src->es.video.crop.height;
1693 dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1694 dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1696 /* set new format */
1697 ret = port_info_set(instance, dst);
1699 pr_debug("setting port info failed\n");
1700 goto release_unlock;
1703 /* read what has actually been set */
1704 ret = port_info_get(instance, dst);
1706 pr_debug("read back port info failed\n");
1707 goto release_unlock;
1710 /* connect two ports together */
1711 ret = port_action_handle(instance, src,
1712 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1713 dst->component->handle, dst->handle);
1715 pr_debug("connecting port %d:%d to %d:%d failed\n",
1716 src->component->handle, src->handle,
1717 dst->component->handle, dst->handle);
1718 goto release_unlock;
/* record the new peer so a later call can disconnect it */
1720 src->connected = dst;
1724 mutex_unlock(&instance->vchiq_mutex);
1728 EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
/*
 * Submits @buffer to @port.  For zero-copy ports the dmabuf is first
 * imported into vcsm to obtain a VC-side handle.  If the port is
 * disabled (buffer_from_host() -> -EINVAL) the buffer is queued on
 * port->buffers for submission when the port is enabled.
 */
1730 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1731 struct vchiq_mmal_port *port,
1732 struct mmal_buffer *buffer)
1734 unsigned long flags = 0;
1738 * We really want to do this in mmal_vchi_buffer_init but can't as
1739 * videobuf2 won't let us have the dmabuf there.
/* lazily import the dmabuf once per buffer (vcsm_handle caches it) */
1741 if (port->zero_copy && buffer->dma_buf && !buffer->vcsm_handle) {
1742 pr_debug("%s: import dmabuf %p\n", __func__, buffer->dma_buf);
1743 ret = vc_sm_cma_import_dmabuf(buffer->dma_buf,
1744 &buffer->vcsm_handle);
1746 pr_err("%s: vc_sm_import_dmabuf_fd failed, ret %d\n",
/* translate the vcsm handle into the VC-side handle used on the wire */
1751 buffer->vc_handle = vc_sm_cma_int_handle(buffer->vcsm_handle);
1752 if (!buffer->vc_handle) {
1753 pr_err("%s: vc_sm_int_handle failed %d\n",
/* undo the import on failure so we don't leak the vcsm mapping */
1755 vc_sm_cma_free(buffer->vcsm_handle);
1758 pr_debug("%s: import dmabuf %p - got vc handle %08X\n",
1759 __func__, buffer->dma_buf, buffer->vc_handle);
1762 ret = buffer_from_host(instance, port, buffer);
1763 if (ret == -EINVAL) {
1764 /* Port is disabled. Queue for when it is enabled. */
1765 spin_lock_irqsave(&port->slock, flags);
1766 list_add_tail(&buffer->list, &port->buffers);
1767 spin_unlock_irqrestore(&port->slock, flags);
1772 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
/*
 * Allocates and attaches a per-buffer message context used to track
 * the buffer across the VCHIQ message exchange.  Returns a negative
 * errno if context allocation fails.
 */
1774 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1775 struct mmal_buffer *buf)
1777 struct mmal_msg_context *msg_context = get_msg_context(instance);
1779 if (IS_ERR(msg_context))
1780 return (PTR_ERR(msg_context));
1782 buf->msg_context = msg_context;
1785 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
/*
 * Releases the buffer's vcsm (zero-copy) mapping, if any, and clears
 * the handle so a later submit can re-import the dmabuf.
 */
1787 int mmal_vchi_buffer_unmap(struct mmal_buffer *buf)
1791 if (buf->vcsm_handle) {
1794 pr_debug("%s: vc_sm_cma_free on handle %p\n", __func__,
1796 ret = vc_sm_cma_free(buf->vcsm_handle);
1798 pr_err("%s: vcsm_free failed, ret %d\n", __func__, ret);
/* clear so the handle is not freed twice */
1799 buf->vcsm_handle = 0;
1803 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_unmap);
/*
 * Counterpart of mmal_vchi_buffer_init(): releases the buffer's
 * message context and any vcsm mapping.
 */
1805 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1807 struct mmal_msg_context *msg_context = buf->msg_context;
1810 release_msg_context(msg_context);
1811 buf->msg_context = NULL;
1813 mmal_vchi_buffer_unmap(buf);
1816 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
/*
 * Sets up the per-port context used to receive MMAL events: a message
 * context plus a pre-allocated event buffer of MMAL_WORKER_EVENT_SPACE
 * bytes.  On allocation failure the partially built state is unwound
 * (goto cleanup chain).
 */
1818 static void init_event_context(struct vchiq_mmal_instance *instance,
1819 struct vchiq_mmal_port *port)
1821 struct mmal_msg_context *ctx = get_msg_context(instance);
1823 mutex_init(&port->event_context_mutex);
1825 port->event_context = ctx;
/* the bulk sub-context carries the port identity into the work item */
1826 ctx->u.bulk.instance = instance;
1827 ctx->u.bulk.port = port;
1828 ctx->u.bulk.buffer =
1829 kzalloc(sizeof(*ctx->u.bulk.buffer), GFP_KERNEL);
1830 if (!ctx->u.bulk.buffer)
1831 goto release_msg_context;
1832 ctx->u.bulk.buffer->buffer = kzalloc(MMAL_WORKER_EVENT_SPACE,
1834 if (!ctx->u.bulk.buffer->buffer)
1835 goto release_buffer;
/* deferred completion handling runs in buffer_work_cb */
1837 INIT_WORK(&ctx->u.bulk.work, buffer_work_cb);
1841 kfree(ctx->u.bulk.buffer);
1842 release_msg_context:
1843 release_msg_context(ctx);
/*
 * Tears down the state created by init_event_context() for one port:
 * event payload, buffer wrapper, then the message context itself.
 */
1846 static void free_event_context(struct vchiq_mmal_port *port)
1848 struct mmal_msg_context *ctx = port->event_context;
1853 kfree(ctx->u.bulk.buffer->buffer);
1854 kfree(ctx->u.bulk.buffer);
1855 release_msg_context(ctx);
/* clear so a double free via this path is harmless */
1856 port->event_context = NULL;
/*
 * Frees the event contexts of every port on @component: all inputs,
 * outputs, clocks, and finally the control port.
 */
1859 static void release_all_event_contexts(struct vchiq_mmal_component *component)
1863 for (idx = 0; idx < component->inputs; idx++)
1864 free_event_context(&component->input[idx]);
1865 for (idx = 0; idx < component->outputs; idx++)
1866 free_event_context(&component->output[idx]);
1867 for (idx = 0; idx < component->clocks; idx++)
1868 free_event_context(&component->clock[idx]);
1869 free_event_context(&component->control);
1872 /* Initialise a mmal component and its ports
/*
 * Claims a free slot in instance->component[], creates the named
 * component on the VPU, then queries and initialises every port
 * (control, inputs, outputs, clocks) including its event context.
 * On failure the component is destroyed and the slot released.
 */
1875 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1877 struct vchiq_mmal_component **component_out)
1880 int idx; /* port index */
1881 struct vchiq_mmal_component *component = NULL;
1883 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* find a free component slot (bounded by VCHIQ_MMAL_MAX_COMPONENTS) */
1886 for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
1887 if (!instance->component[idx].in_use) {
1888 component = &instance->component[idx];
1889 component->in_use = 1;
1895 ret = -EINVAL; /* todo is this correct error? */
1899 /* We need a handle to reference back to our component structure.
1900 * Use the array index in instance->component rather than rolling
1903 component->client_component = idx;
1905 ret = create_component(instance, component, name);
1907 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1912 /* ports info needs gathering */
1913 component->control.type = MMAL_PORT_TYPE_CONTROL;
1914 component->control.index = 0;
1915 component->control.component = component;
1916 spin_lock_init(&component->control.slock);
1917 INIT_LIST_HEAD(&component->control.buffers);
1918 ret = port_info_get(instance, &component->control);
1920 goto release_component;
1921 init_event_context(instance, &component->control);
/* input port initialisation: counts come from create_component reply */
1923 for (idx = 0; idx < component->inputs; idx++) {
1924 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1925 component->input[idx].index = idx;
1926 component->input[idx].component = component;
1927 spin_lock_init(&component->input[idx].slock);
1928 INIT_LIST_HEAD(&component->input[idx].buffers);
1929 ret = port_info_get(instance, &component->input[idx]);
1931 goto release_component;
1932 init_event_context(instance, &component->input[idx]);
1935 for (idx = 0; idx < component->outputs; idx++) {
1936 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1937 component->output[idx].index = idx;
1938 component->output[idx].component = component;
1939 spin_lock_init(&component->output[idx].slock);
1940 INIT_LIST_HEAD(&component->output[idx].buffers);
1941 ret = port_info_get(instance, &component->output[idx]);
1943 goto release_component;
1944 init_event_context(instance, &component->output[idx]);
1947 for (idx = 0; idx < component->clocks; idx++) {
1948 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1949 component->clock[idx].index = idx;
1950 component->clock[idx].component = component;
1951 spin_lock_init(&component->clock[idx].slock);
1952 INIT_LIST_HEAD(&component->clock[idx].buffers);
1953 ret = port_info_get(instance, &component->clock[idx]);
1955 goto release_component;
1956 init_event_context(instance, &component->clock[idx]);
1959 *component_out = component;
1961 mutex_unlock(&instance->vchiq_mutex);
/* error unwind: destroy VC-side component, free contexts, free slot */
1966 destroy_component(instance, component);
1967 release_all_event_contexts(component);
1970 component->in_use = 0;
1971 mutex_unlock(&instance->vchiq_mutex);
1975 EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1978 * cause a mmal component to be destroyed
/* Disables the component if still enabled, destroys it on the VPU,
 * frees its slot and releases all per-port event contexts. */
1980 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1981 struct vchiq_mmal_component *component)
1985 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1988 if (component->enabled)
1989 ret = disable_component(instance, component);
1991 ret = destroy_component(instance, component);
1993 component->in_use = 0;
1995 release_all_event_contexts(component);
1997 mutex_unlock(&instance->vchiq_mutex);
2001 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
2004 * cause a mmal component to be enabled
/* No-op (success) if already enabled; otherwise sends the enable
 * message and records the state on success. */
2006 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
2007 struct vchiq_mmal_component *component)
2011 if (mutex_lock_interruptible(&instance->vchiq_mutex))
2014 if (component->enabled) {
2015 mutex_unlock(&instance->vchiq_mutex);
2019 ret = enable_component(instance, component);
2021 component->enabled = 1;
2023 mutex_unlock(&instance->vchiq_mutex);
2027 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
2030 * cause a mmal component to be disabled
/* (comment above fixed: this is the DISABLE path, not enable)
 * No-op (success) if already disabled; otherwise sends the disable
 * message and records the state on success. */
2032 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
2033 struct vchiq_mmal_component *component)
2037 if (mutex_lock_interruptible(&instance->vchiq_mutex))
2040 if (!component->enabled) {
2041 mutex_unlock(&instance->vchiq_mutex);
2045 ret = disable_component(instance, component);
2047 component->enabled = 0;
2049 mutex_unlock(&instance->vchiq_mutex);
2053 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
/*
 * Exported wrapper around get_version() taking vchiq_mutex; reports
 * the VPU-side MMAL major/minor version.
 */
2055 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
2056 u32 *major_out, u32 *minor_out)
2060 if (mutex_lock_interruptible(&instance->vchiq_mutex))
2063 ret = get_version(instance, major_out, minor_out);
2065 mutex_unlock(&instance->vchiq_mutex);
2069 EXPORT_SYMBOL_GPL(vchiq_mmal_version);
/*
 * Tears down an instance created by vchiq_mmal_init(): closes the VCHI
 * service, flushes and destroys the bulk workqueue, and frees the
 * scratch buffer and context-id map.
 */
2071 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
2078 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* take a use count so the service can be closed cleanly */
2081 vchi_service_use(instance->handle);
2083 status = vchi_service_close(instance->handle);
2085 pr_err("mmal-vchiq: VCHIQ close failed\n");
2087 mutex_unlock(&instance->vchiq_mutex);
/* drain outstanding bulk-receive work before destroying the queue */
2089 flush_workqueue(instance->bulk_wq);
2090 destroy_workqueue(instance->bulk_wq);
2092 vfree(instance->bulk_scratch);
2094 idr_destroy(&instance->context_map);
2100 EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
/*
 * Creates a vchiq_mmal_instance: initialises and connects VCHI, opens
 * the MMAL service, and sets up the per-instance state (mutexes, idr
 * context map, bulk scratch page, ordered bulk workqueue).  On error
 * the acquired resources are unwound in reverse order.
 */
2102 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
2105 struct vchiq_mmal_instance *instance;
2106 static VCHI_INSTANCE_T vchi_instance;
2107 struct service_creation params = {
2108 .version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
2109 .service_id = VC_MMAL_SERVER_NAME,
2110 .callback = service_callback,
2111 .callback_param = NULL,
2114 /* compile time checks to ensure structure size as they are
2115 * directly (de)serialised from memory.
2118 /* ensure the header structure has packed to the correct size */
2119 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
2121 /* ensure message structure does not exceed maximum length */
2122 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
2124 /* mmal port struct is correct size */
2125 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
2127 /* create a vchi instance */
2128 status = vchi_initialise(&vchi_instance);
2130 pr_err("Failed to initialise VCHI instance (status=%d)\n",
2135 status = vchi_connect(vchi_instance);
2137 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
2141 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
2146 mutex_init(&instance->vchiq_mutex);
/* scratch page used for bulk transfers */
2148 instance->bulk_scratch = vmalloc(PAGE_SIZE);
/* ids start at 1 so 0 can never be a valid context handle */
2150 mutex_init(&instance->context_map_lock);
2151 idr_init_base(&instance->context_map, 1);
2153 params.callback_param = instance;
/* ordered workqueue: bulk completions must be processed in order */
2155 instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
2157 if (!instance->bulk_wq)
2160 status = vchi_service_open(vchi_instance, &params, &instance->handle);
2162 pr_err("Failed to open VCHI service connection (status=%d)\n",
2164 goto err_close_services;
/* drop the initial use count taken by vchi_service_open */
2167 vchi_service_release(instance->handle);
2169 *out_instance = instance;
/* error unwind: close service, destroy workqueue, free scratch */
2174 vchi_service_close(instance->handle);
2175 destroy_workqueue(instance->bulk_wq);
2177 vfree(instance->bulk_scratch);
2181 EXPORT_SYMBOL_GPL(vchiq_mmal_init);