1 // SPDX-License-Identifier: GPL-2.0
3 * Broadcom BM2835 V4L2 driver
5 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
7 * Authors: Vincent Sanders @ Collabora
8 * Dave Stevenson @ Broadcom
9 * (now dave.stevenson@raspberrypi.org)
10 * Simon Mellor @ Broadcom
11 * Luke Diamand @ Broadcom
13 * V4L2 driver MMAL vchiq interface code
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
28 #include "mmal-common.h"
29 #include "mmal-parameters.h"
30 #include "mmal-vchiq.h"
33 #include "vc-sm-cma/vc_sm_knl.h"
36 #include "interface/vchi/vchi.h"
38 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
39 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION("0.0.1");
44 * maximum number of components supported.
45 * This matches the maximum permitted by default on the VPU
47 #define VCHIQ_MMAL_MAX_COMPONENTS 64
50 * Timeout for synchronous msg responses in seconds.
51 * Helpful to increase this if stopping in the VPU debugger.
53 #define SYNC_MSG_TIMEOUT 3
55 /*#define FULL_MSG_DUMP 1*/
/* Human-readable names for MMAL message types, used only for debug
 * output (DBG_DUMP_MSG).  Indexed by the message header's type field.
 * NOTE(review): most entries of this table are elided in this view.
 */
58 static const char *const msg_type_names[] = {
76 "GET_CORE_STATS_FOR_PORT",
80 "OPAQUE_ALLOCATOR_DESC",
83 "BUFFER_FROM_HOST_ZEROLEN",
89 static const char *const port_action_type_names[] = {
/* Debug helper: print an MMAL message's type/length, and with
 * FULL_MSG_DUMP also hex-dump the header and payload.  The final
 * variant compiles to nothing when debug dumping is disabled.
 * NOTE(review): parts of the #if/#else structure are not visible here.
 */
100 #if defined(FULL_MSG_DUMP)
101 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
103 pr_debug(TITLE" type:%s(%d) length:%d\n", \
104 msg_type_names[(MSG)->h.type], \
105 (MSG)->h.type, (MSG_LEN)); \
106 print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
108 sizeof(struct mmal_msg_header), 1); \
109 print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
111 ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
112 (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
115 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
117 pr_debug(TITLE" type:%s(%d) length:%d\n", \
118 msg_type_names[(MSG)->h.type], \
119 (MSG)->h.type, (MSG_LEN)); \
123 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
126 struct vchiq_mmal_instance;
128 /* normal message context */
/* Per-message state, looked up via the context_map idr when a VCHI
 * reply arrives.  The union carries either bulk-transfer state (buffer
 * callbacks) or synchronous-reply state (completion + held message).
 */
129 struct mmal_msg_context {
130 struct vchiq_mmal_instance *instance;
132 /* Index in the context_map idr so that we can find the
133 * mmal_msg_context again when servicing the VCHI reply.
139 /* work struct for buffer_cb callback */
140 struct work_struct work;
141 /* work struct for deferred callback */
142 struct work_struct buffer_to_host_work;
/* instance this bulk transfer belongs to */
144 struct vchiq_mmal_instance *instance;
/* port the buffer was submitted on */
146 struct vchiq_mmal_port *port;
147 /* actual buffer used to store bulk reply */
148 struct mmal_buffer *buffer;
149 /* amount of buffer used */
150 unsigned long buffer_used;
151 /* MMAL buffer flags */
153 /* Presentation and Decode timestamps */
156 /* MMAL buffer command flag */
159 int status; /* context status */
161 } bulk; /* bulk data */
164 /* message handle to release */
165 struct vchi_held_msg msg_handle;
166 /* pointer to received message */
167 struct mmal_msg *msg;
168 /* received message length */
170 /* completion upon reply */
171 struct completion cmplt;
172 } sync; /* synchronous response */
/* Per-connection state for one VCHI service to the VideoCore MMAL
 * server: service handle, serialisation mutex, message-context idr
 * and its lock, the fixed component table, and an ordered workqueue
 * used to keep bulk receives in submission order.
 */
177 struct vchiq_mmal_instance {
178 VCHI_SERVICE_HANDLE_T handle;
180 /* ensure serialised access to service */
181 struct mutex vchiq_mutex;
183 /* vmalloc page to receive scratch bulk xfers into */
/* maps integer handles -> struct mmal_msg_context */
186 struct idr context_map;
187 /* protect accesses to context_map */
188 struct mutex context_map_lock;
190 struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
192 /* ordered workqueue to process all bulk operations */
193 struct workqueue_struct *bulk_wq;
195 /* Opaque handle for a VCHI instance */
196 VCHI_INSTANCE_T vchi_instance;
/* Allocate a message context and register it in the instance's
 * context_map idr under context_map_lock.  The returned context's
 * handle travels in the message header so the reply handler can map
 * it back.  Returns ERR_PTR(-ENOMEM) or an idr error on failure.
 */
199 static struct mmal_msg_context *
200 get_msg_context(struct vchiq_mmal_instance *instance)
202 struct mmal_msg_context *msg_context;
205 /* todo: should this be allocated from a pool to avoid kzalloc */
206 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
209 return ERR_PTR(-ENOMEM);
211 /* Create an ID that will be passed along with our message so
212 * that when we service the VCHI reply, we can look up what
213 * message is being replied to.
215 mutex_lock(&instance->context_map_lock);
216 handle = idr_alloc(&instance->context_map, msg_context,
218 mutex_unlock(&instance->context_map_lock);
/* negative idr_alloc result propagated to the caller as ERR_PTR */
222 return ERR_PTR(handle);
225 msg_context->instance = instance;
226 msg_context->handle = handle;
/* Map a handle from a reply message back to its message context.
 * Returns NULL if the handle is not in the idr.
 */
231 static struct mmal_msg_context *
232 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
234 return idr_find(&instance->context_map, handle);
/* Remove a message context from the idr (under context_map_lock) and
 * release it.  Counterpart to get_msg_context().
 */
238 release_msg_context(struct mmal_msg_context *msg_context)
240 struct vchiq_mmal_instance *instance = msg_context->instance;
242 mutex_lock(&instance->context_map_lock);
243 idr_remove(&instance->context_map, msg_context->handle);
244 mutex_unlock(&instance->context_map_lock);
248 /* workqueue scheduled callback
250 * we do this because it is important we do not call any other vchiq
251 * sync calls from witin the message delivery thread
/* Copies the completed transfer's metadata (length, flags, pts/dts,
 * cmd) into the mmal_buffer, decrements the port's buffers_with_vpu
 * count and invokes the port's buffer_cb.
 */
253 static void buffer_work_cb(struct work_struct *work)
255 struct mmal_msg_context *msg_context =
256 container_of(work, struct mmal_msg_context, u.bulk.work);
257 struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
260 pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
261 __func__, msg_context);
265 buffer->length = msg_context->u.bulk.buffer_used;
266 buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
267 buffer->dts = msg_context->u.bulk.dts;
268 buffer->pts = msg_context->u.bulk.pts;
269 buffer->cmd = msg_context->u.bulk.cmd;
272 atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
274 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
275 msg_context->u.bulk.port,
276 msg_context->u.bulk.status,
277 msg_context->u.bulk.buffer);
/* NOTE(review): unlock suggests the event path (event_to_host_cb)
 * entered with event_context_mutex held — confirm against that path.
 */
280 mutex_unlock(&msg_context->u.bulk.port->event_context_mutex);
283 /* workqueue scheduled callback to handle receiving buffers
285 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
286 * If we block in the service_callback context then we can't process the
287 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
288 * vchi_bulk_queue_receive() call to complete.
/* Runs on the ordered bulk_wq so receives are queued in submission
 * order; wraps the queue call in vchi_service_use/release.
 */
290 static void buffer_to_host_work_cb(struct work_struct *work)
292 struct mmal_msg_context *msg_context =
293 container_of(work, struct mmal_msg_context,
294 u.bulk.buffer_to_host_work);
295 struct vchiq_mmal_instance *instance = msg_context->instance;
296 unsigned long len = msg_context->u.bulk.buffer_used;
300 /* Dummy receive to ensure the buffers remain in order */
302 /* queue the bulk submission */
303 vchi_service_use(instance->handle);
304 ret = vchi_bulk_queue_receive(instance->handle,
305 msg_context->u.bulk.buffer->buffer,
306 /* Actual receive needs to be a multiple
310 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
311 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
314 vchi_service_release(instance->handle);
317 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
318 __func__, msg_context, ret);
321 /* enqueue a bulk receive for a given message context */
/* Validates the context has a backing buffer, clamps the read length
 * to the buffer size, records dts/pts/cmd from the message header and
 * defers the actual vchi receive to buffer_to_host_work_cb on the
 * ordered bulk workqueue.
 */
322 static int bulk_receive(struct vchiq_mmal_instance *instance,
323 struct mmal_msg *msg,
324 struct mmal_msg_context *msg_context)
326 unsigned long rd_len;
328 rd_len = msg->u.buffer_from_host.buffer_header.length;
330 if (!msg_context->u.bulk.buffer) {
331 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
333 /* todo: this is a serious error, we should never have
334 * committed a buffer_to_host operation to the mmal
335 * port without the buffer to back it up (underflow
336 * handling) and there is no obvious way to deal with
337 * this - how is the mmal servie going to react when
338 * we fail to do the xfer and reschedule a buffer when
339 * it arrives? perhaps a starved flag to indicate a
340 * waiting bulk receive?
346 /* ensure we do not overrun the available buffer */
347 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
348 rd_len = msg_context->u.bulk.buffer->buffer_size;
349 pr_warn("short read as not enough receive buffer space\n");
350 /* todo: is this the correct response, what happens to
351 * the rest of the message data?
356 msg_context->u.bulk.buffer_used = rd_len;
357 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
358 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
359 msg_context->u.bulk.cmd = msg->u.buffer_from_host.buffer_header.cmd;
/* defer to bulk_wq — see buffer_to_host_work_cb for why */
361 queue_work(msg_context->instance->bulk_wq,
362 &msg_context->u.bulk.buffer_to_host_work);
367 /* data in message, memcpy from packet into output buffer */
/* Short payloads arrive inline in the message (short_data); copy them
 * straight into the client buffer and record the size used.
 */
368 static int inline_receive(struct vchiq_mmal_instance *instance,
369 struct mmal_msg *msg,
370 struct mmal_msg_context *msg_context)
372 memcpy(msg_context->u.bulk.buffer->buffer,
373 msg->u.buffer_from_host.short_data,
374 msg->u.buffer_from_host.payload_in_message);
376 msg_context->u.bulk.buffer_used =
377 msg->u.buffer_from_host.payload_in_message;
382 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
/* Hand an empty (output) or filled (input) buffer to the VPU.  Stores
 * bulk-reply state in the buffer's msg_context, initialises the two
 * work items used on completion, bumps buffers_with_vpu, then builds
 * and queues the BUFFER_FROM_HOST message.
 */
384 buffer_from_host(struct vchiq_mmal_instance *instance,
385 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
387 struct mmal_msg_context *msg_context;
394 pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
397 if (!buf->msg_context) {
398 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
402 msg_context = buf->msg_context;
404 /* store bulk message context for when data arrives */
405 msg_context->u.bulk.instance = instance;
406 msg_context->u.bulk.port = port;
407 msg_context->u.bulk.buffer = buf;
408 msg_context->u.bulk.buffer_used = 0;
410 /* initialise work structure ready to schedule callback */
411 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
412 INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
413 buffer_to_host_work_cb);
415 atomic_inc(&port->buffers_with_vpu);
417 /* prep the buffer from host message */
418 memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
420 m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
421 m.h.magic = MMAL_MAGIC;
422 m.h.context = msg_context->handle;
425 /* drvbuf is our private data passed back */
426 m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
427 m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
428 m.u.buffer_from_host.drvbuf.port_handle = port->handle;
429 m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
432 m.u.buffer_from_host.buffer_header.cmd = 0;
/* zero-copy ports pass a VC shared-memory handle instead of a
 * host pointer */
433 if (port->zero_copy) {
434 m.u.buffer_from_host.buffer_header.data = buf->vc_handle;
436 m.u.buffer_from_host.buffer_header.data =
437 (u32)(unsigned long)buf->buffer;
440 m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
/* output buffers go to the VPU empty; input buffers carry data
 * and timestamps */
441 if (port->type == MMAL_PORT_TYPE_OUTPUT) {
442 m.u.buffer_from_host.buffer_header.length = 0;
443 m.u.buffer_from_host.buffer_header.offset = 0;
444 m.u.buffer_from_host.buffer_header.flags = 0;
445 m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
446 m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
448 m.u.buffer_from_host.buffer_header.length = buf->length;
449 m.u.buffer_from_host.buffer_header.offset = 0;
450 m.u.buffer_from_host.buffer_header.flags = buf->mmal_flags;
451 m.u.buffer_from_host.buffer_header.pts = buf->pts;
452 m.u.buffer_from_host.buffer_header.dts = buf->dts;
455 /* clear buffer type sepecific data */
456 memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
457 sizeof(m.u.buffer_from_host.buffer_header_type_specific));
459 /* no payload in message */
460 m.u.buffer_from_host.payload_in_message = 0;
462 vchi_service_use(instance->handle);
464 ret = vchi_queue_kernel_message(instance->handle,
466 sizeof(struct mmal_msg_header) +
467 sizeof(m.u.buffer_from_host));
469 vchi_service_release(instance->handle);
474 /* deals with receipt of event to host message */
/* Resolves the component/port the event targets, grabs that port's
 * event context (dropping the event if the mutex is contended), copies
 * the event payload into the context buffer and schedules
 * buffer_work_cb to deliver it.
 */
475 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
476 struct mmal_msg *msg, u32 msg_len)
478 int comp_idx = msg->u.event_to_host.client_component;
479 struct vchiq_mmal_component *component =
480 &instance->component[comp_idx];
481 struct vchiq_mmal_port *port = NULL;
482 struct mmal_msg_context *msg_context;
483 u32 port_num = msg->u.event_to_host.port_num;
/* events must not look like buffer replies */
485 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
486 pr_err("%s: MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n",
/* select the port array matching the reported port type, bounds
 * checking port_num against the component's port counts */
491 switch (msg->u.event_to_host.port_type) {
492 case MMAL_PORT_TYPE_CONTROL:
494 pr_err("%s: port_num of %u >= number of ports 1",
498 port = &component->control;
500 case MMAL_PORT_TYPE_INPUT:
501 if (port_num >= component->inputs) {
502 pr_err("%s: port_num of %u >= number of ports %u",
504 port_num >= component->inputs);
507 port = &component->input[port_num];
509 case MMAL_PORT_TYPE_OUTPUT:
510 if (port_num >= component->outputs) {
511 pr_err("%s: port_num of %u >= number of ports %u",
513 port_num >= component->outputs);
516 port = &component->output[port_num];
518 case MMAL_PORT_TYPE_CLOCK:
519 if (port_num >= component->clocks) {
520 pr_err("%s: port_num of %u >= number of ports %u",
522 port_num >= component->clocks);
525 port = &component->clock[port_num];
/* trylock: if a previous event is still being delivered, drop
 * this one rather than block the message delivery thread */
531 if (!mutex_trylock(&port->event_context_mutex)) {
532 pr_err("dropping event 0x%x\n", msg->u.event_to_host.cmd);
535 msg_context = port->event_context;
537 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
538 /* message reception had an error */
540 pr_err("%s: error %d in reply\n", __func__, msg->h.status);
542 msg_context->u.bulk.status = msg->h.status;
543 } else if (msg->u.event_to_host.length > MMAL_WORKER_EVENT_SPACE) {
544 /* data is not in message, queue a bulk receive */
545 pr_err("%s: payload not in message - bulk receive??! NOT SUPPORTED\n",
547 msg_context->u.bulk.status = -1;
549 memcpy(msg_context->u.bulk.buffer->buffer,
550 msg->u.event_to_host.data,
551 msg->u.event_to_host.length);
553 msg_context->u.bulk.buffer_used =
554 msg->u.event_to_host.length;
556 msg_context->u.bulk.mmal_flags = 0;
557 msg_context->u.bulk.dts = MMAL_TIME_UNKNOWN;
558 msg_context->u.bulk.pts = MMAL_TIME_UNKNOWN;
559 msg_context->u.bulk.cmd = msg->u.event_to_host.cmd;
561 pr_debug("event component:%u port type:%d num:%d cmd:0x%x length:%d\n",
562 msg->u.event_to_host.client_component,
563 msg->u.event_to_host.port_type,
564 msg->u.event_to_host.port_num,
565 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
/* buffer_work_cb delivers the event and releases
 * event_context_mutex */
568 schedule_work(&msg_context->u.bulk.work);
571 /* deals with receipt of buffer to host message */
/* Looks up the message context from drvbuf.client_context, then picks
 * one of four delivery paths: zero-copy (metadata only), empty buffer,
 * bulk receive (payload follows out-of-band), or inline short payload.
 * Successful bulk submission returns early — completion triggers the
 * callback; all other paths schedule buffer_work_cb directly.
 */
572 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
573 struct mmal_msg *msg, u32 msg_len)
575 struct mmal_msg_context *msg_context;
578 pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
579 __func__, instance, msg, msg_len);
581 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
582 handle = msg->u.buffer_from_host.drvbuf.client_context;
583 msg_context = lookup_msg_context(instance, handle);
586 pr_err("drvbuf.client_context(%u) is invalid\n",
591 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
595 msg_context->u.bulk.mmal_flags =
596 msg->u.buffer_from_host.buffer_header.flags;
598 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
599 /* message reception had an error */
600 pr_warn("error %d in reply\n", msg->h.status);
602 msg_context->u.bulk.status = msg->h.status;
604 } else if (msg->u.buffer_from_host.is_zero_copy) {
606 * Zero copy buffer, so nothing to do.
607 * Copy buffer info and make callback.
609 msg_context->u.bulk.buffer_used =
610 msg->u.buffer_from_host.buffer_header.length;
611 msg_context->u.bulk.mmal_flags =
612 msg->u.buffer_from_host.buffer_header.flags;
613 msg_context->u.bulk.dts =
614 msg->u.buffer_from_host.buffer_header.dts;
615 msg_context->u.bulk.pts =
616 msg->u.buffer_from_host.buffer_header.pts;
617 msg_context->u.bulk.cmd =
618 msg->u.buffer_from_host.buffer_header.cmd;
620 } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
/* empty buffer: EOS still needs a bulk receive to keep
 * transfer ordering; otherwise deliver an empty callback */
622 if (msg->u.buffer_from_host.buffer_header.flags &
623 MMAL_BUFFER_HEADER_FLAG_EOS) {
624 msg_context->u.bulk.status =
625 bulk_receive(instance, msg, msg_context);
626 if (msg_context->u.bulk.status == 0)
627 return; /* successful bulk submission, bulk
628 * completion will trigger callback
631 /* do callback with empty buffer - not EOS though */
632 msg_context->u.bulk.status = 0;
633 msg_context->u.bulk.buffer_used = 0;
635 } else if (msg->u.buffer_from_host.payload_in_message == 0) {
636 /* data is not in message, queue a bulk receive */
637 msg_context->u.bulk.status =
638 bulk_receive(instance, msg, msg_context);
639 if (msg_context->u.bulk.status == 0)
640 return; /* successful bulk submission, bulk
641 * completion will trigger callback
644 /* failed to submit buffer, this will end badly */
645 pr_err("error %d on bulk submission\n",
646 msg_context->u.bulk.status);
648 } else if (msg->u.buffer_from_host.payload_in_message <=
649 MMAL_VC_SHORT_DATA) {
650 /* data payload within message */
651 msg_context->u.bulk.status = inline_receive(instance, msg,
654 pr_err("message with invalid short payload\n");
657 msg_context->u.bulk.status = -EINVAL;
658 msg_context->u.bulk.buffer_used =
659 msg->u.buffer_from_host.payload_in_message;
662 /* schedule the port callback */
663 schedule_work(&msg_context->u.bulk.work);
/* Bulk transfer completed successfully: mark status OK and schedule
 * buffer_work_cb to deliver the buffer to the port callback.
 */
666 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
667 struct mmal_msg_context *msg_context)
669 msg_context->u.bulk.status = 0;
671 /* schedule the port callback */
672 schedule_work(&msg_context->u.bulk.work);
/* Bulk transfer was aborted: flag -EINTR and still deliver the buffer
 * via buffer_work_cb so the client gets it back.
 */
675 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
676 struct mmal_msg_context *msg_context)
678 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
680 msg_context->u.bulk.status = -EINTR;
682 schedule_work(&msg_context->u.bulk.work);
685 /* incoming event service callback */
/* VCHI service callback, runs in the message delivery thread.
 * MSG_AVAILABLE: holds the message and dispatches by type — buffer and
 * event messages to their handlers, everything else completes the
 * synchronous sender waiting on the context's completion.
 * BULK_RECEIVED / BULK_RECEIVE_ABORTED: forwarded to the bulk
 * completion handlers.  Must not make blocking vchiq sync calls here
 * (see buffer_work_cb's comment).
 */
686 static void service_callback(void *param,
687 const VCHI_CALLBACK_REASON_T reason,
690 struct vchiq_mmal_instance *instance = param;
693 struct mmal_msg *msg;
694 struct vchi_held_msg msg_handle;
695 struct mmal_msg_context *msg_context;
698 pr_err("Message callback passed NULL instance\n");
703 case VCHI_CALLBACK_MSG_AVAILABLE:
704 status = vchi_msg_hold(instance->handle, (void **)&msg,
705 &msg_len, VCHI_FLAGS_NONE, &msg_handle);
707 pr_err("Unable to dequeue a message (%d)\n", status);
711 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
713 /* handling is different for buffer messages */
714 switch (msg->h.type) {
715 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
716 vchi_held_msg_release(&msg_handle);
719 case MMAL_MSG_TYPE_EVENT_TO_HOST:
720 event_to_host_cb(instance, msg, msg_len);
721 vchi_held_msg_release(&msg_handle);
725 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
726 buffer_to_host_cb(instance, msg, msg_len);
727 vchi_held_msg_release(&msg_handle);
731 /* messages dependent on header context to complete */
732 if (!msg->h.context) {
733 pr_err("received message context was null!\n");
734 vchi_held_msg_release(&msg_handle);
738 msg_context = lookup_msg_context(instance,
741 pr_err("received invalid message context %u!\n",
743 vchi_held_msg_release(&msg_handle);
747 /* fill in context values */
/* the held msg is released later by the synchronous
 * sender once it has read the reply */
748 msg_context->u.sync.msg_handle = msg_handle;
749 msg_context->u.sync.msg = msg;
750 msg_context->u.sync.msg_len = msg_len;
752 /* todo: should this check (completion_done()
753 * == 1) for no one waiting? or do we need a
754 * flag to tell us the completion has been
755 * interrupted so we can free the message and
756 * its context. This probably also solves the
757 * message arriving after interruption todo
761 /* complete message so caller knows it happened */
762 complete(&msg_context->u.sync.cmplt);
768 case VCHI_CALLBACK_BULK_RECEIVED:
769 bulk_receive_cb(instance, bulk_ctx);
772 case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
773 bulk_abort_cb(instance, bulk_ctx);
776 case VCHI_CALLBACK_SERVICE_CLOSED:
777 /* TODO: consider if this requires action if received when
778 * driver is not explicitly closing the service
783 pr_err("Received unhandled message reason %d\n", reason);
/* Send a message and block for its reply (up to SYNC_MSG_TIMEOUT
 * seconds).  On success returns the reply via *msg_out and the held
 * message via *msg_handle_out — caller must vchi_held_msg_release() it.
 * Fails with an error if the payload exceeds the max message size,
 * queueing fails, or the wait times out.
 */
788 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
789 struct mmal_msg *msg,
790 unsigned int payload_len,
791 struct mmal_msg **msg_out,
792 struct vchi_held_msg *msg_handle_out)
794 struct mmal_msg_context *msg_context;
796 unsigned long timeout;
798 /* payload size must not cause message to exceed max size */
800 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
801 pr_err("payload length %d exceeds max:%d\n", payload_len,
802 (int)(MMAL_MSG_MAX_SIZE -
803 sizeof(struct mmal_msg_header)));
807 msg_context = get_msg_context(instance);
808 if (IS_ERR(msg_context))
809 return PTR_ERR(msg_context);
811 init_completion(&msg_context->u.sync.cmplt);
813 msg->h.magic = MMAL_MAGIC;
/* context handle lets service_callback find us for completion */
814 msg->h.context = msg_context->handle;
817 DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
820 vchi_service_use(instance->handle);
822 ret = vchi_queue_kernel_message(instance->handle,
824 sizeof(struct mmal_msg_header) +
827 vchi_service_release(instance->handle);
830 pr_err("error %d queuing message\n", ret);
831 release_msg_context(msg_context);
835 timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
836 SYNC_MSG_TIMEOUT * HZ);
838 pr_err("timed out waiting for sync completion\n");
840 /* todo: what happens if the message arrives after aborting */
841 release_msg_context(msg_context);
845 *msg_out = msg_context->u.sync.msg;
846 *msg_handle_out = msg_context->u.sync.msg_handle;
847 release_msg_context(msg_context);
/* Debug-only dump of a port's buffer requirements and elementary
 * stream format (extra video details for MMAL_ES_TYPE_VIDEO ports).
 */
852 static void dump_port_info(struct vchiq_mmal_port *port)
854 pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
856 pr_debug("buffer minimum num:%d size:%d align:%d\n",
857 port->minimum_buffer.num,
858 port->minimum_buffer.size, port->minimum_buffer.alignment);
860 pr_debug("buffer recommended num:%d size:%d align:%d\n",
861 port->recommended_buffer.num,
862 port->recommended_buffer.size,
863 port->recommended_buffer.alignment);
865 pr_debug("buffer current values num:%d size:%d align:%d\n",
866 port->current_buffer.num,
867 port->current_buffer.size, port->current_buffer.alignment);
869 pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
871 port->format.encoding, port->format.encoding_variant);
873 pr_debug(" bitrate:%d flags:0x%x\n",
874 port->format.bitrate, port->format.flags);
876 if (port->format.type == MMAL_ES_TYPE_VIDEO) {
878 ("es video format: width:%d height:%d colourspace:0x%x\n",
879 port->es.video.width, port->es.video.height,
880 port->es.video.color_space);
882 pr_debug(" : crop xywh %d,%d,%d,%d\n",
883 port->es.video.crop.x,
884 port->es.video.crop.y,
885 port->es.video.crop.width, port->es.video.crop.height);
886 pr_debug(" : framerate %d/%d aspect %d/%d\n",
887 port->es.video.frame_rate.num,
888 port->es.video.frame_rate.den,
889 port->es.video.par.num, port->es.video.par.den);
/* Serialise the kernel-side port state into the wire-format
 * struct mmal_port used in port messages.  Only buffer_num,
 * buffer_size and userdata are writable on the VPU side.
 */
893 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
895 /* todo do readonly fields need setting at all? */
896 p->type = port->type;
897 p->index = port->index;
899 p->is_enabled = port->enabled;
900 p->buffer_num_min = port->minimum_buffer.num;
901 p->buffer_size_min = port->minimum_buffer.size;
902 p->buffer_alignment_min = port->minimum_buffer.alignment;
903 p->buffer_num_recommended = port->recommended_buffer.num;
904 p->buffer_size_recommended = port->recommended_buffer.size;
906 /* only three writable fields in a port */
907 p->buffer_num = port->current_buffer.num;
908 p->buffer_size = port->current_buffer.size;
/* opaque cookie echoed back by the VPU in replies */
909 p->userdata = (u32)(unsigned long)port;
/* Push the port's current configuration (buffer counts/sizes and
 * elementary stream format, including extradata) to the VPU with a
 * synchronous PORT_INFO_SET message; returns the negated MMAL status.
 */
912 static int port_info_set(struct vchiq_mmal_instance *instance,
913 struct vchiq_mmal_port *port)
917 struct mmal_msg *rmsg;
918 struct vchi_held_msg rmsg_handle;
920 pr_debug("setting port info port %p\n", port);
923 dump_port_info(port);
925 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
927 m.u.port_info_set.component_handle = port->component->handle;
928 m.u.port_info_set.port_type = port->type;
929 m.u.port_info_set.port_index = port->index;
931 port_to_mmal_msg(port, &m.u.port_info_set.port);
933 /* elementary stream format setup */
934 m.u.port_info_set.format.type = port->format.type;
935 m.u.port_info_set.format.encoding = port->format.encoding;
936 m.u.port_info_set.format.encoding_variant =
937 port->format.encoding_variant;
938 m.u.port_info_set.format.bitrate = port->format.bitrate;
939 m.u.port_info_set.format.flags = port->format.flags;
941 memcpy(&m.u.port_info_set.es, &port->es,
942 sizeof(union mmal_es_specific_format));
944 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
945 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
946 port->format.extradata_size);
948 ret = send_synchronous_mmal_msg(instance, &m,
949 sizeof(m.u.port_info_set),
950 &rmsg, &rmsg_handle);
954 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
955 /* got an unexpected message type in reply */
960 /* return operation status */
961 ret = -rmsg->u.port_info_get_reply.status;
963 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
964 port->component->handle, port->handle);
967 vchi_held_msg_release(&rmsg_handle);
972 /* use port info get message to retrieve port information */
/* Synchronous PORT_INFO_GET: queries the VPU for a port's handle,
 * buffer requirements and stream format, and caches them all in the
 * kernel-side port structure.  Returns negated MMAL status on error.
 */
973 static int port_info_get(struct vchiq_mmal_instance *instance,
974 struct vchiq_mmal_port *port)
978 struct mmal_msg *rmsg;
979 struct vchi_held_msg rmsg_handle;
982 m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
983 m.u.port_info_get.component_handle = port->component->handle;
984 m.u.port_info_get.port_type = port->type;
985 m.u.port_info_get.index = port->index;
987 ret = send_synchronous_mmal_msg(instance, &m,
988 sizeof(m.u.port_info_get),
989 &rmsg, &rmsg_handle);
993 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
994 /* got an unexpected message type in reply */
999 /* return operation status */
1000 ret = -rmsg->u.port_info_get_reply.status;
1001 if (ret != MMAL_MSG_STATUS_SUCCESS)
1004 if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
1009 /* copy the values out of the message */
1010 port->handle = rmsg->u.port_info_get_reply.port_handle;
1012 /* port type and index cached to use on port info set because
1013 * it does not use a port handle
1015 port->type = rmsg->u.port_info_get_reply.port_type;
1016 port->index = rmsg->u.port_info_get_reply.port_index;
1018 port->minimum_buffer.num =
1019 rmsg->u.port_info_get_reply.port.buffer_num_min;
1020 port->minimum_buffer.size =
1021 rmsg->u.port_info_get_reply.port.buffer_size_min;
1022 port->minimum_buffer.alignment =
1023 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
1025 port->recommended_buffer.alignment =
1026 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
1027 port->recommended_buffer.num =
1028 rmsg->u.port_info_get_reply.port.buffer_num_recommended;
1030 port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
1031 port->current_buffer.size =
1032 rmsg->u.port_info_get_reply.port.buffer_size;
1035 port->format.type = rmsg->u.port_info_get_reply.format.type;
1036 port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
1037 port->format.encoding_variant =
1038 rmsg->u.port_info_get_reply.format.encoding_variant;
1039 port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
1040 port->format.flags = rmsg->u.port_info_get_reply.format.flags;
1042 /* elementary stream format */
1044 &rmsg->u.port_info_get_reply.es,
1045 sizeof(union mmal_es_specific_format));
/* format.es points back into this port's own es storage */
1046 port->format.es = &port->es;
1048 port->format.extradata_size =
1049 rmsg->u.port_info_get_reply.format.extradata_size;
1050 memcpy(port->format.extradata,
1051 rmsg->u.port_info_get_reply.extradata,
1052 port->format.extradata_size);
1054 pr_debug("received port info\n");
1055 dump_port_info(port);
1059 pr_debug("%s:result:%d component:0x%x port:%d\n",
1060 __func__, ret, port->component->handle, port->handle);
1062 vchi_held_msg_release(&rmsg_handle);
1067 /* create comonent on vc */
/* Synchronous COMPONENT_CREATE: asks the VPU to instantiate the named
 * component and records its handle and input/output/clock port counts.
 */
1068 static int create_component(struct vchiq_mmal_instance *instance,
1069 struct vchiq_mmal_component *component,
1074 struct mmal_msg *rmsg;
1075 struct vchi_held_msg rmsg_handle;
1077 /* build component create message */
1078 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
1079 m.u.component_create.client_component = component->client_component;
/* NOTE(review): strncpy may leave the name unterminated if it
 * exactly fills the field — confirm the VPU side tolerates this */
1080 strncpy(m.u.component_create.name, name,
1081 sizeof(m.u.component_create.name));
1083 ret = send_synchronous_mmal_msg(instance, &m,
1084 sizeof(m.u.component_create),
1085 &rmsg, &rmsg_handle);
1089 if (rmsg->h.type != m.h.type) {
1090 /* got an unexpected message type in reply */
1095 ret = -rmsg->u.component_create_reply.status;
1096 if (ret != MMAL_MSG_STATUS_SUCCESS)
1099 /* a valid component response received */
1100 component->handle = rmsg->u.component_create_reply.component_handle;
1101 component->inputs = rmsg->u.component_create_reply.input_num;
1102 component->outputs = rmsg->u.component_create_reply.output_num;
1103 component->clocks = rmsg->u.component_create_reply.clock_num;
1105 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
1107 component->inputs, component->outputs, component->clocks);
1110 vchi_held_msg_release(&rmsg_handle);
1115 /* destroys a component on vc */
/* Synchronous COMPONENT_DESTROY for a previously created component;
 * returns the negated MMAL status from the reply.
 */
1116 static int destroy_component(struct vchiq_mmal_instance *instance,
1117 struct vchiq_mmal_component *component)
1121 struct mmal_msg *rmsg;
1122 struct vchi_held_msg rmsg_handle;
1124 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
1125 m.u.component_destroy.component_handle = component->handle;
1127 ret = send_synchronous_mmal_msg(instance, &m,
1128 sizeof(m.u.component_destroy),
1129 &rmsg, &rmsg_handle);
1133 if (rmsg->h.type != m.h.type) {
1134 /* got an unexpected message type in reply */
1139 ret = -rmsg->u.component_destroy_reply.status;
1143 vchi_held_msg_release(&rmsg_handle);
1148 /* enable a component on vc */
/* Synchronous COMPONENT_ENABLE; returns the negated MMAL status. */
1149 static int enable_component(struct vchiq_mmal_instance *instance,
1150 struct vchiq_mmal_component *component)
1154 struct mmal_msg *rmsg;
1155 struct vchi_held_msg rmsg_handle;
1157 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1158 m.u.component_enable.component_handle = component->handle;
1160 ret = send_synchronous_mmal_msg(instance, &m,
1161 sizeof(m.u.component_enable),
1162 &rmsg, &rmsg_handle);
1166 if (rmsg->h.type != m.h.type) {
1167 /* got an unexpected message type in reply */
1172 ret = -rmsg->u.component_enable_reply.status;
1175 vchi_held_msg_release(&rmsg_handle);
1180 /* disable a component on vc */
/* Synchronous COMPONENT_DISABLE; returns the negated MMAL status. */
1181 static int disable_component(struct vchiq_mmal_instance *instance,
1182 struct vchiq_mmal_component *component)
1186 struct mmal_msg *rmsg;
1187 struct vchi_held_msg rmsg_handle;
1189 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1190 m.u.component_disable.component_handle = component->handle;
1192 ret = send_synchronous_mmal_msg(instance, &m,
1193 sizeof(m.u.component_disable),
1194 &rmsg, &rmsg_handle);
1198 if (rmsg->h.type != m.h.type) {
1199 /* got an unexpected message type in reply */
1204 ret = -rmsg->u.component_disable_reply.status;
1208 vchi_held_msg_release(&rmsg_handle);
1213 /* get version of mmal implementation */
/* Synchronous GET_VERSION: retrieves the VPU-side MMAL major/minor
 * version numbers into *major_out / *minor_out.
 */
1214 static int get_version(struct vchiq_mmal_instance *instance,
1215 u32 *major_out, u32 *minor_out)
1219 struct mmal_msg *rmsg;
1220 struct vchi_held_msg rmsg_handle;
1222 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1224 ret = send_synchronous_mmal_msg(instance, &m,
1225 sizeof(m.u.version),
1226 &rmsg, &rmsg_handle);
1230 if (rmsg->h.type != m.h.type) {
1231 /* got an unexpected message type in reply */
1236 *major_out = rmsg->u.version.major;
1237 *minor_out = rmsg->u.version.minor;
1240 vchi_held_msg_release(&rmsg_handle);
1245 /* do a port action with a port as a parameter */
/* Synchronous PORT_ACTION carrying the full serialised port (used for
 * enable/disable-style actions); returns the negated MMAL status.
 */
1246 static int port_action_port(struct vchiq_mmal_instance *instance,
1247 struct vchiq_mmal_port *port,
1248 enum mmal_msg_port_action_type action_type)
1252 struct mmal_msg *rmsg;
1253 struct vchi_held_msg rmsg_handle;
1255 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1256 m.u.port_action_port.component_handle = port->component->handle;
1257 m.u.port_action_port.port_handle = port->handle;
1258 m.u.port_action_port.action = action_type;
1260 port_to_mmal_msg(port, &m.u.port_action_port.port);
1262 ret = send_synchronous_mmal_msg(instance, &m,
1263 sizeof(m.u.port_action_port),
1264 &rmsg, &rmsg_handle);
1268 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1269 /* got an unexpected message type in reply */
1274 ret = -rmsg->u.port_action_reply.status;
1276 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1278 ret, port->component->handle, port->handle,
1279 port_action_type_names[action_type], action_type);
1282 vchi_held_msg_release(&rmsg_handle);
1287 /* do a port action with handles as parameters */
/*
 * Perform a PORT_ACTION identified purely by handles — used for
 * CONNECT/DISCONNECT style actions where a peer component/port handle
 * accompanies the request instead of a full port description.
 * Returns the negated VPU status from the reply.
 */
1288 static int port_action_handle(struct vchiq_mmal_instance *instance,
1289 struct vchiq_mmal_port *port,
1290 enum mmal_msg_port_action_type action_type,
1291 u32 connect_component_handle,
1292 u32 connect_port_handle)
1296 struct mmal_msg *rmsg;
1297 struct vchi_held_msg rmsg_handle;
1299 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1301 m.u.port_action_handle.component_handle = port->component->handle;
1302 m.u.port_action_handle.port_handle = port->handle;
1303 m.u.port_action_handle.action = action_type;
/* identify the peer end of the (dis)connection by VPU handles */
1305 m.u.port_action_handle.connect_component_handle =
1306 connect_component_handle;
1307 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1309 ret = send_synchronous_mmal_msg(instance, &m,
1310 sizeof(m.u.port_action_handle),
1311 &rmsg, &rmsg_handle);
1315 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1316 /* got an unexpected message type in reply */
1321 ret = -rmsg->u.port_action_reply.status;
1323 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1325 ret, port->component->handle, port->handle,
1326 port_action_type_names[action_type],
1327 action_type, connect_component_handle, connect_port_handle);
1330 vchi_held_msg_release(&rmsg_handle);
/*
 * Set a port parameter on the VPU: copy @value (value_size bytes) into
 * a PORT_PARAMETER_SET message and send it synchronously.  Returns the
 * negated VPU status from the reply.
 */
1335 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1336 struct vchiq_mmal_port *port,
1337 u32 parameter_id, void *value, u32 value_size)
1341 struct mmal_msg *rmsg;
1342 struct vchi_held_msg rmsg_handle;
1344 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1346 m.u.port_parameter_set.component_handle = port->component->handle;
1347 m.u.port_parameter_set.port_handle = port->handle;
1348 m.u.port_parameter_set.id = parameter_id;
/* size covers the two-u32 parameter header (id + size) plus payload */
1349 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1350 memcpy(&m.u.port_parameter_set.value, value, value_size);
/* wire length: component/port handles + id + size (4 u32s) + payload */
1352 ret = send_synchronous_mmal_msg(instance, &m,
1353 (4 * sizeof(u32)) + value_size,
1354 &rmsg, &rmsg_handle);
1358 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1359 /* got an unexpected message type in reply */
1364 ret = -rmsg->u.port_parameter_set_reply.status;
1366 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1368 ret, port->component->handle, port->handle, parameter_id);
1371 vchi_held_msg_release(&rmsg_handle);
/*
 * Read a port parameter from the VPU via PORT_PARAMETER_GET.
 *
 * On return the parameter data is copied into @value (truncated to the
 * caller's buffer if the VPU reports a larger parameter) and
 * *value_size is updated to the true parameter size, which may exceed
 * the space provided.
 */
1376 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1377 struct vchiq_mmal_port *port,
1378 u32 parameter_id, void *value, u32 *value_size)
1382 struct mmal_msg *rmsg;
1383 struct vchi_held_msg rmsg_handle;
1385 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1387 m.u.port_parameter_get.component_handle = port->component->handle;
1388 m.u.port_parameter_get.port_handle = port->handle;
1389 m.u.port_parameter_get.id = parameter_id;
/* advertised size includes the two-u32 parameter header */
1390 m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1392 ret = send_synchronous_mmal_msg(instance, &m,
1394 mmal_msg_port_parameter_get),
1395 &rmsg, &rmsg_handle);
1399 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1400 /* got an unexpected message type in reply */
1401 pr_err("Incorrect reply type %d\n", rmsg->h.type);
/* NOTE: status kept positive here, unlike the other helpers above */
1406 ret = rmsg->u.port_parameter_get_reply.status;
1408 /* port_parameter_get_reply.size includes the header,
1409 * whilst *value_size doesn't.
1411 rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
1413 if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1414 /* Copy only as much as we have space for
1415 * but report true size of parameter
/* NOTE(review): copy length for the truncating branch is presumably
 * capped at *value_size — confirm against the full source. */
1417 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1420 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1421 rmsg->u.port_parameter_get_reply.size);
1423 /* Always report the size of the returned parameter to the caller */
1424 *value_size = rmsg->u.port_parameter_get_reply.size;
1426 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1427 ret, port->component->handle, port->handle, parameter_id);
1430 vchi_held_msg_release(&rmsg_handle);
1435 /* disables a port and drains buffers from it */
/*
 * Issue a DISABLE port action, then drain any buffers still queued on
 * port->buffers: each is handed back to the registered buffer_cb with
 * zero length/flags and unknown timestamps.  Finally the port info is
 * re-read from the VPU to pick up the new state.
 */
1436 static int port_disable(struct vchiq_mmal_instance *instance,
1437 struct vchiq_mmal_port *port)
1440 struct list_head *q, *buf_head;
1441 unsigned long flags = 0;
1448 ret = port_action_port(instance, port,
1449 MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1452 * Drain all queued buffers on port. This should only
1453 * apply to buffers that have been queued before the port
1454 * has been enabled. If the port has been enabled and buffers
1455 * passed, then the buffers should have been removed from this
1456 * list, and we should get the relevant callbacks via VCHIQ
1457 * to release the buffers.
/* the buffer list is shared with IRQ-context completion paths */
1459 spin_lock_irqsave(&port->slock, flags);
1461 list_for_each_safe(buf_head, q, &port->buffers) {
1462 struct mmal_buffer *mmalbuf;
1464 mmalbuf = list_entry(buf_head, struct mmal_buffer,
/* return the buffer to the client as empty/unused */
1467 if (port->buffer_cb) {
1468 mmalbuf->length = 0;
1469 mmalbuf->mmal_flags = 0;
1470 mmalbuf->dts = MMAL_TIME_UNKNOWN;
1471 mmalbuf->pts = MMAL_TIME_UNKNOWN;
1473 port->buffer_cb(instance,
1478 spin_unlock_irqrestore(&port->slock, flags);
/* refresh cached port info now the port is disabled */
1480 ret = port_info_get(instance, port);
/*
 * Issue an ENABLE port action, then (if a buffer callback is
 * registered) push any buffers already queued on the port to the
 * VPU via buffer_from_host.  Port info is re-read afterwards.
 */
1487 static int port_enable(struct vchiq_mmal_instance *instance,
1488 struct vchiq_mmal_port *port)
1490 unsigned int hdr_count;
1491 struct list_head *q, *buf_head;
1497 ret = port_action_port(instance, port,
1498 MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1504 if (port->buffer_cb) {
1505 /* send buffer headers to videocore */
1507 list_for_each_safe(buf_head, q, &port->buffers) {
1508 struct mmal_buffer *mmalbuf;
1510 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1512 ret = buffer_from_host(instance, port, mmalbuf);
/* don't submit more headers than the port's negotiated count */
1518 if (hdr_count > port->current_buffer.num)
1523 ret = port_info_get(instance, port);
1529 /* ------------------------------------------------------------------
1531 *------------------------------------------------------------------
/*
 * Public API: push the locally modified port format to the VPU
 * (port_info_set) and then read back what was actually accepted
 * (port_info_get), all under the instance message mutex.
 */
1534 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1535 struct vchiq_mmal_port *port)
1539 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1542 ret = port_info_set(instance, port);
1544 goto release_unlock;
1546 /* read what has actually been set */
1547 ret = port_info_get(instance, port);
1550 mutex_unlock(&instance->vchiq_mutex);
1554 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
/*
 * Public API: set a port parameter under the instance mutex.  On a
 * successful MMAL_PARAMETER_ZERO_COPY set, also cache the zero-copy
 * flag on the kernel-side port so the buffer path can honour it.
 */
1556 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1557 struct vchiq_mmal_port *port,
1558 u32 parameter, void *value, u32 value_size)
1562 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1565 ret = port_parameter_set(instance, port, parameter, value, value_size);
1567 mutex_unlock(&instance->vchiq_mutex);
1569 if (parameter == MMAL_PARAMETER_ZERO_COPY && !ret)
1570 port->zero_copy = !!(*(bool *)value);
1574 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
/*
 * Public API: read a port parameter under the instance mutex.
 * See port_parameter_get() for the truncation/size-reporting contract.
 */
1576 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1577 struct vchiq_mmal_port *port,
1578 u32 parameter, void *value, u32 *value_size)
1582 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1585 ret = port_parameter_get(instance, port, parameter, value, value_size);
1587 mutex_unlock(&instance->vchiq_mutex);
1591 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1595 * enables a port and queues buffers for satisfying callbacks if we
1596 * provide a callback handler
/*
 * Public API: enable @port under the instance mutex.  A no-op if the
 * port is already enabled; otherwise stores @buffer_cb on the port and
 * delegates to port_enable().
 */
1598 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1599 struct vchiq_mmal_port *port,
1600 vchiq_mmal_buffer_cb buffer_cb)
1604 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1607 /* already enabled - noop */
1608 if (port->enabled) {
1613 port->buffer_cb = buffer_cb;
1615 ret = port_enable(instance, port);
1618 mutex_unlock(&instance->vchiq_mutex);
1622 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
/*
 * Public API: disable @port under the instance mutex.  Returns early
 * (after unlocking) if the port is not currently enabled.
 */
1624 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1625 struct vchiq_mmal_port *port)
1629 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1632 if (!port->enabled) {
1633 mutex_unlock(&instance->vchiq_mutex);
1637 ret = port_disable(instance, port);
1639 mutex_unlock(&instance->vchiq_mutex);
1643 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1645 /* ports will be connected in a tunneled manner so data buffers
1646 * are not handled by client.
/*
 * Public API: (re)connect @src to @dst as a VPU-side tunnel.
 *
 * Any existing connection on @src is first torn down (disable the src
 * port, then a DISCONNECT action against the old peer).  If a new @dst
 * is given, the src video format is copied onto dst, committed and
 * read back, and a CONNECT action links the two ports; on success
 * src->connected records the peer.
 */
1648 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1649 struct vchiq_mmal_port *src,
1650 struct vchiq_mmal_port *dst)
1654 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1657 /* disconnect ports if connected */
1658 if (src->connected) {
1659 ret = port_disable(instance, src);
1661 pr_err("failed disabling src port(%d)\n", ret);
1662 goto release_unlock;
1665 /* do not need to disable the destination port as they
1666 * are connected and it is done automatically
1669 ret = port_action_handle(instance, src,
1670 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1671 src->connected->component->handle,
1672 src->connected->handle);
1674 pr_err("failed disconnecting src port\n");
1675 goto release_unlock;
1677 src->connected->enabled = 0;
1678 src->connected = NULL;
1682 /* do not make new connection */
1684 pr_debug("not making new connection\n");
1685 goto release_unlock;
1688 /* copy src port format to dst */
1689 dst->format.encoding = src->format.encoding;
1690 dst->es.video.width = src->es.video.width;
1691 dst->es.video.height = src->es.video.height;
1692 dst->es.video.crop.x = src->es.video.crop.x;
1693 dst->es.video.crop.y = src->es.video.crop.y;
1694 dst->es.video.crop.width = src->es.video.crop.width;
1695 dst->es.video.crop.height = src->es.video.crop.height;
1696 dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1697 dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1699 /* set new format */
1700 ret = port_info_set(instance, dst);
1702 pr_debug("setting port info failed\n");
1703 goto release_unlock;
1706 /* read what has actually been set */
1707 ret = port_info_get(instance, dst);
1709 pr_debug("read back port info failed\n");
1710 goto release_unlock;
1713 /* connect two ports together */
1714 ret = port_action_handle(instance, src,
1715 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1716 dst->component->handle, dst->handle);
1718 pr_debug("connecting port %d:%d to %d:%d failed\n",
1719 src->component->handle, src->handle,
1720 dst->component->handle, dst->handle);
1721 goto release_unlock;
1723 src->connected = dst;
1727 mutex_unlock(&instance->vchiq_mutex);
1731 EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
/*
 * Public API: submit @buffer to @port.
 *
 * For zero-copy ports backed by a dmabuf that has not yet been
 * imported, first import it into the VideoCore shared-memory service
 * (vc_sm_cma) and resolve the VC-side handle.  The buffer is then
 * passed to the VPU with buffer_from_host(); if that reports -EINVAL
 * the port is disabled, so the buffer is queued on port->buffers to be
 * submitted when the port is enabled.
 */
1733 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1734 struct vchiq_mmal_port *port,
1735 struct mmal_buffer *buffer)
1737 unsigned long flags = 0;
1741 * We really want to do this in mmal_vchi_buffer_init but can't as
1742 * videobuf2 won't let us have the dmabuf there.
1744 if (port->zero_copy && buffer->dma_buf && !buffer->vcsm_handle) {
1745 pr_debug("%s: import dmabuf %p\n", __func__, buffer->dma_buf);
1746 ret = vc_sm_cma_import_dmabuf(buffer->dma_buf,
1747 &buffer->vcsm_handle);
1749 pr_err("%s: vc_sm_import_dmabuf_fd failed, ret %d\n",
/* translate the vcsm handle into a handle the VPU understands */
1754 buffer->vc_handle = vc_sm_cma_int_handle(buffer->vcsm_handle);
1755 if (!buffer->vc_handle) {
1756 pr_err("%s: vc_sm_int_handle failed %d\n",
/* undo the import so we don't leak the vcsm allocation */
1758 vc_sm_cma_free(buffer->vcsm_handle);
1761 pr_debug("%s: import dmabuf %p - got vc handle %08X\n",
1762 __func__, buffer->dma_buf, buffer->vc_handle);
1765 ret = buffer_from_host(instance, port, buffer);
1766 if (ret == -EINVAL) {
1767 /* Port is disabled. Queue for when it is enabled. */
1768 spin_lock_irqsave(&port->slock, flags);
1769 list_add_tail(&buffer->list, &port->buffers);
1770 spin_unlock_irqrestore(&port->slock, flags);
1775 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
/*
 * Public API: attach a fresh message context to @buf.  Returns the
 * PTR_ERR code if context allocation fails.
 */
1777 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1778 struct mmal_buffer *buf)
1780 struct mmal_msg_context *msg_context = get_msg_context(instance);
1782 if (IS_ERR(msg_context))
1783 return (PTR_ERR(msg_context));
1785 buf->msg_context = msg_context;
1788 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
/*
 * Public API: release the VideoCore shared-memory handle attached to
 * @buf (if any) and clear it, logging any vc_sm_cma_free failure.
 */
1790 int mmal_vchi_buffer_unmap(struct mmal_buffer *buf)
1794 if (buf->vcsm_handle) {
1797 pr_debug("%s: vc_sm_cma_free on handle %p\n", __func__,
1799 ret = vc_sm_cma_free(buf->vcsm_handle);
1801 pr_err("%s: vcsm_free failed, ret %d\n", __func__, ret);
1802 buf->vcsm_handle = 0;
1806 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_unmap);
/*
 * Public API: tear down everything mmal_vchi_buffer_init() and the
 * zero-copy path attached to @buf — release the message context and
 * unmap any vcsm handle.
 */
1808 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1810 struct mmal_msg_context *msg_context = buf->msg_context;
1813 release_msg_context(msg_context);
1814 buf->msg_context = NULL;
1816 mmal_vchi_buffer_unmap(buf);
1819 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
/*
 * Allocate the per-port event context: a message context plus an
 * mmal_buffer with MMAL_WORKER_EVENT_SPACE bytes of storage for event
 * payloads, and the deferred work item that delivers them.  On
 * allocation failure the partially built state is unwound via gotos.
 */
1821 static void init_event_context(struct vchiq_mmal_instance *instance,
1822 struct vchiq_mmal_port *port)
1824 struct mmal_msg_context *ctx = get_msg_context(instance);
1826 mutex_init(&port->event_context_mutex);
1828 port->event_context = ctx;
1829 ctx->u.bulk.instance = instance;
1830 ctx->u.bulk.port = port;
1831 ctx->u.bulk.buffer =
1832 kzalloc(sizeof(*ctx->u.bulk.buffer), GFP_KERNEL);
1833 if (!ctx->u.bulk.buffer)
1834 goto release_msg_context;
1835 ctx->u.bulk.buffer->buffer = kzalloc(MMAL_WORKER_EVENT_SPACE,
1837 if (!ctx->u.bulk.buffer->buffer)
1838 goto release_buffer;
1840 INIT_WORK(&ctx->u.bulk.work, buffer_work_cb)
1844 kfree(ctx->u.bulk.buffer);
1845 release_msg_context:
1846 release_msg_context(ctx);
/*
 * Free the event context created by init_event_context(): payload
 * buffer, buffer struct, then the message context itself.
 */
1849 static void free_event_context(struct vchiq_mmal_port *port)
1851 struct mmal_msg_context *ctx = port->event_context;
1856 kfree(ctx->u.bulk.buffer->buffer);
1857 kfree(ctx->u.bulk.buffer);
1858 release_msg_context(ctx);
1859 port->event_context = NULL;
/*
 * Free the event contexts of every port on @component: all inputs,
 * outputs, clocks and finally the control port.
 */
1862 static void release_all_event_contexts(struct vchiq_mmal_component *component)
1866 for (idx = 0; idx < component->inputs; idx++)
1867 free_event_context(&component->input[idx]);
1868 for (idx = 0; idx < component->outputs; idx++)
1869 free_event_context(&component->output[idx]);
1870 for (idx = 0; idx < component->clocks; idx++)
1871 free_event_context(&component->clock[idx]);
1872 free_event_context(&component->control);
1875 /* Initialise a mmal component and its ports
/*
 * Public API: create a named MMAL component on the VPU and populate
 * the kernel-side description of all its ports.
 *
 * A free slot is claimed from instance->component[] (its array index
 * doubles as the client_component handle the VPU echoes back).  After
 * create_component() succeeds, port info and an event context are set
 * up for the control port and for every input, output and clock port.
 * Any failure destroys the VPU component, releases the event contexts
 * and returns the slot.
 */
1878 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1880 struct vchiq_mmal_component **component_out)
1883 int idx; /* port index */
1884 struct vchiq_mmal_component *component = NULL;
1886 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* claim the first unused component slot */
1889 for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
1890 if (!instance->component[idx].in_use) {
1891 component = &instance->component[idx];
1892 component->in_use = 1;
1898 ret = -EINVAL; /* todo is this correct error? */
1902 /* We need a handle to reference back to our component structure.
1903 * Use the array index in instance->component rather than rolling
1906 component->client_component = idx;
1908 ret = create_component(instance, component, name);
1910 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1915 /* ports info needs gathering */
1916 component->control.type = MMAL_PORT_TYPE_CONTROL;
1917 component->control.index = 0;
1918 component->control.component = component;
1919 spin_lock_init(&component->control.slock);
1920 INIT_LIST_HEAD(&component->control.buffers);
1921 ret = port_info_get(instance, &component->control);
1923 goto release_component;
1924 init_event_context(instance, &component->control);
1926 for (idx = 0; idx < component->inputs; idx++) {
1927 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1928 component->input[idx].index = idx;
1929 component->input[idx].component = component;
1930 spin_lock_init(&component->input[idx].slock);
1931 INIT_LIST_HEAD(&component->input[idx].buffers);
1932 ret = port_info_get(instance, &component->input[idx]);
1934 goto release_component;
1935 init_event_context(instance, &component->input[idx]);
1938 for (idx = 0; idx < component->outputs; idx++) {
1939 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1940 component->output[idx].index = idx;
1941 component->output[idx].component = component;
1942 spin_lock_init(&component->output[idx].slock);
1943 INIT_LIST_HEAD(&component->output[idx].buffers);
1944 ret = port_info_get(instance, &component->output[idx]);
1946 goto release_component;
1947 init_event_context(instance, &component->output[idx]);
1950 for (idx = 0; idx < component->clocks; idx++) {
1951 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1952 component->clock[idx].index = idx;
1953 component->clock[idx].component = component;
1954 spin_lock_init(&component->clock[idx].slock);
1955 INIT_LIST_HEAD(&component->clock[idx].buffers);
1956 ret = port_info_get(instance, &component->clock[idx]);
1958 goto release_component;
1959 init_event_context(instance, &component->clock[idx]);
1962 *component_out = component;
1964 mutex_unlock(&instance->vchiq_mutex);
/* error unwind: destroy VPU component, free contexts, return slot */
1969 destroy_component(instance, component);
1970 release_all_event_contexts(component);
1973 component->in_use = 0;
1974 mutex_unlock(&instance->vchiq_mutex);
1978 EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1981 * cause a mmal component to be destroyed
/*
 * Public API: destroy @component on the VPU.  Disables it first if it
 * is still enabled, then destroys it, marks the local slot free and
 * releases all per-port event contexts.
 */
1983 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1984 struct vchiq_mmal_component *component)
1988 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1991 if (component->enabled)
1992 ret = disable_component(instance, component);
1994 ret = destroy_component(instance, component);
1996 component->in_use = 0;
1998 release_all_event_contexts(component);
2000 mutex_unlock(&instance->vchiq_mutex);
2004 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
2007 * cause a mmal component to be enabled
/*
 * Public API: enable @component on the VPU.  A no-op if already
 * enabled; on success the local enabled flag is set.
 */
2009 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
2010 struct vchiq_mmal_component *component)
2014 if (mutex_lock_interruptible(&instance->vchiq_mutex))
2017 if (component->enabled) {
2018 mutex_unlock(&instance->vchiq_mutex);
2022 ret = enable_component(instance, component);
2024 component->enabled = 1;
2026 mutex_unlock(&instance->vchiq_mutex);
2030 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
2033 * cause a mmal component to be enabled
/*
 * Public API: disable @component on the VPU (the comment above is a
 * copy-paste of the enable variant).  A no-op if already disabled; on
 * success the local enabled flag is cleared.
 */
2035 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
2036 struct vchiq_mmal_component *component)
2040 if (mutex_lock_interruptible(&instance->vchiq_mutex))
2043 if (!component->enabled) {
2044 mutex_unlock(&instance->vchiq_mutex);
2048 ret = disable_component(instance, component);
2050 component->enabled = 0;
2052 mutex_unlock(&instance->vchiq_mutex);
2056 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
/*
 * Public API: report the VPU MMAL version via get_version(), under the
 * instance mutex.
 */
2058 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
2059 u32 *major_out, u32 *minor_out)
2063 if (mutex_lock_interruptible(&instance->vchiq_mutex))
2066 ret = get_version(instance, major_out, minor_out);
2068 mutex_unlock(&instance->vchiq_mutex);
2072 EXPORT_SYMBOL_GPL(vchiq_mmal_version);
/*
 * Public API: tear down the whole MMAL/VCHIQ instance — close the
 * VCHI service, disconnect the VCHI instance, flush and destroy the
 * bulk workqueue, free the scratch buffer and the msg-context idr.
 */
2074 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
2081 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* take a use-count so the service can be closed cleanly */
2084 vchi_service_use(instance->handle);
2086 status = vchi_service_close(instance->handle);
2088 pr_err("mmal-vchiq: VCHIQ close failed\n");
2090 mutex_unlock(&instance->vchiq_mutex);
2092 vchi_disconnect(instance->vchi_instance);
2093 flush_workqueue(instance->bulk_wq);
2094 destroy_workqueue(instance->bulk_wq);
2096 vfree(instance->bulk_scratch);
2098 idr_destroy(&instance->context_map);
2104 EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
/*
 * Public API: create and connect an MMAL/VCHIQ instance.
 *
 * Compile-time BUILD_BUG_ONs pin the on-wire struct sizes, since the
 * messages are (de)serialised directly from memory.  Then: initialise
 * and connect VCHI, allocate the instance (mutexes, idr-based message
 * context map, scratch page, ordered bulk workqueue) and open the
 * MMAL service.  Error paths unwind in reverse order via gotos.
 */
2106 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
2110 struct vchiq_mmal_instance *instance;
2111 static VCHI_INSTANCE_T vchi_instance;
2112 struct service_creation params = {
2113 .version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
2114 .service_id = VC_MMAL_SERVER_NAME,
2115 .callback = service_callback,
2116 .callback_param = NULL,
2119 /* compile time checks to ensure structure size as they are
2120 * directly (de)serialised from memory.
2123 /* ensure the header structure has packed to the correct size */
2124 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
2126 /* ensure message structure does not exceed maximum length */
2127 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
2129 /* mmal port struct is correct size */
2130 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
2132 /* create a vchi instance */
2133 status = vchi_initialise(&vchi_instance);
2135 pr_err("Failed to initialise VCHI instance (status=%d)\n",
2140 status = vchi_connect(vchi_instance);
2142 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
2144 goto err_disconnect_vchi;
2147 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
2151 goto err_disconnect_vchi;
2154 mutex_init(&instance->vchiq_mutex);
2156 instance->bulk_scratch = vmalloc(PAGE_SIZE);
2157 instance->vchi_instance = vchi_instance;
2159 mutex_init(&instance->context_map_lock);
/* idr base 1 so handle 0 can never be a valid context id */
2160 idr_init_base(&instance->context_map, 1);
2162 params.callback_param = instance;
/* ordered workqueue: bulk receives must complete in order */
2164 instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
2166 if (!instance->bulk_wq)
2169 status = vchi_service_open(vchi_instance, &params, &instance->handle);
2171 pr_err("Failed to open VCHI service connection (status=%d)\n",
2173 goto err_close_services;
2176 vchi_service_release(instance->handle);
2178 *out_instance = instance;
/* error unwind, in reverse order of acquisition */
2183 vchi_service_close(instance->handle);
2184 destroy_workqueue(instance->bulk_wq);
2186 vfree(instance->bulk_scratch);
2188 err_disconnect_vchi:
2189 vchi_disconnect(vchi_instance);
2192 EXPORT_SYMBOL_GPL(vchiq_mmal_init);