staging: mmal-vchiq: Fix memory leak for vchi_instance
[platform/kernel/linux-rpi.git] / drivers / staging / vc04_services / vchiq-mmal / mmal-vchiq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *              (now dave.stevenson@raspberrypi.org)
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
27
28 #include "mmal-common.h"
29 #include "mmal-parameters.h"
30 #include "mmal-vchiq.h"
31 #include "mmal-msg.h"
32
33 #include "vc-sm-cma/vc_sm_knl.h"
34
35 #define USE_VCHIQ_ARM
36 #include "interface/vchi/vchi.h"
37
38 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
39 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION("0.0.1");
42
43 /*
44  * maximum number of components supported.
45  * This matches the maximum permitted by default on the VPU
46  */
47 #define VCHIQ_MMAL_MAX_COMPONENTS 64
48
49 /*
50  * Timeout for synchronous msg responses in seconds.
51  * Helpful to increase this if stopping in the VPU debugger.
52  */
53 #define SYNC_MSG_TIMEOUT        3
54
55 /*#define FULL_MSG_DUMP 1*/
56
#ifdef DEBUG
/* Human-readable names for MMAL message types, indexed by the h.type field.
 * Used only by DBG_DUMP_MSG below; the order must match the message type
 * enumeration in mmal-msg.h.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
88
/* Human-readable names for MMAL port actions, indexed by action type.
 * NOTE(review): unlike msg_type_names this table is not inside #ifdef DEBUG;
 * presumably referenced by non-debug code elsewhere in the file.
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
98
/* DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) - debug helper that logs a message's
 * type name and length.  With FULL_MSG_DUMP also defined it additionally
 * hex-dumps the message header and payload; without DEBUG it compiles to
 * nothing.
 */
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
	do {                                                            \
		pr_debug(TITLE" type:%s(%d) length:%d\n",               \
			 msg_type_names[(MSG)->h.type],                 \
			 (MSG)->h.type, (MSG_LEN));                     \
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
			       16, 4, (MSG),                            \
			       sizeof(struct mmal_msg_header), 1);      \
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
			       16, 4,                                   \
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
	{                                                               \
		pr_debug(TITLE" type:%s(%d) length:%d\n",               \
			 msg_type_names[(MSG)->h.type],                 \
			 (MSG)->h.type, (MSG_LEN));                     \
	}
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
125
struct vchiq_mmal_instance;

/* normal message context
 *
 * Per-message state tracked on the host side.  A context is used in exactly
 * one of two ways, hence the union: u.bulk for buffer transfers completed
 * via workqueue callbacks, or u.sync for synchronous request/reply messages
 * completed via the completion in u.sync.cmplt.
 */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred callback */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;
			/* MMAL buffer command flag */
			u32 cmd;

			int status;	/* context status */

		} bulk;		/* bulk data */

		struct {
			/* message handle to release */
			struct vchi_held_msg msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;

};
176
/* Driver-side state for one open MMAL-over-VCHIQ connection: the VCHI
 * service handle, the idr mapping message handles back to contexts, the
 * component table, and the ordered workqueue used for bulk transfers.
 */
struct vchiq_mmal_instance {
	VCHI_SERVICE_HANDLE_T handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* vmalloc page to receive scratch bulk xfers into */
	void *bulk_scratch;

	struct idr context_map;
	/* protect accesses to context_map */
	struct mutex context_map_lock;

	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations */
	struct workqueue_struct *bulk_wq;

	/* Opaque handle for a VCHI instance */
	VCHI_INSTANCE_T vchi_instance;
};
198
199 static struct mmal_msg_context *
200 get_msg_context(struct vchiq_mmal_instance *instance)
201 {
202         struct mmal_msg_context *msg_context;
203         int handle;
204
205         /* todo: should this be allocated from a pool to avoid kzalloc */
206         msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
207
208         if (!msg_context)
209                 return ERR_PTR(-ENOMEM);
210
211         /* Create an ID that will be passed along with our message so
212          * that when we service the VCHI reply, we can look up what
213          * message is being replied to.
214          */
215         mutex_lock(&instance->context_map_lock);
216         handle = idr_alloc(&instance->context_map, msg_context,
217                            0, 0, GFP_KERNEL);
218         mutex_unlock(&instance->context_map_lock);
219
220         if (handle < 0) {
221                 kfree(msg_context);
222                 return ERR_PTR(handle);
223         }
224
225         msg_context->instance = instance;
226         msg_context->handle = handle;
227
228         return msg_context;
229 }
230
231 static struct mmal_msg_context *
232 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
233 {
234         return idr_find(&instance->context_map, handle);
235 }
236
237 static void
238 release_msg_context(struct mmal_msg_context *msg_context)
239 {
240         struct vchiq_mmal_instance *instance = msg_context->instance;
241
242         mutex_lock(&instance->context_map_lock);
243         idr_remove(&instance->context_map, msg_context->handle);
244         mutex_unlock(&instance->context_map_lock);
245         kfree(msg_context);
246 }
247
/* workqueue scheduled callback
 *
 * we do this because it is important we do not call any other vchiq
 * sync calls from within the message delivery thread
 */
static void buffer_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context, u.bulk.work);
	struct mmal_buffer *buffer = msg_context->u.bulk.buffer;

	if (!buffer) {
		pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
		       __func__, msg_context);
		return;
	}

	/* copy the completed transfer details from the context into the
	 * buffer before handing it to the client callback
	 */
	buffer->length = msg_context->u.bulk.buffer_used;
	buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
	buffer->dts = msg_context->u.bulk.dts;
	buffer->pts = msg_context->u.bulk.pts;
	buffer->cmd = msg_context->u.bulk.cmd;

	/* data buffers (cmd == 0) were counted against the VPU in
	 * buffer_from_host(); drop that accounting now
	 */
	if (!buffer->cmd)
		atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);

	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
					    msg_context->u.bulk.port,
					    msg_context->u.bulk.status,
					    msg_context->u.bulk.buffer);

	/* event buffers (cmd != 0) hold the port's event_context_mutex,
	 * taken via mutex_trylock() in event_to_host_cb(); release it only
	 * after the client callback has consumed the event
	 */
	if (buffer->cmd)
		mutex_unlock(&msg_context->u.bulk.port->event_context_mutex);
}
282
/* workqueue scheduled callback to handle receiving buffers
 *
 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
 * If we block in the service_callback context then we can't process the
 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
 * vchi_bulk_queue_receive() call to complete.
 *
 * Completion of the queued receive is reported back through
 * service_callback() (VCHI_CALLBACK_BULK_RECEIVED / _ABORTED) with this
 * msg_context as the bulk context.
 */
static void buffer_to_host_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context,
			     u.bulk.buffer_to_host_work);
	struct vchiq_mmal_instance *instance = msg_context->instance;
	unsigned long len = msg_context->u.bulk.buffer_used;
	int ret;

	if (!len)
		/* Dummy receive to ensure the buffers remain in order */
		len = 8;
	/* queue the bulk submission */
	vchi_service_use(instance->handle);
	ret = vchi_bulk_queue_receive(instance->handle,
				      msg_context->u.bulk.buffer->buffer,
				      /* Actual receive needs to be a multiple
				       * of 4 bytes
				       */
				      (len + 3) & ~3,
				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
				      msg_context);

	vchi_service_release(instance->handle);

	if (ret != 0)
		pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
		       __func__, msg_context, ret);
}
320
/* enqueue a bulk receive for a given message context
 *
 * Records the transfer details in the context and defers the actual
 * vchi_bulk_queue_receive() to the instance's ordered bulk workqueue
 * (see buffer_to_host_work_cb) so we never block in the message
 * delivery thread.  Returns 0 on successful queuing, -EINVAL if no
 * receive buffer was configured.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	if (!msg_context->u.bulk.buffer) {
		pr_err("bulk.buffer not configured - error in buffer_from_host\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal servie going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		return -EINVAL;
	}

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
	msg_context->u.bulk.cmd = msg->u.buffer_from_host.buffer_header.cmd;

	queue_work(msg_context->instance->bulk_wq,
		   &msg_context->u.bulk.buffer_to_host_work);

	return 0;
}
366
367 /* data in message, memcpy from packet into output buffer */
368 static int inline_receive(struct vchiq_mmal_instance *instance,
369                           struct mmal_msg *msg,
370                           struct mmal_msg_context *msg_context)
371 {
372         memcpy(msg_context->u.bulk.buffer->buffer,
373                msg->u.buffer_from_host.short_data,
374                msg->u.buffer_from_host.payload_in_message);
375
376         msg_context->u.bulk.buffer_used =
377             msg->u.buffer_from_host.payload_in_message;
378
379         return 0;
380 }
381
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Hands a host buffer to the VPU for it to fill (output ports) or
 * consume (other ports).  The buffer's msg_context is primed so the
 * reply path (buffer_to_host_cb / buffer_work_cb) can complete the
 * transfer.  Returns 0 on success or a negative errno.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	pr_debug("instance:%p buffer:%p\n", instance->handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	/* decremented in buffer_work_cb when the buffer returns */
	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	/* zero-copy ports pass the VC handle; otherwise the (32-bit
	 * truncated) kernel address of the data
	 */
	if (port->zero_copy) {
		m.u.buffer_from_host.buffer_header.data = buf->vc_handle;
	} else {
		m.u.buffer_from_host.buffer_header.data =
			(u32)(unsigned long)buf->buffer;
	}

	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	if (port->type == MMAL_PORT_TYPE_OUTPUT) {
		/* output buffer is empty - the VPU will fill it */
		m.u.buffer_from_host.buffer_header.length = 0;
		m.u.buffer_from_host.buffer_header.offset = 0;
		m.u.buffer_from_host.buffer_header.flags = 0;
		m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
		m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
	} else {
		/* input buffer carries host-supplied data and timestamps */
		m.u.buffer_from_host.buffer_header.length = buf->length;
		m.u.buffer_from_host.buffer_header.offset = 0;
		m.u.buffer_from_host.buffer_header.flags = buf->mmal_flags;
		m.u.buffer_from_host.buffer_header.pts = buf->pts;
		m.u.buffer_from_host.buffer_header.dts = buf->dts;
	}

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					&m,
					sizeof(struct mmal_msg_header) +
					sizeof(m.u.buffer_from_host));

	vchi_service_release(instance->handle);

	return ret;
}
473
474 /* deals with receipt of event to host message */
475 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
476                              struct mmal_msg *msg, u32 msg_len)
477 {
478         int comp_idx = msg->u.event_to_host.client_component;
479         struct vchiq_mmal_component *component =
480                                         &instance->component[comp_idx];
481         struct vchiq_mmal_port *port = NULL;
482         struct mmal_msg_context *msg_context;
483         u32 port_num = msg->u.event_to_host.port_num;
484
485         if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
486                 pr_err("%s: MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n",
487                        __func__);
488                 return;
489         }
490
491         switch (msg->u.event_to_host.port_type) {
492         case MMAL_PORT_TYPE_CONTROL:
493                 if (port_num) {
494                         pr_err("%s: port_num of %u >= number of ports 1",
495                                __func__, port_num);
496                         return;
497                 }
498                 port = &component->control;
499                 break;
500         case MMAL_PORT_TYPE_INPUT:
501                 if (port_num >= component->inputs) {
502                         pr_err("%s: port_num of %u >= number of ports %u",
503                                __func__, port_num,
504                                port_num >= component->inputs);
505                         return;
506                 }
507                 port = &component->input[port_num];
508                 break;
509         case MMAL_PORT_TYPE_OUTPUT:
510                 if (port_num >= component->outputs) {
511                         pr_err("%s: port_num of %u >= number of ports %u",
512                                __func__, port_num,
513                                port_num >= component->outputs);
514                         return;
515                 }
516                 port = &component->output[port_num];
517                 break;
518         case MMAL_PORT_TYPE_CLOCK:
519                 if (port_num >= component->clocks) {
520                         pr_err("%s: port_num of %u >= number of ports %u",
521                                __func__, port_num,
522                                port_num >= component->clocks);
523                         return;
524                 }
525                 port = &component->clock[port_num];
526                 break;
527         default:
528                 break;
529         }
530
531         if (!mutex_trylock(&port->event_context_mutex)) {
532                 pr_err("dropping event 0x%x\n", msg->u.event_to_host.cmd);
533                 return;
534         }
535         msg_context = port->event_context;
536
537         if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
538                 /* message reception had an error */
539                 //pr_warn
540                 pr_err("%s: error %d in reply\n", __func__, msg->h.status);
541
542                 msg_context->u.bulk.status = msg->h.status;
543         } else if (msg->u.event_to_host.length > MMAL_WORKER_EVENT_SPACE) {
544                 /* data is not in message, queue a bulk receive */
545                 pr_err("%s: payload not in message - bulk receive??! NOT SUPPORTED\n",
546                        __func__);
547                 msg_context->u.bulk.status = -1;
548         } else {
549                 memcpy(msg_context->u.bulk.buffer->buffer,
550                        msg->u.event_to_host.data,
551                        msg->u.event_to_host.length);
552
553                 msg_context->u.bulk.buffer_used =
554                     msg->u.event_to_host.length;
555
556                 msg_context->u.bulk.mmal_flags = 0;
557                 msg_context->u.bulk.dts = MMAL_TIME_UNKNOWN;
558                 msg_context->u.bulk.pts = MMAL_TIME_UNKNOWN;
559                 msg_context->u.bulk.cmd = msg->u.event_to_host.cmd;
560
561                 pr_debug("event component:%u port type:%d num:%d cmd:0x%x length:%d\n",
562                          msg->u.event_to_host.client_component,
563                          msg->u.event_to_host.port_type,
564                          msg->u.event_to_host.port_num,
565                          msg->u.event_to_host.cmd, msg->u.event_to_host.length);
566         }
567
568         schedule_work(&msg_context->u.bulk.work);
569 }
570
/* deals with receipt of buffer to host message
 *
 * Looks up the message context via the drvbuf client_context handle and
 * completes the transfer in one of several ways: immediately (error,
 * zero-copy, inline payload, empty non-EOS buffer) by scheduling the
 * deferred callback here, or by queuing a bulk receive whose completion
 * (bulk_receive_cb / bulk_abort_cb) schedules the callback later.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	msg_context->u.bulk.mmal_flags =
				msg->u.buffer_from_host.buffer_header.flags;

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.is_zero_copy) {
		/*
		 * Zero copy buffer, so nothing to do.
		 * Copy buffer info and make callback.
		 */
		msg_context->u.bulk.buffer_used =
				msg->u.buffer_from_host.buffer_header.length;
		msg_context->u.bulk.mmal_flags =
				msg->u.buffer_from_host.buffer_header.flags;
		msg_context->u.bulk.dts =
				msg->u.buffer_from_host.buffer_header.dts;
		msg_context->u.bulk.pts =
				msg->u.buffer_from_host.buffer_header.pts;
		msg_context->u.bulk.cmd =
				msg->u.buffer_from_host.buffer_header.cmd;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS: queue a (dummy) bulk receive so buffer
			 * ordering is preserved
			 */
			msg_context->u.bulk.status =
			    bulk_receive(instance, msg, msg_context);
			if (msg_context->u.bulk.status == 0)
				return; /* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return; /* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
665
666 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
667                             struct mmal_msg_context *msg_context)
668 {
669         msg_context->u.bulk.status = 0;
670
671         /* schedule the port callback */
672         schedule_work(&msg_context->u.bulk.work);
673 }
674
675 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
676                           struct mmal_msg_context *msg_context)
677 {
678         pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
679
680         msg_context->u.bulk.status = -EINTR;
681
682         schedule_work(&msg_context->u.bulk.work);
683 }
684
/* incoming event service callback
 *
 * Entry point for all VCHI events on the MMAL service.  Buffer and event
 * messages are dispatched to their handlers and the held message released
 * here; any other message type is matched to a waiting synchronous caller
 * via its h.context handle, which then owns the held message.
 */
static void service_callback(void *param,
			     const VCHI_CALLBACK_REASON_T reason,
			     void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = param;
	int status;
	u32 msg_len;
	struct mmal_msg *msg;
	struct vchi_held_msg msg_handle;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return;
	}

	switch (reason) {
	case VCHI_CALLBACK_MSG_AVAILABLE:
		status = vchi_msg_hold(instance->handle, (void **)&msg,
				       &msg_len, VCHI_FLAGS_NONE, &msg_handle);
		if (status) {
			pr_err("Unable to dequeue a message (%d)\n", status);
			break;
		}

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* ack of our own buffer submission - nothing to do */
			vchi_held_msg_release(&msg_handle);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchi_held_msg_release(&msg_handle);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchi_held_msg_release(&msg_handle);
				break;
			}

			/* fill in context values; the held message is NOT
			 * released here - the synchronous caller releases it
			 * once it has read the reply
			 */
			msg_context->u.sync.msg_handle = msg_handle;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHI_CALLBACK_BULK_RECEIVED:
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}
}
787
/* Send a message to the VPU and block until the matching reply arrives.
 *
 * The message header is tagged with a per-call context handle; the VCHI
 * service callback looks that context up when the reply comes in, stores
 * the held reply message in it and completes u.sync.cmplt to wake us.
 *
 * On success, *msg_out and *msg_handle_out refer to the held reply, which
 * the caller must release with vchi_held_msg_release().  Returns 0 on
 * success or a negative errno (-EINVAL on oversize payload, -ETIME on
 * reply timeout, or the vchi queueing error).
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchi_held_msg *msg_handle_out)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long timeout;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	/* allocate a context carrying the completion the callback signals */
	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	/* the context handle travels in the header so the reply can be
	 * matched back to this call
	 */
	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	/* keep the service powered over the send */
	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					msg,
					sizeof(struct mmal_msg_header) +
					payload_len);

	vchi_service_release(instance->handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	/* wait for the callback to complete us; SYNC_MSG_TIMEOUT guards
	 * against a wedged VPU
	 */
	timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
					      SYNC_MSG_TIMEOUT * HZ);
	if (timeout == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	/* hand the held reply (filled in by the callback) to the caller;
	 * the context itself is no longer needed
	 */
	*msg_out = msg_context->u.sync.msg;
	*msg_handle_out = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
851
852 static void dump_port_info(struct vchiq_mmal_port *port)
853 {
854         pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
855
856         pr_debug("buffer minimum num:%d size:%d align:%d\n",
857                  port->minimum_buffer.num,
858                  port->minimum_buffer.size, port->minimum_buffer.alignment);
859
860         pr_debug("buffer recommended num:%d size:%d align:%d\n",
861                  port->recommended_buffer.num,
862                  port->recommended_buffer.size,
863                  port->recommended_buffer.alignment);
864
865         pr_debug("buffer current values num:%d size:%d align:%d\n",
866                  port->current_buffer.num,
867                  port->current_buffer.size, port->current_buffer.alignment);
868
869         pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
870                  port->format.type,
871                  port->format.encoding, port->format.encoding_variant);
872
873         pr_debug("                  bitrate:%d flags:0x%x\n",
874                  port->format.bitrate, port->format.flags);
875
876         if (port->format.type == MMAL_ES_TYPE_VIDEO) {
877                 pr_debug
878                     ("es video format: width:%d height:%d colourspace:0x%x\n",
879                      port->es.video.width, port->es.video.height,
880                      port->es.video.color_space);
881
882                 pr_debug("               : crop xywh %d,%d,%d,%d\n",
883                          port->es.video.crop.x,
884                          port->es.video.crop.y,
885                          port->es.video.crop.width, port->es.video.crop.height);
886                 pr_debug("               : framerate %d/%d  aspect %d/%d\n",
887                          port->es.video.frame_rate.num,
888                          port->es.video.frame_rate.den,
889                          port->es.video.par.num, port->es.video.par.den);
890         }
891 }
892
893 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
894 {
895         /* todo do readonly fields need setting at all? */
896         p->type = port->type;
897         p->index = port->index;
898         p->index_all = 0;
899         p->is_enabled = port->enabled;
900         p->buffer_num_min = port->minimum_buffer.num;
901         p->buffer_size_min = port->minimum_buffer.size;
902         p->buffer_alignment_min = port->minimum_buffer.alignment;
903         p->buffer_num_recommended = port->recommended_buffer.num;
904         p->buffer_size_recommended = port->recommended_buffer.size;
905
906         /* only three writable fields in a port */
907         p->buffer_num = port->current_buffer.num;
908         p->buffer_size = port->current_buffer.size;
909         p->userdata = (u32)(unsigned long)port;
910 }
911
912 static int port_info_set(struct vchiq_mmal_instance *instance,
913                          struct vchiq_mmal_port *port)
914 {
915         int ret;
916         struct mmal_msg m;
917         struct mmal_msg *rmsg;
918         struct vchi_held_msg rmsg_handle;
919
920         pr_debug("setting port info port %p\n", port);
921         if (!port)
922                 return -1;
923         dump_port_info(port);
924
925         m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
926
927         m.u.port_info_set.component_handle = port->component->handle;
928         m.u.port_info_set.port_type = port->type;
929         m.u.port_info_set.port_index = port->index;
930
931         port_to_mmal_msg(port, &m.u.port_info_set.port);
932
933         /* elementary stream format setup */
934         m.u.port_info_set.format.type = port->format.type;
935         m.u.port_info_set.format.encoding = port->format.encoding;
936         m.u.port_info_set.format.encoding_variant =
937             port->format.encoding_variant;
938         m.u.port_info_set.format.bitrate = port->format.bitrate;
939         m.u.port_info_set.format.flags = port->format.flags;
940
941         memcpy(&m.u.port_info_set.es, &port->es,
942                sizeof(union mmal_es_specific_format));
943
944         m.u.port_info_set.format.extradata_size = port->format.extradata_size;
945         memcpy(&m.u.port_info_set.extradata, port->format.extradata,
946                port->format.extradata_size);
947
948         ret = send_synchronous_mmal_msg(instance, &m,
949                                         sizeof(m.u.port_info_set),
950                                         &rmsg, &rmsg_handle);
951         if (ret)
952                 return ret;
953
954         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
955                 /* got an unexpected message type in reply */
956                 ret = -EINVAL;
957                 goto release_msg;
958         }
959
960         /* return operation status */
961         ret = -rmsg->u.port_info_get_reply.status;
962
963         pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
964                  port->component->handle, port->handle);
965
966 release_msg:
967         vchi_held_msg_release(&rmsg_handle);
968
969         return ret;
970 }
971
/* use port info get message to retrieve port information
 *
 * Sends PORT_INFO_GET for the port identified by (component handle,
 * type, index) and unpacks the reply into the local port structure:
 * enabled state, port handle, buffer requirements, stream format and
 * extradata.  Returns 0 on success, -EINVAL for a bad reply type, or
 * the negated VPU status.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchi_held_msg rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status (MMAL_MSG_STATUS_SUCCESS is 0, so this
	 * is effectively a ret != 0 check)
	 */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	/* normalise the VPU's enabled flag to 0/1 */
	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = 0;
	else
		port->enabled = 1;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended_buffer.size is never filled in here,
	 * and its alignment is taken from buffer_alignment_min — confirm
	 * this is intentional
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	/* NOTE(review): extradata_size is VPU-supplied and used unchecked
	 * as the memcpy length into port->format.extradata — confirm the
	 * firmware bounds it to the extradata buffer size
	 */
	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1066
1067 /* create comonent on vc */
1068 static int create_component(struct vchiq_mmal_instance *instance,
1069                             struct vchiq_mmal_component *component,
1070                             const char *name)
1071 {
1072         int ret;
1073         struct mmal_msg m;
1074         struct mmal_msg *rmsg;
1075         struct vchi_held_msg rmsg_handle;
1076
1077         /* build component create message */
1078         m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
1079         m.u.component_create.client_component = component->client_component;
1080         strncpy(m.u.component_create.name, name,
1081                 sizeof(m.u.component_create.name));
1082
1083         ret = send_synchronous_mmal_msg(instance, &m,
1084                                         sizeof(m.u.component_create),
1085                                         &rmsg, &rmsg_handle);
1086         if (ret)
1087                 return ret;
1088
1089         if (rmsg->h.type != m.h.type) {
1090                 /* got an unexpected message type in reply */
1091                 ret = -EINVAL;
1092                 goto release_msg;
1093         }
1094
1095         ret = -rmsg->u.component_create_reply.status;
1096         if (ret != MMAL_MSG_STATUS_SUCCESS)
1097                 goto release_msg;
1098
1099         /* a valid component response received */
1100         component->handle = rmsg->u.component_create_reply.component_handle;
1101         component->inputs = rmsg->u.component_create_reply.input_num;
1102         component->outputs = rmsg->u.component_create_reply.output_num;
1103         component->clocks = rmsg->u.component_create_reply.clock_num;
1104
1105         pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
1106                  component->handle,
1107                  component->inputs, component->outputs, component->clocks);
1108
1109 release_msg:
1110         vchi_held_msg_release(&rmsg_handle);
1111
1112         return ret;
1113 }
1114
1115 /* destroys a component on vc */
1116 static int destroy_component(struct vchiq_mmal_instance *instance,
1117                              struct vchiq_mmal_component *component)
1118 {
1119         int ret;
1120         struct mmal_msg m;
1121         struct mmal_msg *rmsg;
1122         struct vchi_held_msg rmsg_handle;
1123
1124         m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
1125         m.u.component_destroy.component_handle = component->handle;
1126
1127         ret = send_synchronous_mmal_msg(instance, &m,
1128                                         sizeof(m.u.component_destroy),
1129                                         &rmsg, &rmsg_handle);
1130         if (ret)
1131                 return ret;
1132
1133         if (rmsg->h.type != m.h.type) {
1134                 /* got an unexpected message type in reply */
1135                 ret = -EINVAL;
1136                 goto release_msg;
1137         }
1138
1139         ret = -rmsg->u.component_destroy_reply.status;
1140
1141 release_msg:
1142
1143         vchi_held_msg_release(&rmsg_handle);
1144
1145         return ret;
1146 }
1147
1148 /* enable a component on vc */
1149 static int enable_component(struct vchiq_mmal_instance *instance,
1150                             struct vchiq_mmal_component *component)
1151 {
1152         int ret;
1153         struct mmal_msg m;
1154         struct mmal_msg *rmsg;
1155         struct vchi_held_msg rmsg_handle;
1156
1157         m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1158         m.u.component_enable.component_handle = component->handle;
1159
1160         ret = send_synchronous_mmal_msg(instance, &m,
1161                                         sizeof(m.u.component_enable),
1162                                         &rmsg, &rmsg_handle);
1163         if (ret)
1164                 return ret;
1165
1166         if (rmsg->h.type != m.h.type) {
1167                 /* got an unexpected message type in reply */
1168                 ret = -EINVAL;
1169                 goto release_msg;
1170         }
1171
1172         ret = -rmsg->u.component_enable_reply.status;
1173
1174 release_msg:
1175         vchi_held_msg_release(&rmsg_handle);
1176
1177         return ret;
1178 }
1179
1180 /* disable a component on vc */
1181 static int disable_component(struct vchiq_mmal_instance *instance,
1182                              struct vchiq_mmal_component *component)
1183 {
1184         int ret;
1185         struct mmal_msg m;
1186         struct mmal_msg *rmsg;
1187         struct vchi_held_msg rmsg_handle;
1188
1189         m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1190         m.u.component_disable.component_handle = component->handle;
1191
1192         ret = send_synchronous_mmal_msg(instance, &m,
1193                                         sizeof(m.u.component_disable),
1194                                         &rmsg, &rmsg_handle);
1195         if (ret)
1196                 return ret;
1197
1198         if (rmsg->h.type != m.h.type) {
1199                 /* got an unexpected message type in reply */
1200                 ret = -EINVAL;
1201                 goto release_msg;
1202         }
1203
1204         ret = -rmsg->u.component_disable_reply.status;
1205
1206 release_msg:
1207
1208         vchi_held_msg_release(&rmsg_handle);
1209
1210         return ret;
1211 }
1212
1213 /* get version of mmal implementation */
1214 static int get_version(struct vchiq_mmal_instance *instance,
1215                        u32 *major_out, u32 *minor_out)
1216 {
1217         int ret;
1218         struct mmal_msg m;
1219         struct mmal_msg *rmsg;
1220         struct vchi_held_msg rmsg_handle;
1221
1222         m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1223
1224         ret = send_synchronous_mmal_msg(instance, &m,
1225                                         sizeof(m.u.version),
1226                                         &rmsg, &rmsg_handle);
1227         if (ret)
1228                 return ret;
1229
1230         if (rmsg->h.type != m.h.type) {
1231                 /* got an unexpected message type in reply */
1232                 ret = -EINVAL;
1233                 goto release_msg;
1234         }
1235
1236         *major_out = rmsg->u.version.major;
1237         *minor_out = rmsg->u.version.minor;
1238
1239 release_msg:
1240         vchi_held_msg_release(&rmsg_handle);
1241
1242         return ret;
1243 }
1244
1245 /* do a port action with a port as a parameter */
1246 static int port_action_port(struct vchiq_mmal_instance *instance,
1247                             struct vchiq_mmal_port *port,
1248                             enum mmal_msg_port_action_type action_type)
1249 {
1250         int ret;
1251         struct mmal_msg m;
1252         struct mmal_msg *rmsg;
1253         struct vchi_held_msg rmsg_handle;
1254
1255         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1256         m.u.port_action_port.component_handle = port->component->handle;
1257         m.u.port_action_port.port_handle = port->handle;
1258         m.u.port_action_port.action = action_type;
1259
1260         port_to_mmal_msg(port, &m.u.port_action_port.port);
1261
1262         ret = send_synchronous_mmal_msg(instance, &m,
1263                                         sizeof(m.u.port_action_port),
1264                                         &rmsg, &rmsg_handle);
1265         if (ret)
1266                 return ret;
1267
1268         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1269                 /* got an unexpected message type in reply */
1270                 ret = -EINVAL;
1271                 goto release_msg;
1272         }
1273
1274         ret = -rmsg->u.port_action_reply.status;
1275
1276         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1277                  __func__,
1278                  ret, port->component->handle, port->handle,
1279                  port_action_type_names[action_type], action_type);
1280
1281 release_msg:
1282         vchi_held_msg_release(&rmsg_handle);
1283
1284         return ret;
1285 }
1286
1287 /* do a port action with handles as parameters */
1288 static int port_action_handle(struct vchiq_mmal_instance *instance,
1289                               struct vchiq_mmal_port *port,
1290                               enum mmal_msg_port_action_type action_type,
1291                               u32 connect_component_handle,
1292                               u32 connect_port_handle)
1293 {
1294         int ret;
1295         struct mmal_msg m;
1296         struct mmal_msg *rmsg;
1297         struct vchi_held_msg rmsg_handle;
1298
1299         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1300
1301         m.u.port_action_handle.component_handle = port->component->handle;
1302         m.u.port_action_handle.port_handle = port->handle;
1303         m.u.port_action_handle.action = action_type;
1304
1305         m.u.port_action_handle.connect_component_handle =
1306             connect_component_handle;
1307         m.u.port_action_handle.connect_port_handle = connect_port_handle;
1308
1309         ret = send_synchronous_mmal_msg(instance, &m,
1310                                         sizeof(m.u.port_action_handle),
1311                                         &rmsg, &rmsg_handle);
1312         if (ret)
1313                 return ret;
1314
1315         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1316                 /* got an unexpected message type in reply */
1317                 ret = -EINVAL;
1318                 goto release_msg;
1319         }
1320
1321         ret = -rmsg->u.port_action_reply.status;
1322
1323         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1324                  __func__,
1325                  ret, port->component->handle, port->handle,
1326                  port_action_type_names[action_type],
1327                  action_type, connect_component_handle, connect_port_handle);
1328
1329 release_msg:
1330         vchi_held_msg_release(&rmsg_handle);
1331
1332         return ret;
1333 }
1334
1335 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1336                               struct vchiq_mmal_port *port,
1337                               u32 parameter_id, void *value, u32 value_size)
1338 {
1339         int ret;
1340         struct mmal_msg m;
1341         struct mmal_msg *rmsg;
1342         struct vchi_held_msg rmsg_handle;
1343
1344         m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1345
1346         m.u.port_parameter_set.component_handle = port->component->handle;
1347         m.u.port_parameter_set.port_handle = port->handle;
1348         m.u.port_parameter_set.id = parameter_id;
1349         m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1350         memcpy(&m.u.port_parameter_set.value, value, value_size);
1351
1352         ret = send_synchronous_mmal_msg(instance, &m,
1353                                         (4 * sizeof(u32)) + value_size,
1354                                         &rmsg, &rmsg_handle);
1355         if (ret)
1356                 return ret;
1357
1358         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1359                 /* got an unexpected message type in reply */
1360                 ret = -EINVAL;
1361                 goto release_msg;
1362         }
1363
1364         ret = -rmsg->u.port_parameter_set_reply.status;
1365
1366         pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1367                  __func__,
1368                  ret, port->component->handle, port->handle, parameter_id);
1369
1370 release_msg:
1371         vchi_held_msg_release(&rmsg_handle);
1372
1373         return ret;
1374 }
1375
/* Get a port parameter from the VPU.
 *
 * On entry *value_size is the capacity of the caller's buffer; on exit
 * it holds the true size of the parameter (which may exceed what was
 * copied).  Returns the VPU status (0 on success) or -EINVAL for a bad
 * reply type.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchi_held_msg rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* wire-format size includes the id and size words themselves */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	/* NOTE(review): unlike the other helpers, the status here is NOT
	 * negated — confirm callers expect the raw VPU status
	 */
	ret = rmsg->u.port_parameter_get_reply.status;

	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 * NOTE(review): a reply size below 2*sizeof(u32) would wrap this
	 * u32 subtraction — confirm the firmware guarantees the minimum.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}
	/* Always report the size of the returned parameter to the caller */
	*value_size = rmsg->u.port_parameter_get_reply.size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1434
/* disables a port and drains buffers from it
 *
 * Clears the local enabled flag, sends the DISABLE action to the VPU,
 * then hands every still-queued buffer back to the client through the
 * port's buffer callback (with zeroed length/flags/timestamps) and
 * refreshes the cached port info.  Returns 0 if the port was already
 * disabled, otherwise the action/info-get result.
 */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	if (!port->enabled)
		return 0;

	/* mark disabled before the VPU round-trip so local state never
	 * claims an enabled port that the VPU has stopped
	 */
	port->enabled = 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain all queued buffers on port. This should only
		 * apply to buffers that have been queued before the port
		 * has been enabled. If the port has been enabled and buffers
		 * passed, then the buffers should have been removed from this
		 * list, and we should get the relevant callbacks via VCHIQ
		 * to release the buffers.
		 */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			if (port->buffer_cb) {
				/* return the buffer empty: no payload,
				 * no flags, unknown timestamps
				 */
				mmalbuf->length = 0;
				mmalbuf->mmal_flags = 0;
				mmalbuf->dts = MMAL_TIME_UNKNOWN;
				mmalbuf->pts = MMAL_TIME_UNKNOWN;
				mmalbuf->cmd = 0;
				port->buffer_cb(instance,
						port, 0, mmalbuf);
			}
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* re-read the port state now the VPU has disabled it */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1485
/* enable a port
 *
 * Sends the ENABLE action to the VPU, marks the port enabled, then (if a
 * buffer callback is registered) submits up to current_buffer.num queued
 * buffers to the VPU and refreshes the cached port info.  Returns 0 on
 * success (or if already enabled), otherwise the first failure.
 */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;

	if (port->enabled)
		return 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = 1;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			/* NOTE(review): on failure the buffer stays on the
			 * list but the port remains marked enabled —
			 * confirm callers cope with this partial state
			 */
			if (ret)
				goto done;

			/* buffer now owned by the VPU; drop it from the
			 * local queue
			 */
			list_del(buf_head);
			/* hdr_count starts at 1, so this submits at most
			 * current_buffer.num buffers
			 */
			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	ret = port_info_get(instance, port);

done:
	return ret;
}
1528
1529 /* ------------------------------------------------------------------
1530  * Exported API
1531  *------------------------------------------------------------------
1532  */
1533
1534 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1535                                struct vchiq_mmal_port *port)
1536 {
1537         int ret;
1538
1539         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1540                 return -EINTR;
1541
1542         ret = port_info_set(instance, port);
1543         if (ret)
1544                 goto release_unlock;
1545
1546         /* read what has actually been set */
1547         ret = port_info_get(instance, port);
1548
1549 release_unlock:
1550         mutex_unlock(&instance->vchiq_mutex);
1551
1552         return ret;
1553 }
1554 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1555
1556 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1557                                   struct vchiq_mmal_port *port,
1558                                   u32 parameter, void *value, u32 value_size)
1559 {
1560         int ret;
1561
1562         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1563                 return -EINTR;
1564
1565         ret = port_parameter_set(instance, port, parameter, value, value_size);
1566
1567         mutex_unlock(&instance->vchiq_mutex);
1568
1569         if (parameter == MMAL_PARAMETER_ZERO_COPY && !ret)
1570                 port->zero_copy = !!(*(bool *)value);
1571
1572         return ret;
1573 }
1574 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1575
1576 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1577                                   struct vchiq_mmal_port *port,
1578                                   u32 parameter, void *value, u32 *value_size)
1579 {
1580         int ret;
1581
1582         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1583                 return -EINTR;
1584
1585         ret = port_parameter_get(instance, port, parameter, value, value_size);
1586
1587         mutex_unlock(&instance->vchiq_mutex);
1588
1589         return ret;
1590 }
1591 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1592
1593 /* enable a port
1594  *
1595  * enables a port and queues buffers for satisfying callbacks if we
1596  * provide a callback handler
1597  */
1598 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1599                            struct vchiq_mmal_port *port,
1600                            vchiq_mmal_buffer_cb buffer_cb)
1601 {
1602         int ret;
1603
1604         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1605                 return -EINTR;
1606
1607         /* already enabled - noop */
1608         if (port->enabled) {
1609                 ret = 0;
1610                 goto unlock;
1611         }
1612
1613         port->buffer_cb = buffer_cb;
1614
1615         ret = port_enable(instance, port);
1616
1617 unlock:
1618         mutex_unlock(&instance->vchiq_mutex);
1619
1620         return ret;
1621 }
1622 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
1623
1624 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1625                             struct vchiq_mmal_port *port)
1626 {
1627         int ret;
1628
1629         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1630                 return -EINTR;
1631
1632         if (!port->enabled) {
1633                 mutex_unlock(&instance->vchiq_mutex);
1634                 return 0;
1635         }
1636
1637         ret = port_disable(instance, port);
1638
1639         mutex_unlock(&instance->vchiq_mutex);
1640
1641         return ret;
1642 }
1643 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1644
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Tears down any existing connection from @src first; when @dst is
 * NULL only the disconnect is performed. Returns 0 on success or a
 * negative errno from the underlying port operations.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		/* the source is disabled before the disconnect action */
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		/* book-keeping: mark the old peer disabled and unlink it */
		src->connected->enabled = 0;
		src->connected = NULL;
	}

	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
	dst->es.video.frame_rate.den = src->es.video.frame_rate.den;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
1732
1733 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1734                              struct vchiq_mmal_port *port,
1735                              struct mmal_buffer *buffer)
1736 {
1737         unsigned long flags = 0;
1738         int ret;
1739
1740         /*
1741          * We really want to do this in mmal_vchi_buffer_init but can't as
1742          * videobuf2 won't let us have the dmabuf there.
1743          */
1744         if (port->zero_copy && buffer->dma_buf && !buffer->vcsm_handle) {
1745                 pr_debug("%s: import dmabuf %p\n", __func__, buffer->dma_buf);
1746                 ret = vc_sm_cma_import_dmabuf(buffer->dma_buf,
1747                                               &buffer->vcsm_handle);
1748                 if (ret) {
1749                         pr_err("%s: vc_sm_import_dmabuf_fd failed, ret %d\n",
1750                                __func__, ret);
1751                         return ret;
1752                 }
1753
1754                 buffer->vc_handle = vc_sm_cma_int_handle(buffer->vcsm_handle);
1755                 if (!buffer->vc_handle) {
1756                         pr_err("%s: vc_sm_int_handle failed %d\n",
1757                                __func__, ret);
1758                         vc_sm_cma_free(buffer->vcsm_handle);
1759                         return ret;
1760                 }
1761                 pr_debug("%s: import dmabuf %p - got vc handle %08X\n",
1762                          __func__, buffer->dma_buf, buffer->vc_handle);
1763         }
1764
1765         ret = buffer_from_host(instance, port, buffer);
1766         if (ret == -EINVAL) {
1767                 /* Port is disabled. Queue for when it is enabled. */
1768                 spin_lock_irqsave(&port->slock, flags);
1769                 list_add_tail(&buffer->list, &port->buffers);
1770                 spin_unlock_irqrestore(&port->slock, flags);
1771         }
1772
1773         return 0;
1774 }
1775 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1776
1777 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1778                           struct mmal_buffer *buf)
1779 {
1780         struct mmal_msg_context *msg_context = get_msg_context(instance);
1781
1782         if (IS_ERR(msg_context))
1783                 return (PTR_ERR(msg_context));
1784
1785         buf->msg_context = msg_context;
1786         return 0;
1787 }
1788 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1789
1790 int mmal_vchi_buffer_unmap(struct mmal_buffer *buf)
1791 {
1792         int ret = 0;
1793
1794         if (buf->vcsm_handle) {
1795                 int ret;
1796
1797                 pr_debug("%s: vc_sm_cma_free on handle %p\n", __func__,
1798                          buf->vcsm_handle);
1799                 ret = vc_sm_cma_free(buf->vcsm_handle);
1800                 if (ret)
1801                         pr_err("%s: vcsm_free failed, ret %d\n", __func__, ret);
1802                 buf->vcsm_handle = 0;
1803         }
1804         return ret;
1805 }
1806 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_unmap);
1807
1808 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1809 {
1810         struct mmal_msg_context *msg_context = buf->msg_context;
1811
1812         if (msg_context)
1813                 release_msg_context(msg_context);
1814         buf->msg_context = NULL;
1815
1816         mmal_vchi_buffer_unmap(buf);
1817         return 0;
1818 }
1819 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
1820
1821 static void init_event_context(struct vchiq_mmal_instance *instance,
1822                                struct vchiq_mmal_port *port)
1823 {
1824         struct mmal_msg_context *ctx = get_msg_context(instance);
1825
1826         mutex_init(&port->event_context_mutex);
1827
1828         port->event_context = ctx;
1829         ctx->u.bulk.instance = instance;
1830         ctx->u.bulk.port = port;
1831         ctx->u.bulk.buffer =
1832                 kzalloc(sizeof(*ctx->u.bulk.buffer), GFP_KERNEL);
1833         if (!ctx->u.bulk.buffer)
1834                 goto release_msg_context;
1835         ctx->u.bulk.buffer->buffer = kzalloc(MMAL_WORKER_EVENT_SPACE,
1836                                              GFP_KERNEL);
1837         if (!ctx->u.bulk.buffer->buffer)
1838                 goto release_buffer;
1839
1840         INIT_WORK(&ctx->u.bulk.work, buffer_work_cb);
1841         return;
1842
1843 release_buffer:
1844         kfree(ctx->u.bulk.buffer);
1845 release_msg_context:
1846         release_msg_context(ctx);
1847 }
1848
1849 static void free_event_context(struct vchiq_mmal_port *port)
1850 {
1851         struct mmal_msg_context *ctx = port->event_context;
1852
1853         if (!ctx)
1854                 return;
1855
1856         kfree(ctx->u.bulk.buffer->buffer);
1857         kfree(ctx->u.bulk.buffer);
1858         release_msg_context(ctx);
1859         port->event_context = NULL;
1860 }
1861
1862 static void release_all_event_contexts(struct vchiq_mmal_component *component)
1863 {
1864         int idx;
1865
1866         for (idx = 0; idx < component->inputs; idx++)
1867                 free_event_context(&component->input[idx]);
1868         for (idx = 0; idx < component->outputs; idx++)
1869                 free_event_context(&component->output[idx]);
1870         for (idx = 0; idx < component->clocks; idx++)
1871                 free_event_context(&component->clock[idx]);
1872         free_event_context(&component->control);
1873 }
1874
/* Initialise a mmal component and its ports
 *
 * Claims a slot in the instance's fixed-size component table, asks the
 * VPU to create the component, then gathers port info and sets up an
 * event context for the control port and every input/output/clock port.
 * On success *component_out receives the component; on failure the slot
 * is released again. Returns 0 or a negative errno.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx;                /* port index */
	struct vchiq_mmal_component *component = NULL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* claim the first free slot in the component table */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = 1;
			break;
		}
	}

	if (!component) {
		ret = -EINVAL;  /* todo is this correct error? */
		goto unlock;
	}

	/* We need a handle to reference back to our component structure.
	 * Use the array index in instance->component rather than rolling
	 * another IDR.
	 */
	component->client_component = idx;

	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}

	/* ports info needs gathering */
	/* NOTE(review): init_event_context() returns void, so its internal
	 * allocation failures are not reported here; a failed port is left
	 * with a NULL/unset event context - confirm downstream handling.
	 */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;
	init_event_context(instance, &component->control);

	/* component->inputs/outputs/clocks are populated by the
	 * port_info_get()/create_component() exchanges above
	 */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
		init_event_context(instance, &component->input[idx]);
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
		init_event_context(instance, &component->output[idx]);
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
		init_event_context(instance, &component->clock[idx]);
	}

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	/* undo the VPU-side create and free any event contexts made so far */
	destroy_component(instance, component);
	release_all_event_contexts(component);
unlock:
	if (component)
		component->in_use = 0;
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1978 EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1979
/*
 * cause a mmal component to be destroyed
 *
 * Disables the component if needed, destroys it on the VPU, frees its
 * slot and releases every port's event context. Returns the status of
 * the destroy operation.
 */
int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_component *component)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	if (component->enabled)
		ret = disable_component(instance, component);

	/* NOTE(review): the disable_component() result above is immediately
	 * overwritten, so only the destroy status reaches the caller -
	 * confirm this is intended.
	 */
	ret = destroy_component(instance, component);

	component->in_use = 0;

	release_all_event_contexts(component);

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
2005
2006 /*
2007  * cause a mmal component to be enabled
2008  */
2009 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
2010                                 struct vchiq_mmal_component *component)
2011 {
2012         int ret;
2013
2014         if (mutex_lock_interruptible(&instance->vchiq_mutex))
2015                 return -EINTR;
2016
2017         if (component->enabled) {
2018                 mutex_unlock(&instance->vchiq_mutex);
2019                 return 0;
2020         }
2021
2022         ret = enable_component(instance, component);
2023         if (ret == 0)
2024                 component->enabled = 1;
2025
2026         mutex_unlock(&instance->vchiq_mutex);
2027
2028         return ret;
2029 }
2030 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
2031
2032 /*
2033  * cause a mmal component to be enabled
2034  */
2035 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
2036                                  struct vchiq_mmal_component *component)
2037 {
2038         int ret;
2039
2040         if (mutex_lock_interruptible(&instance->vchiq_mutex))
2041                 return -EINTR;
2042
2043         if (!component->enabled) {
2044                 mutex_unlock(&instance->vchiq_mutex);
2045                 return 0;
2046         }
2047
2048         ret = disable_component(instance, component);
2049         if (ret == 0)
2050                 component->enabled = 0;
2051
2052         mutex_unlock(&instance->vchiq_mutex);
2053
2054         return ret;
2055 }
2056 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
2057
2058 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
2059                        u32 *major_out, u32 *minor_out)
2060 {
2061         int ret;
2062
2063         if (mutex_lock_interruptible(&instance->vchiq_mutex))
2064                 return -EINTR;
2065
2066         ret = get_version(instance, major_out, minor_out);
2067
2068         mutex_unlock(&instance->vchiq_mutex);
2069
2070         return ret;
2071 }
2072 EXPORT_SYMBOL_GPL(vchiq_mmal_version);
2073
/* Shut down the MMAL service connection and free the instance: close
 * the service, disconnect VCHI, drain and destroy the bulk workqueue
 * and release all instance allocations. Returns the service-close
 * status.
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* presumably takes a use count so the close below can proceed on a
	 * released service - confirm against the vchi API
	 */
	vchi_service_use(instance->handle);

	status = vchi_service_close(instance->handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	/* disconnect first, then flush any bulk work still queued before
	 * tearing the workqueue down
	 */
	vchi_disconnect(instance->vchi_instance);
	flush_workqueue(instance->bulk_wq);
	destroy_workqueue(instance->bulk_wq);

	vfree(instance->bulk_scratch);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
2105
2106 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
2107 {
2108         int status;
2109         int err = -ENODEV;
2110         struct vchiq_mmal_instance *instance;
2111         static VCHI_INSTANCE_T vchi_instance;
2112         struct service_creation params = {
2113                 .version                = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
2114                 .service_id             = VC_MMAL_SERVER_NAME,
2115                 .callback               = service_callback,
2116                 .callback_param         = NULL,
2117         };
2118
2119         /* compile time checks to ensure structure size as they are
2120          * directly (de)serialised from memory.
2121          */
2122
2123         /* ensure the header structure has packed to the correct size */
2124         BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
2125
2126         /* ensure message structure does not exceed maximum length */
2127         BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
2128
2129         /* mmal port struct is correct size */
2130         BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
2131
2132         /* create a vchi instance */
2133         status = vchi_initialise(&vchi_instance);
2134         if (status) {
2135                 pr_err("Failed to initialise VCHI instance (status=%d)\n",
2136                        status);
2137                 return -EIO;
2138         }
2139
2140         status = vchi_connect(vchi_instance);
2141         if (status) {
2142                 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
2143                 err = -EIO;
2144                 goto err_disconnect_vchi;
2145         }
2146
2147         instance = kzalloc(sizeof(*instance), GFP_KERNEL);
2148
2149         if (!instance) {
2150                 err = -ENOMEM;
2151                 goto err_disconnect_vchi;
2152         }
2153
2154         mutex_init(&instance->vchiq_mutex);
2155
2156         instance->bulk_scratch = vmalloc(PAGE_SIZE);
2157         instance->vchi_instance = vchi_instance;
2158
2159         mutex_init(&instance->context_map_lock);
2160         idr_init_base(&instance->context_map, 1);
2161
2162         params.callback_param = instance;
2163
2164         instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
2165                                                     WQ_MEM_RECLAIM);
2166         if (!instance->bulk_wq)
2167                 goto err_free;
2168
2169         status = vchi_service_open(vchi_instance, &params, &instance->handle);
2170         if (status) {
2171                 pr_err("Failed to open VCHI service connection (status=%d)\n",
2172                        status);
2173                 goto err_close_services;
2174         }
2175
2176         vchi_service_release(instance->handle);
2177
2178         *out_instance = instance;
2179
2180         return 0;
2181
2182 err_close_services:
2183         vchi_service_close(instance->handle);
2184         destroy_workqueue(instance->bulk_wq);
2185 err_free:
2186         vfree(instance->bulk_scratch);
2187         kfree(instance);
2188 err_disconnect_vchi:
2189         vchi_disconnect(vchi_instance);
2190         return err;
2191 }
2192 EXPORT_SYMBOL_GPL(vchiq_mmal_init);