61a3593cdcfc3b7284c7cf4d6c6a65f8aa247519
[platform/kernel/linux-rpi.git] / drivers / staging / vc04_services / vchiq-mmal / mmal-vchiq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *              (now dave.stevenson@raspberrypi.org)
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
27
28 #include "mmal-common.h"
29 #include "mmal-parameters.h"
30 #include "mmal-vchiq.h"
31 #include "mmal-msg.h"
32
33 #include "vc-sm-cma/vc_sm_knl.h"
34
35 #define USE_VCHIQ_ARM
36 #include "interface/vchi/vchi.h"
37
38 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
39 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION("0.0.1");
42
43 /*
44  * maximum number of components supported.
45  * This matches the maximum permitted by default on the VPU
46  */
47 #define VCHIQ_MMAL_MAX_COMPONENTS 64
48
49 /*
50  * Timeout for synchronous msg responses in seconds.
51  * Helpful to increase this if stopping in the VPU debugger.
52  */
53 #define SYNC_MSG_TIMEOUT        3
54
55 /*#define FULL_MSG_DUMP 1*/
56
#ifdef DEBUG
/* Human-readable names for the MMAL message types, indexed by the
 * message header type field.  Referenced only by DBG_DUMP_MSG below,
 * so only compiled in when DEBUG is defined.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
88
/* Human-readable names for the MMAL port action types, indexed by the
 * action type value (used in port action request messages).
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
98
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
/*
 * Dump an MMAL message: one-line summary (type name, type value, length)
 * followed by hex dumps of the header and the payload.
 * Multi-statement body is wrapped in do { } while (0) so the macro is a
 * single statement and expands safely in unbraced if/else arms.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
	do {                                                            \
		pr_debug(TITLE" type:%s(%d) length:%d\n",               \
			 msg_type_names[(MSG)->h.type],                 \
			 (MSG)->h.type, (MSG_LEN));                     \
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
			       16, 4, (MSG),                            \
			       sizeof(struct mmal_msg_header), 1);      \
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
			       16, 4,                                   \
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
/*
 * Summary-only variant: log just the message type and length.
 * Wrapped in do { } while (0) rather than bare braces, which would
 * otherwise break "if (x) DBG_DUMP_MSG(...); else ..." callers.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
	do {                                                            \
		pr_debug(TITLE" type:%s(%d) length:%d\n",               \
			 msg_type_names[(MSG)->h.type],                 \
			 (MSG)->h.type, (MSG_LEN));                     \
	} while (0)
#endif
#else
/* DEBUG off: message dumping compiles away entirely. */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
125
126 struct vchiq_mmal_instance;
127
/* Per-message context, carried by handle in the message header so a
 * VCHI reply can be matched back to the request that produced it.
 * The union reflects the two reply paths: "bulk" for buffer/event data
 * delivered via workqueues, "sync" for synchronous request/response.
 */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred callback */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;
			/* MMAL buffer command flag */
			u32 cmd;

			int status;	/* context status */

		} bulk;		/* bulk data */

		struct {
			/* message handle to release */
			struct vchi_held_msg msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;

};
176
/* State for one open MMAL-over-VCHIQ connection. */
struct vchiq_mmal_instance {
	/* VCHI service handle used for all message and bulk traffic */
	VCHI_SERVICE_HANDLE_T handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* vmalloc page to receive scratch bulk xfers into */
	void *bulk_scratch;

	/* maps integer handles to in-flight mmal_msg_context */
	struct idr context_map;
	/* protect accesses to context_map */
	struct mutex context_map_lock;

	/* fixed pool of components; size matches the VPU default limit */
	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations */
	struct workqueue_struct *bulk_wq;
};
195
196 static struct mmal_msg_context *
197 get_msg_context(struct vchiq_mmal_instance *instance)
198 {
199         struct mmal_msg_context *msg_context;
200         int handle;
201
202         /* todo: should this be allocated from a pool to avoid kzalloc */
203         msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
204
205         if (!msg_context)
206                 return ERR_PTR(-ENOMEM);
207
208         /* Create an ID that will be passed along with our message so
209          * that when we service the VCHI reply, we can look up what
210          * message is being replied to.
211          */
212         mutex_lock(&instance->context_map_lock);
213         handle = idr_alloc(&instance->context_map, msg_context,
214                            0, 0, GFP_KERNEL);
215         mutex_unlock(&instance->context_map_lock);
216
217         if (handle < 0) {
218                 kfree(msg_context);
219                 return ERR_PTR(handle);
220         }
221
222         msg_context->instance = instance;
223         msg_context->handle = handle;
224
225         return msg_context;
226 }
227
228 static struct mmal_msg_context *
229 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
230 {
231         return idr_find(&instance->context_map, handle);
232 }
233
234 static void
235 release_msg_context(struct mmal_msg_context *msg_context)
236 {
237         struct vchiq_mmal_instance *instance = msg_context->instance;
238
239         mutex_lock(&instance->context_map_lock);
240         idr_remove(&instance->context_map, msg_context->handle);
241         mutex_unlock(&instance->context_map_lock);
242         kfree(msg_context);
243 }
244
/* workqueue scheduled callback
 *
 * we do this because it is important we do not call any other vchiq
 * sync calls from within the message delivery thread
 */
static void buffer_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context, u.bulk.work);
	struct mmal_buffer *buffer = msg_context->u.bulk.buffer;

	if (!buffer) {
		pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
		       __func__, msg_context);
		return;
	}

	/* copy the transfer results staged in the context into the buffer */
	buffer->length = msg_context->u.bulk.buffer_used;
	buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
	buffer->dts = msg_context->u.bulk.dts;
	buffer->pts = msg_context->u.bulk.pts;
	buffer->cmd = msg_context->u.bulk.cmd;

	/* data buffers (cmd == 0) were counted up in buffer_from_host();
	 * balance the count now the buffer has come back from the VPU
	 */
	if (!buffer->cmd)
		atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);

	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
					    msg_context->u.bulk.port,
					    msg_context->u.bulk.status,
					    msg_context->u.bulk.buffer);

	/* event buffers (cmd != 0) hold the port's event_context_mutex,
	 * taken via trylock in event_to_host_cb(); release it only after
	 * the callback has consumed the event
	 */
	if (buffer->cmd)
		mutex_unlock(&msg_context->u.bulk.port->event_context_mutex);
}
279
280 /* workqueue scheduled callback to handle receiving buffers
281  *
282  * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
283  * If we block in the service_callback context then we can't process the
284  * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
285  * vchi_bulk_queue_receive() call to complete.
286  */
287 static void buffer_to_host_work_cb(struct work_struct *work)
288 {
289         struct mmal_msg_context *msg_context =
290                 container_of(work, struct mmal_msg_context,
291                              u.bulk.buffer_to_host_work);
292         struct vchiq_mmal_instance *instance = msg_context->instance;
293         unsigned long len = msg_context->u.bulk.buffer_used;
294         int ret;
295
296         if (!len)
297                 /* Dummy receive to ensure the buffers remain in order */
298                 len = 8;
299         /* queue the bulk submission */
300         vchi_service_use(instance->handle);
301         ret = vchi_bulk_queue_receive(instance->handle,
302                                       msg_context->u.bulk.buffer->buffer,
303                                       /* Actual receive needs to be a multiple
304                                        * of 4 bytes
305                                        */
306                                       (len + 3) & ~3,
307                                       VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
308                                       VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
309                                       msg_context);
310
311         vchi_service_release(instance->handle);
312
313         if (ret != 0)
314                 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
315                        __func__, msg_context, ret);
316 }
317
318 /* enqueue a bulk receive for a given message context */
319 static int bulk_receive(struct vchiq_mmal_instance *instance,
320                         struct mmal_msg *msg,
321                         struct mmal_msg_context *msg_context)
322 {
323         unsigned long rd_len;
324
325         rd_len = msg->u.buffer_from_host.buffer_header.length;
326
327         if (!msg_context->u.bulk.buffer) {
328                 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
329
330                 /* todo: this is a serious error, we should never have
331                  * committed a buffer_to_host operation to the mmal
332                  * port without the buffer to back it up (underflow
333                  * handling) and there is no obvious way to deal with
334                  * this - how is the mmal servie going to react when
335                  * we fail to do the xfer and reschedule a buffer when
336                  * it arrives? perhaps a starved flag to indicate a
337                  * waiting bulk receive?
338                  */
339
340                 return -EINVAL;
341         }
342
343         /* ensure we do not overrun the available buffer */
344         if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
345                 rd_len = msg_context->u.bulk.buffer->buffer_size;
346                 pr_warn("short read as not enough receive buffer space\n");
347                 /* todo: is this the correct response, what happens to
348                  * the rest of the message data?
349                  */
350         }
351
352         /* store length */
353         msg_context->u.bulk.buffer_used = rd_len;
354         msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
355         msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
356         msg_context->u.bulk.cmd = msg->u.buffer_from_host.buffer_header.cmd;
357
358         queue_work(msg_context->instance->bulk_wq,
359                    &msg_context->u.bulk.buffer_to_host_work);
360
361         return 0;
362 }
363
364 /* data in message, memcpy from packet into output buffer */
365 static int inline_receive(struct vchiq_mmal_instance *instance,
366                           struct mmal_msg *msg,
367                           struct mmal_msg_context *msg_context)
368 {
369         memcpy(msg_context->u.bulk.buffer->buffer,
370                msg->u.buffer_from_host.short_data,
371                msg->u.buffer_from_host.payload_in_message);
372
373         msg_context->u.bulk.buffer_used =
374             msg->u.buffer_from_host.payload_in_message;
375
376         return 0;
377 }
378
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Builds and sends the message that hands @buf to the VPU on @port.
 * The bulk data (if any) comes back later via buffer_to_host_cb.
 * Returns 0 on success or a negative errno.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	/* refuse buffers for ports that are not enabled */
	if (!port->enabled)
		return -EINVAL;

	/* NOTE(review): label says "instance" but the value printed is
	 * instance->handle - confirm which was intended
	 */
	pr_debug("instance:%p buffer:%p\n", instance->handle, buf);

	/* get context - must have been allocated with the buffer */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	/* count the buffer as owned by the VPU; buffer_work_cb
	 * decrements when it comes back
	 */
	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header: zero-copy ports pass the VC handle, otherwise
	 * the (truncated) kernel address of the buffer
	 */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	if (port->zero_copy) {
		m.u.buffer_from_host.buffer_header.data = buf->vc_handle;
	} else {
		m.u.buffer_from_host.buffer_header.data =
			(u32)(unsigned long)buf->buffer;
	}

	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	if (port->type == MMAL_PORT_TYPE_OUTPUT) {
		/* output buffers go out empty - the VPU fills them */
		m.u.buffer_from_host.buffer_header.length = 0;
		m.u.buffer_from_host.buffer_header.offset = 0;
		m.u.buffer_from_host.buffer_header.flags = 0;
		m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
		m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
	} else {
		/* input buffers carry the caller's data and metadata */
		m.u.buffer_from_host.buffer_header.length = buf->length;
		m.u.buffer_from_host.buffer_header.offset = 0;
		m.u.buffer_from_host.buffer_header.flags = buf->mmal_flags;
		m.u.buffer_from_host.buffer_header.pts = buf->pts;
		m.u.buffer_from_host.buffer_header.dts = buf->dts;
	}

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					&m,
					sizeof(struct mmal_msg_header) +
					sizeof(m.u.buffer_from_host));

	vchi_service_release(instance->handle);

	return ret;
}
470
471 /* deals with receipt of event to host message */
472 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
473                              struct mmal_msg *msg, u32 msg_len)
474 {
475         int comp_idx = msg->u.event_to_host.client_component;
476         struct vchiq_mmal_component *component =
477                                         &instance->component[comp_idx];
478         struct vchiq_mmal_port *port = NULL;
479         struct mmal_msg_context *msg_context;
480         u32 port_num = msg->u.event_to_host.port_num;
481
482         if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
483                 pr_err("%s: MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n",
484                        __func__);
485                 return;
486         }
487
488         switch (msg->u.event_to_host.port_type) {
489         case MMAL_PORT_TYPE_CONTROL:
490                 if (port_num) {
491                         pr_err("%s: port_num of %u >= number of ports 1",
492                                __func__, port_num);
493                         return;
494                 }
495                 port = &component->control;
496                 break;
497         case MMAL_PORT_TYPE_INPUT:
498                 if (port_num >= component->inputs) {
499                         pr_err("%s: port_num of %u >= number of ports %u",
500                                __func__, port_num,
501                                port_num >= component->inputs);
502                         return;
503                 }
504                 port = &component->input[port_num];
505                 break;
506         case MMAL_PORT_TYPE_OUTPUT:
507                 if (port_num >= component->outputs) {
508                         pr_err("%s: port_num of %u >= number of ports %u",
509                                __func__, port_num,
510                                port_num >= component->outputs);
511                         return;
512                 }
513                 port = &component->output[port_num];
514                 break;
515         case MMAL_PORT_TYPE_CLOCK:
516                 if (port_num >= component->clocks) {
517                         pr_err("%s: port_num of %u >= number of ports %u",
518                                __func__, port_num,
519                                port_num >= component->clocks);
520                         return;
521                 }
522                 port = &component->clock[port_num];
523                 break;
524         default:
525                 break;
526         }
527
528         if (!mutex_trylock(&port->event_context_mutex)) {
529                 pr_err("dropping event 0x%x\n", msg->u.event_to_host.cmd);
530                 return;
531         }
532         msg_context = port->event_context;
533
534         if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
535                 /* message reception had an error */
536                 //pr_warn
537                 pr_err("%s: error %d in reply\n", __func__, msg->h.status);
538
539                 msg_context->u.bulk.status = msg->h.status;
540         } else if (msg->u.event_to_host.length > MMAL_WORKER_EVENT_SPACE) {
541                 /* data is not in message, queue a bulk receive */
542                 pr_err("%s: payload not in message - bulk receive??! NOT SUPPORTED\n",
543                        __func__);
544                 msg_context->u.bulk.status = -1;
545         } else {
546                 memcpy(msg_context->u.bulk.buffer->buffer,
547                        msg->u.event_to_host.data,
548                        msg->u.event_to_host.length);
549
550                 msg_context->u.bulk.buffer_used =
551                     msg->u.event_to_host.length;
552
553                 msg_context->u.bulk.mmal_flags = 0;
554                 msg_context->u.bulk.dts = MMAL_TIME_UNKNOWN;
555                 msg_context->u.bulk.pts = MMAL_TIME_UNKNOWN;
556                 msg_context->u.bulk.cmd = msg->u.event_to_host.cmd;
557
558                 pr_debug("event component:%u port type:%d num:%d cmd:0x%x length:%d\n",
559                          msg->u.event_to_host.client_component,
560                          msg->u.event_to_host.port_type,
561                          msg->u.event_to_host.port_num,
562                          msg->u.event_to_host.cmd, msg->u.event_to_host.length);
563         }
564
565         schedule_work(&msg_context->u.bulk.work);
566 }
567
/* deals with receipt of buffer to host message
 *
 * Looks up the message context sent with the original buffer_from_host,
 * then dispatches on how the payload is being returned: error status,
 * zero-copy (metadata only), empty buffer, bulk transfer, or short
 * inline payload.  Bulk submissions return early - the port callback is
 * scheduled from the bulk completion path instead.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	/* recover the context from the client_context handle we placed
	 * in drvbuf when the buffer was sent
	 */
	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	msg_context->u.bulk.mmal_flags =
				msg->u.buffer_from_host.buffer_header.flags;

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.is_zero_copy) {
		/*
		 * Zero copy buffer, so nothing to do.
		 * Copy buffer info and make callback.
		 */
		msg_context->u.bulk.buffer_used =
				msg->u.buffer_from_host.buffer_header.length;
		msg_context->u.bulk.mmal_flags =
				msg->u.buffer_from_host.buffer_header.flags;
		msg_context->u.bulk.dts =
				msg->u.buffer_from_host.buffer_header.dts;
		msg_context->u.bulk.pts =
				msg->u.buffer_from_host.buffer_header.pts;
		msg_context->u.bulk.cmd =
				msg->u.buffer_from_host.buffer_header.cmd;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS still goes through a (dummy) bulk receive
			 * so buffer ordering is preserved
			 */
			msg_context->u.bulk.status =
			    bulk_receive(instance, msg, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
662
663 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
664                             struct mmal_msg_context *msg_context)
665 {
666         msg_context->u.bulk.status = 0;
667
668         /* schedule the port callback */
669         schedule_work(&msg_context->u.bulk.work);
670 }
671
672 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
673                           struct mmal_msg_context *msg_context)
674 {
675         pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
676
677         msg_context->u.bulk.status = -EINTR;
678
679         schedule_work(&msg_context->u.bulk.work);
680 }
681
/* incoming event service callback
 *
 * Entry point for all VCHI activity on the MMAL service.  For message
 * arrival, buffer/event messages are dispatched to their handlers and
 * the held message is released here; for sync replies the held message
 * is stored in the waiter's context (which becomes responsible for
 * releasing it) before the completion is signalled.
 */
static void service_callback(void *param,
			     const VCHI_CALLBACK_REASON_T reason,
			     void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = param;
	int status;
	u32 msg_len;
	struct mmal_msg *msg;
	struct vchi_held_msg msg_handle;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return;
	}

	switch (reason) {
	case VCHI_CALLBACK_MSG_AVAILABLE:
		status = vchi_msg_hold(instance->handle, (void **)&msg,
				       &msg_len, VCHI_FLAGS_NONE, &msg_handle);
		if (status) {
			pr_err("Unable to dequeue a message (%d)\n", status);
			break;
		}

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* echo of our own send - nothing to do */
			vchi_held_msg_release(&msg_handle);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchi_held_msg_release(&msg_handle);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchi_held_msg_release(&msg_handle);
				break;
			}

			/* fill in context values; note the held message is
			 * NOT released here - ownership passes to the
			 * waiter via u.sync.msg_handle
			 */
			msg_context->u.sync.msg_handle = msg_handle;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHI_CALLBACK_BULK_RECEIVED:
		/* bulk_ctx is the mmal_msg_context of the transfer */
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}
}
784
/* Send a message to the VPU and block until the matching reply arrives.
 *
 * The caller-built @msg (header plus @payload_len bytes of payload) is
 * queued on the VCHI service.  A per-call message context provides a
 * completion which the service callback signals when a reply carrying
 * the same context handle comes back.
 *
 * On success returns 0 and hands the held reply to the caller through
 * @msg_out / @msg_handle_out; the caller is responsible for releasing
 * it with vchi_held_msg_release().  Returns a negative errno on
 * failure: -EINVAL for an oversize payload, the queueing error from
 * VCHI, or -ETIME if no reply arrives within SYNC_MSG_TIMEOUT seconds.
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchi_held_msg *msg_handle_out)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long timeout;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	msg->h.magic = MMAL_MAGIC;
	/* the context handle lets the service callback find this context */
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					msg,
					sizeof(struct mmal_msg_header) +
					payload_len);

	vchi_service_release(instance->handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	/* block until the callback completes us (or give up) */
	timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
					      SYNC_MSG_TIMEOUT * HZ);
	if (timeout == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	/* hand the held reply over to the caller before dropping context */
	*msg_out = msg_context->u.sync.msg;
	*msg_handle_out = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
848
849 static void dump_port_info(struct vchiq_mmal_port *port)
850 {
851         pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
852
853         pr_debug("buffer minimum num:%d size:%d align:%d\n",
854                  port->minimum_buffer.num,
855                  port->minimum_buffer.size, port->minimum_buffer.alignment);
856
857         pr_debug("buffer recommended num:%d size:%d align:%d\n",
858                  port->recommended_buffer.num,
859                  port->recommended_buffer.size,
860                  port->recommended_buffer.alignment);
861
862         pr_debug("buffer current values num:%d size:%d align:%d\n",
863                  port->current_buffer.num,
864                  port->current_buffer.size, port->current_buffer.alignment);
865
866         pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
867                  port->format.type,
868                  port->format.encoding, port->format.encoding_variant);
869
870         pr_debug("                  bitrate:%d flags:0x%x\n",
871                  port->format.bitrate, port->format.flags);
872
873         if (port->format.type == MMAL_ES_TYPE_VIDEO) {
874                 pr_debug
875                     ("es video format: width:%d height:%d colourspace:0x%x\n",
876                      port->es.video.width, port->es.video.height,
877                      port->es.video.color_space);
878
879                 pr_debug("               : crop xywh %d,%d,%d,%d\n",
880                          port->es.video.crop.x,
881                          port->es.video.crop.y,
882                          port->es.video.crop.width, port->es.video.crop.height);
883                 pr_debug("               : framerate %d/%d  aspect %d/%d\n",
884                          port->es.video.frame_rate.num,
885                          port->es.video.frame_rate.den,
886                          port->es.video.par.num, port->es.video.par.den);
887         }
888 }
889
890 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
891 {
892         /* todo do readonly fields need setting at all? */
893         p->type = port->type;
894         p->index = port->index;
895         p->index_all = 0;
896         p->is_enabled = port->enabled;
897         p->buffer_num_min = port->minimum_buffer.num;
898         p->buffer_size_min = port->minimum_buffer.size;
899         p->buffer_alignment_min = port->minimum_buffer.alignment;
900         p->buffer_num_recommended = port->recommended_buffer.num;
901         p->buffer_size_recommended = port->recommended_buffer.size;
902
903         /* only three writable fields in a port */
904         p->buffer_num = port->current_buffer.num;
905         p->buffer_size = port->current_buffer.size;
906         p->userdata = (u32)(unsigned long)port;
907 }
908
909 static int port_info_set(struct vchiq_mmal_instance *instance,
910                          struct vchiq_mmal_port *port)
911 {
912         int ret;
913         struct mmal_msg m;
914         struct mmal_msg *rmsg;
915         struct vchi_held_msg rmsg_handle;
916
917         pr_debug("setting port info port %p\n", port);
918         if (!port)
919                 return -1;
920         dump_port_info(port);
921
922         m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
923
924         m.u.port_info_set.component_handle = port->component->handle;
925         m.u.port_info_set.port_type = port->type;
926         m.u.port_info_set.port_index = port->index;
927
928         port_to_mmal_msg(port, &m.u.port_info_set.port);
929
930         /* elementary stream format setup */
931         m.u.port_info_set.format.type = port->format.type;
932         m.u.port_info_set.format.encoding = port->format.encoding;
933         m.u.port_info_set.format.encoding_variant =
934             port->format.encoding_variant;
935         m.u.port_info_set.format.bitrate = port->format.bitrate;
936         m.u.port_info_set.format.flags = port->format.flags;
937
938         memcpy(&m.u.port_info_set.es, &port->es,
939                sizeof(union mmal_es_specific_format));
940
941         m.u.port_info_set.format.extradata_size = port->format.extradata_size;
942         memcpy(&m.u.port_info_set.extradata, port->format.extradata,
943                port->format.extradata_size);
944
945         ret = send_synchronous_mmal_msg(instance, &m,
946                                         sizeof(m.u.port_info_set),
947                                         &rmsg, &rmsg_handle);
948         if (ret)
949                 return ret;
950
951         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
952                 /* got an unexpected message type in reply */
953                 ret = -EINVAL;
954                 goto release_msg;
955         }
956
957         /* return operation status */
958         ret = -rmsg->u.port_info_get_reply.status;
959
960         pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
961                  port->component->handle, port->handle);
962
963 release_msg:
964         vchi_held_msg_release(&rmsg_handle);
965
966         return ret;
967 }
968
/* use port info get message to retrieve port information and cache it
 * in @port: enabled state, port handle, type/index, buffer requirements
 * and the full stream format including extradata.
 *
 * Returns 0 on success, -EINVAL on an unexpected reply type, or the
 * negated MMAL status from the VPU.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchi_held_msg rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status (MMAL_MSG_STATUS_SUCCESS is 0) */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	/* normalise the enabled flag to 0/1 */
	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = 0;
	else
		port->enabled = 1;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended alignment is copied from the minimum
	 * alignment field, and recommended_buffer.size is never filled in
	 * — confirm this is intentional
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	/* point the format at our own copy of the es data */
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1063
1064 /* create comonent on vc */
1065 static int create_component(struct vchiq_mmal_instance *instance,
1066                             struct vchiq_mmal_component *component,
1067                             const char *name)
1068 {
1069         int ret;
1070         struct mmal_msg m;
1071         struct mmal_msg *rmsg;
1072         struct vchi_held_msg rmsg_handle;
1073
1074         /* build component create message */
1075         m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
1076         m.u.component_create.client_component = component->client_component;
1077         strncpy(m.u.component_create.name, name,
1078                 sizeof(m.u.component_create.name));
1079
1080         ret = send_synchronous_mmal_msg(instance, &m,
1081                                         sizeof(m.u.component_create),
1082                                         &rmsg, &rmsg_handle);
1083         if (ret)
1084                 return ret;
1085
1086         if (rmsg->h.type != m.h.type) {
1087                 /* got an unexpected message type in reply */
1088                 ret = -EINVAL;
1089                 goto release_msg;
1090         }
1091
1092         ret = -rmsg->u.component_create_reply.status;
1093         if (ret != MMAL_MSG_STATUS_SUCCESS)
1094                 goto release_msg;
1095
1096         /* a valid component response received */
1097         component->handle = rmsg->u.component_create_reply.component_handle;
1098         component->inputs = rmsg->u.component_create_reply.input_num;
1099         component->outputs = rmsg->u.component_create_reply.output_num;
1100         component->clocks = rmsg->u.component_create_reply.clock_num;
1101
1102         pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
1103                  component->handle,
1104                  component->inputs, component->outputs, component->clocks);
1105
1106 release_msg:
1107         vchi_held_msg_release(&rmsg_handle);
1108
1109         return ret;
1110 }
1111
1112 /* destroys a component on vc */
1113 static int destroy_component(struct vchiq_mmal_instance *instance,
1114                              struct vchiq_mmal_component *component)
1115 {
1116         int ret;
1117         struct mmal_msg m;
1118         struct mmal_msg *rmsg;
1119         struct vchi_held_msg rmsg_handle;
1120
1121         m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
1122         m.u.component_destroy.component_handle = component->handle;
1123
1124         ret = send_synchronous_mmal_msg(instance, &m,
1125                                         sizeof(m.u.component_destroy),
1126                                         &rmsg, &rmsg_handle);
1127         if (ret)
1128                 return ret;
1129
1130         if (rmsg->h.type != m.h.type) {
1131                 /* got an unexpected message type in reply */
1132                 ret = -EINVAL;
1133                 goto release_msg;
1134         }
1135
1136         ret = -rmsg->u.component_destroy_reply.status;
1137
1138 release_msg:
1139
1140         vchi_held_msg_release(&rmsg_handle);
1141
1142         return ret;
1143 }
1144
1145 /* enable a component on vc */
1146 static int enable_component(struct vchiq_mmal_instance *instance,
1147                             struct vchiq_mmal_component *component)
1148 {
1149         int ret;
1150         struct mmal_msg m;
1151         struct mmal_msg *rmsg;
1152         struct vchi_held_msg rmsg_handle;
1153
1154         m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1155         m.u.component_enable.component_handle = component->handle;
1156
1157         ret = send_synchronous_mmal_msg(instance, &m,
1158                                         sizeof(m.u.component_enable),
1159                                         &rmsg, &rmsg_handle);
1160         if (ret)
1161                 return ret;
1162
1163         if (rmsg->h.type != m.h.type) {
1164                 /* got an unexpected message type in reply */
1165                 ret = -EINVAL;
1166                 goto release_msg;
1167         }
1168
1169         ret = -rmsg->u.component_enable_reply.status;
1170
1171 release_msg:
1172         vchi_held_msg_release(&rmsg_handle);
1173
1174         return ret;
1175 }
1176
1177 /* disable a component on vc */
1178 static int disable_component(struct vchiq_mmal_instance *instance,
1179                              struct vchiq_mmal_component *component)
1180 {
1181         int ret;
1182         struct mmal_msg m;
1183         struct mmal_msg *rmsg;
1184         struct vchi_held_msg rmsg_handle;
1185
1186         m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1187         m.u.component_disable.component_handle = component->handle;
1188
1189         ret = send_synchronous_mmal_msg(instance, &m,
1190                                         sizeof(m.u.component_disable),
1191                                         &rmsg, &rmsg_handle);
1192         if (ret)
1193                 return ret;
1194
1195         if (rmsg->h.type != m.h.type) {
1196                 /* got an unexpected message type in reply */
1197                 ret = -EINVAL;
1198                 goto release_msg;
1199         }
1200
1201         ret = -rmsg->u.component_disable_reply.status;
1202
1203 release_msg:
1204
1205         vchi_held_msg_release(&rmsg_handle);
1206
1207         return ret;
1208 }
1209
1210 /* get version of mmal implementation */
1211 static int get_version(struct vchiq_mmal_instance *instance,
1212                        u32 *major_out, u32 *minor_out)
1213 {
1214         int ret;
1215         struct mmal_msg m;
1216         struct mmal_msg *rmsg;
1217         struct vchi_held_msg rmsg_handle;
1218
1219         m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1220
1221         ret = send_synchronous_mmal_msg(instance, &m,
1222                                         sizeof(m.u.version),
1223                                         &rmsg, &rmsg_handle);
1224         if (ret)
1225                 return ret;
1226
1227         if (rmsg->h.type != m.h.type) {
1228                 /* got an unexpected message type in reply */
1229                 ret = -EINVAL;
1230                 goto release_msg;
1231         }
1232
1233         *major_out = rmsg->u.version.major;
1234         *minor_out = rmsg->u.version.minor;
1235
1236 release_msg:
1237         vchi_held_msg_release(&rmsg_handle);
1238
1239         return ret;
1240 }
1241
1242 /* do a port action with a port as a parameter */
1243 static int port_action_port(struct vchiq_mmal_instance *instance,
1244                             struct vchiq_mmal_port *port,
1245                             enum mmal_msg_port_action_type action_type)
1246 {
1247         int ret;
1248         struct mmal_msg m;
1249         struct mmal_msg *rmsg;
1250         struct vchi_held_msg rmsg_handle;
1251
1252         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1253         m.u.port_action_port.component_handle = port->component->handle;
1254         m.u.port_action_port.port_handle = port->handle;
1255         m.u.port_action_port.action = action_type;
1256
1257         port_to_mmal_msg(port, &m.u.port_action_port.port);
1258
1259         ret = send_synchronous_mmal_msg(instance, &m,
1260                                         sizeof(m.u.port_action_port),
1261                                         &rmsg, &rmsg_handle);
1262         if (ret)
1263                 return ret;
1264
1265         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1266                 /* got an unexpected message type in reply */
1267                 ret = -EINVAL;
1268                 goto release_msg;
1269         }
1270
1271         ret = -rmsg->u.port_action_reply.status;
1272
1273         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1274                  __func__,
1275                  ret, port->component->handle, port->handle,
1276                  port_action_type_names[action_type], action_type);
1277
1278 release_msg:
1279         vchi_held_msg_release(&rmsg_handle);
1280
1281         return ret;
1282 }
1283
/* do a port action with handles as parameters
 *
 * Used for connection-style actions that reference a second
 * component/port pair by handle rather than by a full port
 * description.  Returns 0 on success, -EINVAL on an unexpected reply
 * type, or the negated MMAL status from the VPU.
 */
static int port_action_handle(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      enum mmal_msg_port_action_type action_type,
			      u32 connect_component_handle,
			      u32 connect_port_handle)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchi_held_msg rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;

	m.u.port_action_handle.component_handle = port->component->handle;
	m.u.port_action_handle.port_handle = port->handle;
	m.u.port_action_handle.action = action_type;

	/* peer identified purely by its component/port handles */
	m.u.port_action_handle.connect_component_handle =
	    connect_component_handle;
	m.u.port_action_handle.connect_port_handle = connect_port_handle;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_action_handle),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_action_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle,
		 port_action_type_names[action_type],
		 action_type, connect_component_handle, connect_port_handle);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1331
/* Set parameter @parameter_id on @port to the @value_size bytes at
 * @value via a PORT_PARAMETER_SET message.
 *
 * Returns 0 on success, -EINVAL on an unexpected reply type, or the
 * negated MMAL status from the VPU.
 */
static int port_parameter_set(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchi_held_msg rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;

	m.u.port_parameter_set.component_handle = port->component->handle;
	m.u.port_parameter_set.port_handle = port->handle;
	m.u.port_parameter_set.id = parameter_id;
	/* parameter size covers the id+size header (2 u32s) plus value */
	m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
	memcpy(&m.u.port_parameter_set.value, value, value_size);

	/* payload is the 4 u32 fields above plus the value itself, so
	 * only as much of the value buffer as needed goes on the wire
	 */
	ret = send_synchronous_mmal_msg(instance, &m,
					(4 * sizeof(u32)) + value_size,
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_set_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1372
/* Read parameter @parameter_id from @port into @value.
 *
 * On entry *@value_size is the capacity of @value; on return it is the
 * true size of the parameter as reported by the VPU, which may exceed
 * the number of bytes actually copied (copying is clamped to the
 * caller's buffer).
 *
 * NOTE(review): unlike the other helpers the MMAL status is returned
 * un-negated here, so callers see the raw (positive) status value —
 * confirm this asymmetry is intended.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchi_held_msg rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* requested size includes the id+size header (2 u32s) */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	ret = rmsg->u.port_parameter_get_reply.status;

	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}
	/* Always report the size of the returned parameter to the caller */
	*value_size = rmsg->u.port_parameter_get_reply.size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1431
/* disables a port and drains buffers from it
 *
 * Marks the port disabled, sends the DISABLE port action, returns any
 * still-queued buffers to the client via the buffer callback (with
 * zeroed length/flags), and finally refreshes the cached port info.
 * No-op if the port is already disabled.  Caller must hold the
 * instance's vchiq_mutex.
 */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	if (!port->enabled)
		return 0;

	/* mark disabled before the VPU round-trip so no new buffers are
	 * submitted meanwhile
	 */
	port->enabled = 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain all queued buffers on port. This should only
		 * apply to buffers that have been queued before the port
		 * has been enabled. If the port has been enabled and buffers
		 * passed, then the buffers should have been removed from this
		 * list, and we should get the relevant callbacks via VCHIQ
		 * to release the buffers.
		 */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			if (port->buffer_cb) {
				/* return the buffer empty: no payload,
				 * no timestamps, no command
				 */
				mmalbuf->length = 0;
				mmalbuf->mmal_flags = 0;
				mmalbuf->dts = MMAL_TIME_UNKNOWN;
				mmalbuf->pts = MMAL_TIME_UNKNOWN;
				mmalbuf->cmd = 0;
				port->buffer_cb(instance,
						port, 0, mmalbuf);
			}
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* resync cached state with the VPU's view of the port */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1482
/* enable a port
 *
 * Sends the ENABLE port action, marks the port enabled, and — when a
 * buffer callback is registered — submits up to current_buffer.num of
 * the queued buffers to the VPU before refreshing the cached port
 * info.  No-op if already enabled.  Caller must hold the instance's
 * vchiq_mutex.
 */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;

	if (port->enabled)
		return 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = 1;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			list_del(buf_head);
			/* starting hdr_count at 1 and testing after the
			 * increment submits exactly current_buffer.num
			 * buffers at most
			 */
			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	/* resync cached state with the VPU's view of the port */
	ret = port_info_get(instance, port);

done:
	return ret;
}
1525
1526 /* ------------------------------------------------------------------
1527  * Exported API
1528  *------------------------------------------------------------------
1529  */
1530
1531 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1532                                struct vchiq_mmal_port *port)
1533 {
1534         int ret;
1535
1536         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1537                 return -EINTR;
1538
1539         ret = port_info_set(instance, port);
1540         if (ret)
1541                 goto release_unlock;
1542
1543         /* read what has actually been set */
1544         ret = port_info_get(instance, port);
1545
1546 release_unlock:
1547         mutex_unlock(&instance->vchiq_mutex);
1548
1549         return ret;
1550 }
1551 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1552
1553 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1554                                   struct vchiq_mmal_port *port,
1555                                   u32 parameter, void *value, u32 value_size)
1556 {
1557         int ret;
1558
1559         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1560                 return -EINTR;
1561
1562         ret = port_parameter_set(instance, port, parameter, value, value_size);
1563
1564         mutex_unlock(&instance->vchiq_mutex);
1565
1566         if (parameter == MMAL_PARAMETER_ZERO_COPY && !ret)
1567                 port->zero_copy = !!(*(bool *)value);
1568
1569         return ret;
1570 }
1571 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1572
1573 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1574                                   struct vchiq_mmal_port *port,
1575                                   u32 parameter, void *value, u32 *value_size)
1576 {
1577         int ret;
1578
1579         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1580                 return -EINTR;
1581
1582         ret = port_parameter_get(instance, port, parameter, value, value_size);
1583
1584         mutex_unlock(&instance->vchiq_mutex);
1585
1586         return ret;
1587 }
1588 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1589
1590 /* enable a port
1591  *
1592  * enables a port and queues buffers for satisfying callbacks if we
1593  * provide a callback handler
1594  */
1595 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1596                            struct vchiq_mmal_port *port,
1597                            vchiq_mmal_buffer_cb buffer_cb)
1598 {
1599         int ret;
1600
1601         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1602                 return -EINTR;
1603
1604         /* already enabled - noop */
1605         if (port->enabled) {
1606                 ret = 0;
1607                 goto unlock;
1608         }
1609
1610         port->buffer_cb = buffer_cb;
1611
1612         ret = port_enable(instance, port);
1613
1614 unlock:
1615         mutex_unlock(&instance->vchiq_mutex);
1616
1617         return ret;
1618 }
1619 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
1620
1621 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1622                             struct vchiq_mmal_port *port)
1623 {
1624         int ret;
1625
1626         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1627                 return -EINTR;
1628
1629         if (!port->enabled) {
1630                 mutex_unlock(&instance->vchiq_mutex);
1631                 return 0;
1632         }
1633
1634         ret = port_disable(instance, port);
1635
1636         mutex_unlock(&instance->vchiq_mutex);
1637
1638         return ret;
1639 }
1640 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1641
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Disconnects any existing connection on @src first. Passing a NULL @dst
 * performs only the disconnect. On a new connection the src format is
 * copied to dst, pushed to the VPU, read back, and the two ports are then
 * connected on the VPU side.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		/* the source must be disabled before it can be disconnected */
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		/* the far end was implicitly disabled by the disconnect */
		src->connected->enabled = 0;
		src->connected = NULL;
	}

	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
	dst->es.video.frame_rate.den = src->es.video.frame_rate.den;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
1729
1730 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1731                              struct vchiq_mmal_port *port,
1732                              struct mmal_buffer *buffer)
1733 {
1734         unsigned long flags = 0;
1735         int ret;
1736
1737         /*
1738          * We really want to do this in mmal_vchi_buffer_init but can't as
1739          * videobuf2 won't let us have the dmabuf there.
1740          */
1741         if (port->zero_copy && buffer->dma_buf && !buffer->vcsm_handle) {
1742                 pr_debug("%s: import dmabuf %p\n", __func__, buffer->dma_buf);
1743                 ret = vc_sm_cma_import_dmabuf(buffer->dma_buf,
1744                                               &buffer->vcsm_handle);
1745                 if (ret) {
1746                         pr_err("%s: vc_sm_import_dmabuf_fd failed, ret %d\n",
1747                                __func__, ret);
1748                         return ret;
1749                 }
1750
1751                 buffer->vc_handle = vc_sm_cma_int_handle(buffer->vcsm_handle);
1752                 if (!buffer->vc_handle) {
1753                         pr_err("%s: vc_sm_int_handle failed %d\n",
1754                                __func__, ret);
1755                         vc_sm_cma_free(buffer->vcsm_handle);
1756                         return ret;
1757                 }
1758                 pr_debug("%s: import dmabuf %p - got vc handle %08X\n",
1759                          __func__, buffer->dma_buf, buffer->vc_handle);
1760         }
1761
1762         ret = buffer_from_host(instance, port, buffer);
1763         if (ret == -EINVAL) {
1764                 /* Port is disabled. Queue for when it is enabled. */
1765                 spin_lock_irqsave(&port->slock, flags);
1766                 list_add_tail(&buffer->list, &port->buffers);
1767                 spin_unlock_irqrestore(&port->slock, flags);
1768         }
1769
1770         return 0;
1771 }
1772 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1773
1774 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1775                           struct mmal_buffer *buf)
1776 {
1777         struct mmal_msg_context *msg_context = get_msg_context(instance);
1778
1779         if (IS_ERR(msg_context))
1780                 return (PTR_ERR(msg_context));
1781
1782         buf->msg_context = msg_context;
1783         return 0;
1784 }
1785 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1786
1787 int mmal_vchi_buffer_unmap(struct mmal_buffer *buf)
1788 {
1789         int ret = 0;
1790
1791         if (buf->vcsm_handle) {
1792                 int ret;
1793
1794                 pr_debug("%s: vc_sm_cma_free on handle %p\n", __func__,
1795                          buf->vcsm_handle);
1796                 ret = vc_sm_cma_free(buf->vcsm_handle);
1797                 if (ret)
1798                         pr_err("%s: vcsm_free failed, ret %d\n", __func__, ret);
1799                 buf->vcsm_handle = 0;
1800         }
1801         return ret;
1802 }
1803 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_unmap);
1804
1805 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1806 {
1807         struct mmal_msg_context *msg_context = buf->msg_context;
1808
1809         if (msg_context)
1810                 release_msg_context(msg_context);
1811         buf->msg_context = NULL;
1812
1813         mmal_vchi_buffer_unmap(buf);
1814         return 0;
1815 }
1816 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
1817
1818 static void init_event_context(struct vchiq_mmal_instance *instance,
1819                                struct vchiq_mmal_port *port)
1820 {
1821         struct mmal_msg_context *ctx = get_msg_context(instance);
1822
1823         mutex_init(&port->event_context_mutex);
1824
1825         port->event_context = ctx;
1826         ctx->u.bulk.instance = instance;
1827         ctx->u.bulk.port = port;
1828         ctx->u.bulk.buffer =
1829                 kzalloc(sizeof(*ctx->u.bulk.buffer), GFP_KERNEL);
1830         if (!ctx->u.bulk.buffer)
1831                 goto release_msg_context;
1832         ctx->u.bulk.buffer->buffer = kzalloc(MMAL_WORKER_EVENT_SPACE,
1833                                              GFP_KERNEL);
1834         if (!ctx->u.bulk.buffer->buffer)
1835                 goto release_buffer;
1836
1837         INIT_WORK(&ctx->u.bulk.work, buffer_work_cb);
1838         return;
1839
1840 release_buffer:
1841         kfree(ctx->u.bulk.buffer);
1842 release_msg_context:
1843         release_msg_context(ctx);
1844 }
1845
1846 static void free_event_context(struct vchiq_mmal_port *port)
1847 {
1848         struct mmal_msg_context *ctx = port->event_context;
1849
1850         if (!ctx)
1851                 return;
1852
1853         kfree(ctx->u.bulk.buffer->buffer);
1854         kfree(ctx->u.bulk.buffer);
1855         release_msg_context(ctx);
1856         port->event_context = NULL;
1857 }
1858
1859 static void release_all_event_contexts(struct vchiq_mmal_component *component)
1860 {
1861         int idx;
1862
1863         for (idx = 0; idx < component->inputs; idx++)
1864                 free_event_context(&component->input[idx]);
1865         for (idx = 0; idx < component->outputs; idx++)
1866                 free_event_context(&component->output[idx]);
1867         for (idx = 0; idx < component->clocks; idx++)
1868                 free_event_context(&component->clock[idx]);
1869         free_event_context(&component->control);
1870 }
1871
/* Initialise a mmal component and its ports
 *
 * Claims a free slot in instance->component[], creates the VPU-side
 * component named @name, then queries and initialises every port
 * (control, inputs, outputs, clocks). On success *component_out points
 * at the claimed slot; on failure the slot is released again.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx;                /* port index */
	struct vchiq_mmal_component *component = NULL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* find an unused slot in the fixed-size component table */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = 1;
			break;
		}
	}

	if (!component) {
		ret = -EINVAL;  /* todo is this correct error? */
		goto unlock;
	}

	/* We need a handle to reference back to our component structure.
	 * Use the array index in instance->component rather than rolling
	 * another IDR.
	 */
	component->client_component = idx;

	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}

	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;
	init_event_context(instance, &component->control);

	/* idx is reused here as the per-type port index; the port counts
	 * were filled in by create_component()/port_info_get above.
	 */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
		init_event_context(instance, &component->input[idx]);
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
		init_event_context(instance, &component->output[idx]);
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
		init_event_context(instance, &component->clock[idx]);
	}

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	/* tear down the VPU component and any event contexts created so far */
	destroy_component(instance, component);
	release_all_event_contexts(component);
unlock:
	/* component may be NULL when no free slot was found */
	if (component)
		component->in_use = 0;
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1976
1977 /*
1978  * cause a mmal component to be destroyed
1979  */
1980 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1981                                   struct vchiq_mmal_component *component)
1982 {
1983         int ret;
1984
1985         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1986                 return -EINTR;
1987
1988         if (component->enabled)
1989                 ret = disable_component(instance, component);
1990
1991         ret = destroy_component(instance, component);
1992
1993         component->in_use = 0;
1994
1995         release_all_event_contexts(component);
1996
1997         mutex_unlock(&instance->vchiq_mutex);
1998
1999         return ret;
2000 }
2001 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
2002
2003 /*
2004  * cause a mmal component to be enabled
2005  */
2006 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
2007                                 struct vchiq_mmal_component *component)
2008 {
2009         int ret;
2010
2011         if (mutex_lock_interruptible(&instance->vchiq_mutex))
2012                 return -EINTR;
2013
2014         if (component->enabled) {
2015                 mutex_unlock(&instance->vchiq_mutex);
2016                 return 0;
2017         }
2018
2019         ret = enable_component(instance, component);
2020         if (ret == 0)
2021                 component->enabled = 1;
2022
2023         mutex_unlock(&instance->vchiq_mutex);
2024
2025         return ret;
2026 }
2027 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
2028
2029 /*
2030  * cause a mmal component to be enabled
2031  */
2032 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
2033                                  struct vchiq_mmal_component *component)
2034 {
2035         int ret;
2036
2037         if (mutex_lock_interruptible(&instance->vchiq_mutex))
2038                 return -EINTR;
2039
2040         if (!component->enabled) {
2041                 mutex_unlock(&instance->vchiq_mutex);
2042                 return 0;
2043         }
2044
2045         ret = disable_component(instance, component);
2046         if (ret == 0)
2047                 component->enabled = 0;
2048
2049         mutex_unlock(&instance->vchiq_mutex);
2050
2051         return ret;
2052 }
2053 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
2054
2055 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
2056                        u32 *major_out, u32 *minor_out)
2057 {
2058         int ret;
2059
2060         if (mutex_lock_interruptible(&instance->vchiq_mutex))
2061                 return -EINTR;
2062
2063         ret = get_version(instance, major_out, minor_out);
2064
2065         mutex_unlock(&instance->vchiq_mutex);
2066
2067         return ret;
2068 }
2069 EXPORT_SYMBOL_GPL(vchiq_mmal_version);
2070
/* Tear down an instance created by vchiq_mmal_init() and free all of its
 * resources. The instance pointer is invalid after this returns.
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* take a service use count so the close is issued while active */
	vchi_service_use(instance->handle);

	status = vchi_service_close(instance->handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	/* drain any outstanding bulk-receive work before freeing state */
	flush_workqueue(instance->bulk_wq);
	destroy_workqueue(instance->bulk_wq);

	vfree(instance->bulk_scratch);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
2101
2102 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
2103 {
2104         int status;
2105         struct vchiq_mmal_instance *instance;
2106         static VCHI_INSTANCE_T vchi_instance;
2107         struct service_creation params = {
2108                 .version                = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
2109                 .service_id             = VC_MMAL_SERVER_NAME,
2110                 .callback               = service_callback,
2111                 .callback_param         = NULL,
2112         };
2113
2114         /* compile time checks to ensure structure size as they are
2115          * directly (de)serialised from memory.
2116          */
2117
2118         /* ensure the header structure has packed to the correct size */
2119         BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
2120
2121         /* ensure message structure does not exceed maximum length */
2122         BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
2123
2124         /* mmal port struct is correct size */
2125         BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
2126
2127         /* create a vchi instance */
2128         status = vchi_initialise(&vchi_instance);
2129         if (status) {
2130                 pr_err("Failed to initialise VCHI instance (status=%d)\n",
2131                        status);
2132                 return -EIO;
2133         }
2134
2135         status = vchi_connect(vchi_instance);
2136         if (status) {
2137                 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
2138                 return -EIO;
2139         }
2140
2141         instance = kzalloc(sizeof(*instance), GFP_KERNEL);
2142
2143         if (!instance)
2144                 return -ENOMEM;
2145
2146         mutex_init(&instance->vchiq_mutex);
2147
2148         instance->bulk_scratch = vmalloc(PAGE_SIZE);
2149
2150         mutex_init(&instance->context_map_lock);
2151         idr_init_base(&instance->context_map, 1);
2152
2153         params.callback_param = instance;
2154
2155         instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
2156                                                     WQ_MEM_RECLAIM);
2157         if (!instance->bulk_wq)
2158                 goto err_free;
2159
2160         status = vchi_service_open(vchi_instance, &params, &instance->handle);
2161         if (status) {
2162                 pr_err("Failed to open VCHI service connection (status=%d)\n",
2163                        status);
2164                 goto err_close_services;
2165         }
2166
2167         vchi_service_release(instance->handle);
2168
2169         *out_instance = instance;
2170
2171         return 0;
2172
2173 err_close_services:
2174         vchi_service_close(instance->handle);
2175         destroy_workqueue(instance->bulk_wq);
2176 err_free:
2177         vfree(instance->bulk_scratch);
2178         kfree(instance);
2179         return -ENODEV;
2180 }
2181 EXPORT_SYMBOL_GPL(vchiq_mmal_init);