union {
struct {
- /* work struct for defered callback - must come first */
+ /* work struct for buffer_cb callback */
struct work_struct work;
+ /* work struct for deferred callback */
+ struct work_struct buffer_to_host_work;
/* mmal instance */
struct vchiq_mmal_instance *instance;
/* mmal port */
/* component to use next */
int component_idx;
struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
+
+ /* ordered workqueue to process all bulk operations */
+ struct workqueue_struct *bulk_wq;
};
static struct mmal_msg_context *
msg_context->u.bulk.mmal_flags,
msg_context->u.bulk.dts,
msg_context->u.bulk.pts);
}
+/* workqueue scheduled callback to handle receiving buffers
+ *
+ * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
+ * If we block in the service_callback context then we can't process the
+ * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
+ * vchi_bulk_queue_receive() call to complete.
+ */
+static void buffer_to_host_work_cb(struct work_struct *work)
+{
+ struct mmal_msg_context *msg_context =
+ container_of(work, struct mmal_msg_context,
+ u.bulk.buffer_to_host_work);
+ struct vchiq_mmal_instance *instance = msg_context->instance;
+ unsigned long len = msg_context->u.bulk.buffer_used;
+ int ret;
+
+ if (!len)
+ /* Dummy receive to ensure the buffers remain in order */
+ len = 8;
+ /* queue the bulk submission */
+ vchi_service_use(instance->handle);
+ ret = vchi_bulk_queue_receive(instance->handle,
+ msg_context->u.bulk.buffer->buffer,
+ /* Actual receive needs to be a multiple
+ * of 4 bytes
+ */
+ (len + 3) & ~3,
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+ msg_context);
+
+ vchi_service_release(instance->handle);
+
+ if (ret != 0)
+ pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
+ __func__, msg_context, ret);
+}
/* enqueue a bulk receive for a given message context */
struct mmal_msg_context *msg_context)
{
unsigned long rd_len;
- int ret;
rd_len = msg->u.buffer_from_host.buffer_header.length;
msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
- /* queue the bulk submission */
- vchi_service_use(instance->handle);
- ret = vchi_bulk_queue_receive(instance->handle,
- msg_context->u.bulk.buffer->buffer,
- /* Actual receive needs to be a multiple
- * of 4 bytes
- */
- (rd_len + 3) & ~3,
- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
- msg_context);
-
- vchi_service_release(instance->handle);
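+ /* defer the actual bulk submission to the ordered workqueue */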
+ queue_work(msg_context->instance->bulk_wq,
+ &msg_context->u.bulk.buffer_to_host_work);
- return ret;
-}
-
-/* enque a dummy bulk receive for a given message context */
-static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
- struct mmal_msg_context *msg_context)
-{
- int ret;
-
- /* zero length indicates this was a dummy transfer */
- msg_context->u.bulk.buffer_used = 0;
-
- /* queue the bulk submission */
- vchi_service_use(instance->handle);
-
- ret = vchi_bulk_queue_receive(instance->handle,
- instance->bulk_scratch,
- 8,
- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
- msg_context);
-
- vchi_service_release(instance->handle);
-
- return ret;
+ return 0;
}
/* data in message, memcpy from packet into output buffer */
/* initialise work structure ready to schedule callback */
INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
+ INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
+ buffer_to_host_work_cb);
/* prep the buffer from host message */
memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
if (msg->u.buffer_from_host.buffer_header.flags &
MMAL_BUFFER_HEADER_FLAG_EOS) {
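+ /* replaces dummy_bulk_receive(); a zero-length buffer becomes a
+ * dummy receive issued from the workqueue
+ */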
msg_context->u.bulk.status =
- dummy_bulk_receive(instance, msg_context);
+ bulk_receive(instance, msg, msg_context);
if (msg_context->u.bulk.status == 0)
return; /* successful bulk submission, bulk
* completion will trigger callback
mutex_unlock(&instance->vchiq_mutex);
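+ /* make sure all deferred bulk receives have run before freeing resources */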
+ flush_workqueue(instance->bulk_wq);
+ destroy_workqueue(instance->bulk_wq);
+
vfree(instance->bulk_scratch);
idr_destroy(&instance->context_map);
params.callback_param = instance;
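+ /* bulk receives must be submitted in order, so use an ordered workqueue */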
+ instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
+ WQ_MEM_RECLAIM);
+ if (!instance->bulk_wq)
+ goto err_free;
+
status = vchi_service_open(vchi_instance, &params, &instance->handle);
if (status) {
pr_err("Failed to open VCHI service connection (status=%d)\n",
return 0;
err_close_services:
-
vchi_service_close(instance->handle);
+ destroy_workqueue(instance->bulk_wq);
+err_free:
vfree(instance->bulk_scratch);
kfree(instance);
return -ENODEV;