return 0;
}
+/*
+ * Release the VCHI resources and any dma_buf reference held by an MMAL
+ * buffer.  Always returns 0; the int return exists only to mirror the
+ * mmal_vchi_buffer_cleanup() convention.
+ */
+static int bcm2835_codec_mmal_buf_cleanup(struct mmal_buffer *mmal_buf)
+{
+ mmal_vchi_buffer_cleanup(mmal_buf);
+
+ /* Drop the reference taken via dma_buf_get()/vb2_core_expbuf_dmabuf()
+ * and clear the pointer so a later buf_init can re-map cleanly.
+ */
+ if (mmal_buf->dma_buf) {
+ dma_buf_put(mmal_buf->dma_buf);
+ mmal_buf->dma_buf = NULL;
+ }
+
+ return 0;
+}
+
static int bcm2835_codec_buf_init(struct vb2_buffer *vb)
{
struct bcm2835_codec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
vb);
struct m2m_mmal_buffer *buf = container_of(m2m, struct m2m_mmal_buffer,
m2m);
+ struct dma_buf *dma_buf;
int ret;
v4l2_dbg(4, debug, &ctx->dev->v4l2_dev, "%s: type: %d ptr %p\n",
if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
vb2_set_plane_payload(vb, 0, q_data->sizeimage);
- /*
- * We want to do this at init, but vb2_core_expbuf checks that the
- * index < q->num_buffers, and q->num_buffers only gets updated once
- * all the buffers are allocated.
- */
- if (!buf->mmal.dma_buf) {
- ret = vb2_core_expbuf_dmabuf(vb->vb2_queue,
- vb->vb2_queue->type, vb->index, 0,
- O_CLOEXEC, &buf->mmal.dma_buf);
- if (ret)
- v4l2_err(&ctx->dev->v4l2_dev, "%s: Failed to expbuf idx %d, ret %d\n",
- __func__, vb->index, ret);
- } else {
+ switch (vb->memory) {
+ case VB2_MEMORY_DMABUF:
+ dma_buf = dma_buf_get(vb->planes[0].m.fd);
+
+ if (dma_buf != buf->mmal.dma_buf) {
+ /* dmabuf either hasn't already been mapped, or it has
+ * changed.
+ */
+ if (buf->mmal.dma_buf) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "%s Buffer changed - why did the core not call cleanup?\n",
+ __func__);
+ bcm2835_codec_mmal_buf_cleanup(&buf->mmal);
+ }
+
+ buf->mmal.dma_buf = dma_buf;
+ }
ret = 0;
+ break;
+ case VB2_MEMORY_MMAP:
+ /*
+ * We want to do this at init, but vb2_core_expbuf checks that
+ * the index < q->num_buffers, and q->num_buffers only gets
+ * updated once all the buffers are allocated.
+ */
+ if (!buf->mmal.dma_buf) {
+ ret = vb2_core_expbuf_dmabuf(vb->vb2_queue,
+ vb->vb2_queue->type,
+ vb->index, 0,
+ O_CLOEXEC,
+ &buf->mmal.dma_buf);
+ if (ret)
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "%s: Failed to expbuf idx %d, ret %d\n",
+ __func__, vb->index, ret);
+ } else {
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
return ret;
v4l2_dbg(2, debug, &ctx->dev->v4l2_dev, "%s: ctx:%p, vb %p\n",
__func__, ctx, vb);
- mmal_vchi_buffer_cleanup(&buf->mmal);
-
- if (buf->mmal.dma_buf) {
- dma_buf_put(buf->mmal.dma_buf);
- buf->mmal.dma_buf = NULL;
- }
+ bcm2835_codec_mmal_buf_cleanup(&buf->mmal);
}
static int bcm2835_codec_start_streaming(struct vb2_queue *q,
m2m = container_of(vb2, struct v4l2_m2m_buffer, vb);
buf = container_of(m2m, struct m2m_mmal_buffer, m2m);
- mmal_vchi_buffer_cleanup(&buf->mmal);
- if (buf->mmal.dma_buf) {
- dma_buf_put(buf->mmal.dma_buf);
- buf->mmal.dma_buf = NULL;
- }
+ bcm2835_codec_mmal_buf_cleanup(&buf->mmal);
}
/* If both ports disabled, then disable the component */
}
EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
-int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
+/*
+ * Unmap/free the VCSM allocation backing @buf, leaving the message
+ * context alone so the buffer can be remapped later.
+ *
+ * NOTE(review): as shown here the outer 'ret' is shadowed by the inner
+ * 'int ret;' in the vcsm_handle branch, and the pr_err() appears to read
+ * it before assignment — the call that sets it looks elided from this
+ * hunk; confirm against the full patch.
+ */
+int mmal_vchi_buffer_unmap(struct mmal_buffer *buf)
{
- struct mmal_msg_context *msg_context = buf->msg_context;
-
- if (msg_context)
- release_msg_context(msg_context);
- buf->msg_context = NULL;
+ int ret = 0;
if (buf->vcsm_handle) {
int ret;
pr_err("%s: vcsm_free failed, ret %d\n", __func__, ret);
buf->vcsm_handle = 0;
}
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mmal_vchi_buffer_unmap);
+
+/*
+ * Full teardown: release the MMAL message context, then unmap the VCSM
+ * allocation via mmal_vchi_buffer_unmap().  Always returns 0.
+ */
+int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context = buf->msg_context;
+
+ if (msg_context)
+ release_msg_context(msg_context);
+ buf->msg_context = NULL;
+
+ mmal_vchi_buffer_unmap(buf);
return 0;
}
EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);