/* Initialize the DMA descriptor */
desc->p_fbd->fb_address =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
desc->p_fbd->next_fbd_address = 0;
set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
buf = cam->vb_bufs[frame ^ 0x1];
cam->vb_bufs[frame] = buf;
mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
- vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
set_bit(CF_SINGLE_BUFFER, &cam->flags);
singles++;
return;
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
list_del_init(&buf->queue);
mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
- vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
cam->vb_bufs[frame] = buf;
clear_bit(CF_SINGLE_BUFFER, &cam->flags);
}
}
if (buf->state == CSI_BUF_NEEDS_INIT) {
- sg_dma_address(sg) = vb2_dma_contig_plane_paddr(vb, 0);
+ sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0);
sg_dma_len(sg) = new_size;
buf->txd = ichan->dma_chan.device->device_prep_slave_sg(
dbg("memplanes= %d, colplanes= %d, pix_size= %d",
frame->fmt->memplanes, frame->fmt->colplanes, pix_size);
- paddr->y = vb2_dma_contig_plane_paddr(vb, 0);
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
if (frame->fmt->memplanes == 1) {
switch (frame->fmt->colplanes) {
}
} else {
if (frame->fmt->memplanes >= 2)
- paddr->cb = vb2_dma_contig_plane_paddr(vb, 1);
+ paddr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
if (frame->fmt->memplanes == 3)
- paddr->cr = vb2_dma_contig_plane_paddr(vb, 2);
+ paddr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
}
dbg("PHYS_ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
appropriate flags */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
- if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dec_y_addr) {
+ if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
memcpy(&dst_buf->b->v4l2_buf.timecode,
&src_buf->b->v4l2_buf.timecode,
sizeof(struct v4l2_timecode));
* check which videobuf it corresponds to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
/* Check if this is the buffer we're looking for */
- if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dspl_y_addr) {
+ if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
dst_buf->b->v4l2_buf.sequence = ctx->sequence;
return 0;
for (i = 0; i < ctx->src_fmt->num_planes; i++) {
if (IS_ERR_OR_NULL(ERR_PTR(
- vb2_dma_contig_plane_paddr(vb, i)))) {
+ vb2_dma_contig_plane_dma_addr(vb, i)))) {
mfc_err("Plane mem not allocated\n");
return -EINVAL;
}
i = vb->v4l2_buf.index;
ctx->dst_bufs[i].b = vb;
ctx->dst_bufs[i].cookie.raw.luma =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->dst_bufs[i].cookie.raw.chroma =
- vb2_dma_contig_plane_paddr(vb, 1);
+ vb2_dma_contig_plane_dma_addr(vb, 1);
ctx->dst_bufs_cnt++;
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (IS_ERR_OR_NULL(ERR_PTR(
- vb2_dma_contig_plane_paddr(vb, 0)))) {
+ vb2_dma_contig_plane_dma_addr(vb, 0)))) {
mfc_err("Plane memory not allocated\n");
return -EINVAL;
}
i = vb->v4l2_buf.index;
ctx->src_bufs[i].b = vb;
ctx->src_bufs[i].cookie.stream =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->src_bufs_cnt++;
} else {
mfc_err("s5p_mfc_buf_init: unknown queue type\n");
while (!list_empty(&ctx->ref_queue)) {
mb_entry = list_entry((&ctx->ref_queue)->next,
struct s5p_mfc_buf, list);
- mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
list_del(&mb_entry->list);
ctx->ref_queue_cnt--;
list_add_tail(&mb_entry->list, &ctx->src_queue);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
spin_lock_irqsave(&dev->irqlock, flags);
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
- src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
- src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
spin_unlock_irqrestore(&dev->irqlock, flags);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
if (slice_type >= 0) {
s5p_mfc_get_enc_frame_buffer(ctx, &enc_y_addr, &enc_c_addr);
list_for_each_entry(mb_entry, &ctx->src_queue, list) {
- mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
if ((enc_y_addr == mb_y_addr) &&
(enc_c_addr == mb_c_addr)) {
list_del(&mb_entry->list);
}
}
list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
- mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
if ((enc_y_addr == mb_y_addr) &&
(enc_c_addr == mb_c_addr)) {
list_del(&mb_entry->list);
return -EINVAL;
}
for (i = 0; i < fmt->num_planes; i++) {
- if (!vb2_dma_contig_plane_paddr(vb, i)) {
+ if (!vb2_dma_contig_plane_dma_addr(vb, i)) {
mfc_err("failed to get plane cookie\n");
return -EINVAL;
}
mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
vb->v4l2_buf.index, i,
- vb2_dma_contig_plane_paddr(vb, i));
+ vb2_dma_contig_plane_dma_addr(vb, i));
}
return 0;
}
i = vb->v4l2_buf.index;
ctx->dst_bufs[i].b = vb;
ctx->dst_bufs[i].cookie.stream =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->dst_bufs_cnt++;
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
ret = check_vb_with_fmt(ctx->src_fmt, vb);
i = vb->v4l2_buf.index;
ctx->src_bufs[i].b = vb;
ctx->src_bufs[i].cookie.raw.luma =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->src_bufs[i].cookie.raw.chroma =
- vb2_dma_contig_plane_paddr(vb, 1);
+ vb2_dma_contig_plane_dma_addr(vb, 1);
ctx->src_bufs_cnt++;
} else {
mfc_err("inavlid queue type: %d\n", vq->type);
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
temp_vb->used = 1;
s5p_mfc_set_dec_stream_buffer(ctx,
- vb2_dma_contig_plane_paddr(temp_vb->b, 0), ctx->consumed_stream,
+ vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), ctx->consumed_stream,
temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
index = temp_vb->b->v4l2_buf.index;
}
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
src_mb->used = 1;
- src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
- src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
dst_mb->used = 1;
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
s5p_mfc_set_dec_desc_buffer(ctx);
mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
s5p_mfc_set_dec_stream_buffer(ctx,
- vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
0, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
s5p_mfc_set_enc_ref_buffer(ctx);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
s5p_mfc_set_dec_stream_buffer(ctx,
- vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
0, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
dma_addr_t addr = 0;
if (buf)
- addr = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
}
mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
return;
}
- luma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
if (layer->fmt->num_subframes == 2) {
- chroma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 1);
+ chroma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 1);
} else {
/* FIXME: mxr_get_plane_size computes an integer division,
* which is slow and should not be performed in interrupt context */
bottom2 = CDBCR;
}
- phys_addr_top = vb2_dma_contig_plane_paddr(pcdev->active, 0);
+ phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);
ceu_write(pcdev, top1, phys_addr_top);
if (V4L2_FIELD_NONE != pcdev->field) {
struct vb2_dc_buf {
struct vb2_dc_conf *conf;
void *vaddr;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
unsigned long size;
struct vm_area_struct *vma;
atomic_t refcount;
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->paddr,
+ buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
GFP_KERNEL);
if (!buf->vaddr) {
dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
if (atomic_dec_and_test(&buf->refcount)) {
dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
- buf->paddr);
+ buf->dma_addr);
kfree(buf);
}
}
{
struct vb2_dc_buf *buf = buf_priv;
- return &buf->paddr;
+ return &buf->dma_addr;
}
static void *vb2_dma_contig_vaddr(void *buf_priv)
return -EINVAL;
}
- return vb2_mmap_pfn_range(vma, buf->paddr, buf->size,
+ return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
&vb2_common_vm_ops, &buf->handler);
}
{
struct vb2_dc_buf *buf;
struct vm_area_struct *vma;
- dma_addr_t paddr = 0;
+ dma_addr_t dma_addr = 0;
int ret;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- ret = vb2_get_contig_userptr(vaddr, size, &vma, &paddr);
+ ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
if (ret) {
printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
vaddr);
}
buf->size = size;
- buf->paddr = paddr;
+ buf->dma_addr = dma_addr;
buf->vma = vma;
return buf;
#include <linux/dma-mapping.h>
static inline dma_addr_t
-vb2_dma_contig_plane_paddr(struct vb2_buffer *vb, unsigned int plane_no)
+vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
{
- dma_addr_t *paddr = vb2_plane_cookie(vb, plane_no);
+ dma_addr_t *addr = vb2_plane_cookie(vb, plane_no);
- return *paddr;
+ return *addr;
}
void *vb2_dma_contig_init_ctx(struct device *dev);
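
A minimal usage sketch (not part of this patch) of how a driver's buf_queue
op consumes the renamed helper. struct my_cam_dev, my_cam_write() and
MY_REG_DMA_BASE are hypothetical placeholders, not symbols touched by this
series:

#include <linux/io.h>
#include <linux/kernel.h>
#include <media/videobuf2-dma-contig.h>

#define MY_REG_DMA_BASE	0x10	/* hypothetical DMA base-address register */

struct my_cam_dev {
	void __iomem *regs;	/* hypothetical MMIO window */
};

static void my_cam_write(struct my_cam_dev *cam, u32 reg, u32 val)
{
	iowrite32(val, cam->regs + reg);
}

/*
 * Queue a buffer: program plane 0's bus address into the DMA engine.
 * The helper returns a dma_addr_t, not a physical address, which is
 * exactly what the _paddr -> _dma_addr rename makes explicit.
 */
static void my_cam_buf_queue(struct vb2_buffer *vb)
{
	struct my_cam_dev *cam = vb2_get_drv_priv(vb->vb2_queue);
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);

	my_cam_write(cam, MY_REG_DMA_BASE, lower_32_bits(addr));
}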