ge2d_log_dbg("buffer %d,exported as %d descriptor\n",
index, ret);
+ buffer->gd_buffer[index].fd = ret;
ge2d_exp_buf->fd = ret;
return 0;
}
return ret;
}
-int ge2d_dma_buffer_get_phys(struct aml_dma_cfg *cfg, unsigned long *addr)
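+/*
+ * Look up the physical address of a dma-buf fd that was allocated and
+ * exported by this driver, by matching the fd against the internal
+ * gd_buffer table.  Returns 0 on a hit, -1 if the fd is not ours.
+ */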
+static int ge2d_dma_buffer_get_phys_internal(struct aml_dma_buffer *buffer,
+ int fd, unsigned long *addr)
+{
+ int i = 0, ret = -1;
+ struct aml_dma_buf *dma_buf;
+
+ for (i = 0; i < AML_MAX_DMABUF; i++) {
+ if (buffer->gd_buffer[i].alloc &&
+ (fd == buffer->gd_buffer[i].fd)) {
+ dma_buf = buffer->gd_buffer[i].mem_priv;
+ *addr = dma_buf->dma_addr;
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
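+/*
+ * Resolve cfg->fd to a physical address: prefer the driver's own buffer
+ * table; if the fd belongs to an external dma-buf, map it and use the
+ * address of the first page of its sg table.
+ */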
+int ge2d_dma_buffer_get_phys(struct aml_dma_buffer *buffer,
+ struct aml_dma_cfg *cfg, unsigned long *addr)
{
struct sg_table *sg_table;
struct page *page;
- int ret;
+ int ret = -1;
- ret = ge2d_dma_buffer_map(cfg);
- if (ret < 0) {
- pr_err("gdc_dma_buffer_map failed\n");
- return ret;
+ if (cfg == NULL || (cfg->fd < 0)) {
+ pr_err("error input param");
+ return -EINVAL;
}
- if (cfg->sg) {
- sg_table = cfg->sg;
- page = sg_page(sg_table->sgl);
- *addr = PFN_PHYS(page_to_pfn(page));
- ret = 0;
+ ret = ge2d_dma_buffer_get_phys_internal(buffer, cfg->fd, addr);
+ if (ret < 0) {
+ ret = ge2d_dma_buffer_map(cfg);
+ if (ret < 0) {
+ pr_err("gdc_dma_buffer_map failed\n");
+ return ret;
+ }
+ if (cfg->sg) {
+ sg_table = cfg->sg;
+ page = sg_page(sg_table->sgl);
+ *addr = PFN_PHYS(page_to_pfn(page));
+ ret = 0;
+ }
}
return ret;
}
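+
+/*
+ * Counterpart of ge2d_dma_buffer_get_phys(): unmap the dma-buf only if
+ * the fd is not in the driver's own buffer table, since internally
+ * allocated buffers were never mapped here.
+ */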
+int ge2d_dma_buffer_unmap_info(struct aml_dma_buffer *buffer,
+ struct aml_dma_cfg *cfg)
+{
+ int i, found = 0;
+
+ if (cfg == NULL || (cfg->fd < 0)) {
+ pr_err("error input param");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < AML_MAX_DMABUF; i++) {
+ if (buffer->gd_buffer[i].alloc &&
+ (cfg->fd == buffer->gd_buffer[i].fd)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ ge2d_dma_buffer_unmap(cfg);
+ return 0;
+}
+
void ge2d_dma_buffer_unmap(struct aml_dma_cfg *cfg)
{
int fd = -1;
struct aml_dma_buf_priv {
void *mem_priv;
int index;
+ int fd;
unsigned int alloc;
struct dma_buf *dbuf;
};
struct ge2d_dmabuf_exp_s *ge2d_exp_buf);
int ge2d_dma_buffer_map(struct aml_dma_cfg *cfg);
void ge2d_dma_buffer_unmap(struct aml_dma_cfg *cfg);
-int ge2d_dma_buffer_get_phys(struct aml_dma_cfg *cfg, unsigned long *addr);
+int ge2d_dma_buffer_get_phys(struct aml_dma_buffer *buffer,
+ struct aml_dma_cfg *cfg, unsigned long *addr);
+int ge2d_dma_buffer_unmap_info(struct aml_dma_buffer *buffer,
+ struct aml_dma_cfg *cfg);
void ge2d_dma_buffer_dma_flush(struct device *dev, int fd);
void ge2d_dma_buffer_cache_flush(struct device *dev, int fd);
#endif
GE2D_FORMAT_S32_ARGB,/* BPP_TYPE_32_ARGB=32, */
};
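+
+/* Wrappers binding the dma-buf helpers to ge2d_manager's buffer table (defined below). */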
+static int ge2d_buffer_get_phys(struct aml_dma_cfg *cfg,
+ unsigned long *addr);
+static int ge2d_buffer_unmap(struct aml_dma_cfg *cfg);
+
static void ge2d_pre_init(void)
{
struct ge2d_gen_s ge2d_gen_cfg;
/* if dma buf detach it */
for (i = 0; i < MAX_PLANE; i++) {
if (pitem->config.src_dma_cfg[i].dma_used) {
- ge2d_dma_buffer_unmap((struct aml_dma_cfg *
+ ge2d_buffer_unmap((struct aml_dma_cfg *
)pitem->config.src_dma_cfg[i].dma_cfg);
pitem->config.src_dma_cfg[i].dma_used = 0;
kfree(pitem->config.src_dma_cfg[i].dma_cfg);
}
if (pitem->config.src2_dma_cfg[i].dma_used) {
- ge2d_dma_buffer_unmap((struct aml_dma_cfg *
+ ge2d_buffer_unmap((struct aml_dma_cfg *
)pitem->config.src2_dma_cfg[i].dma_cfg);
pitem->config.src2_dma_cfg[i].dma_used = 0;
kfree(pitem->config.src2_dma_cfg[i].dma_cfg);
}
if (pitem->config.dst_dma_cfg[i].dma_used) {
- ge2d_dma_buffer_unmap((struct aml_dma_cfg *
+ ge2d_buffer_unmap((struct aml_dma_cfg *
)pitem->config.dst_dma_cfg[i].dma_cfg);
pitem->config.dst_dma_cfg[i].dma_used = 0;
kfree(pitem->config.dst_dma_cfg[i].dma_cfg);
dma_cfg->dev = &(ge2d_manager.pdev->dev);
dma_cfg->dir = dir;
cfg->dma_cfg = dma_cfg;
- ret = ge2d_dma_buffer_get_phys(dma_cfg, &addr_temp);
+ ret = ge2d_buffer_get_phys(dma_cfg, &addr_temp);
if (ret != 0)
return ret;
}
dma_cfg->dev = &(ge2d_manager.pdev->dev);
dma_cfg->dir = dir;
cfg->dma_cfg = dma_cfg;
- ret = ge2d_dma_buffer_get_phys(dma_cfg, &addr);
+ ret = ge2d_buffer_get_phys(dma_cfg, &addr);
if (ret != 0)
return ret;
plane[i].addr = addr;
*r_offset += 1;
#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS
canvas_config(index++, plane[i].addr,
- plane[i].w * bpp_value,
- plane[i].h,
- CANVAS_ADDR_NOWRAP,
- CANVAS_BLKMODE_LINEAR);
+ plane[i].w * bpp_value,
+ plane[i].h,
+ CANVAS_ADDR_NOWRAP,
+ CANVAS_BLKMODE_LINEAR);
#endif
} else if (plane[i].addr) {
plane[i].addr += plane[0].addr;
*r_offset += 1;
#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS
canvas_config(index++, plane[i].addr,
- plane[i].w * bpp_value,
- plane[i].h,
- CANVAS_ADDR_NOWRAP,
- CANVAS_BLKMODE_LINEAR);
+ plane[i].w * bpp_value,
+ plane[i].h,
+ CANVAS_ADDR_NOWRAP,
+ CANVAS_BLKMODE_LINEAR);
#endif
}
}
ge2d_dma_buffer_cache_flush(dev, dma_fd);
}
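+
+/* Resolve a dma-buf physical address against ge2d_manager's buffer table. */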
+static int ge2d_buffer_get_phys(struct aml_dma_cfg *cfg, unsigned long *addr)
+{
+ return ge2d_dma_buffer_get_phys(ge2d_manager.buffer, cfg, addr);
+}
+
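+/* Unmap an external dma-buf; buffers owned by ge2d_manager are skipped. */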
+static int ge2d_buffer_unmap(struct aml_dma_cfg *cfg)
+{
+ return ge2d_dma_buffer_unmap_info(ge2d_manager.buffer, cfg);
+}
+
struct ge2d_context_s *create_ge2d_work_queue(void)
{
int i;