cfg->attach = d_att;
cfg->vaddr = vaddr;
cfg->sg = sg;
+ ge2d_log_dbg("%s\n", __func__);
return ret;
vmap_err:
*addr = PFN_PHYS(page_to_pfn(page));
ret = 0;
}
- ge2d_dma_buffer_unmap(cfg);
return ret;
}
dma_buf_detach(dbuf, d_att);
dma_buf_put(dbuf);
+
+ ge2d_log_dbg("%s\n", __func__);
}
void ge2d_dma_buffer_dma_flush(struct device *dev, int fd)
pos = pos->next;
list_move_tail(&pitem->list, &wq->free_queue);
spin_unlock(&wq->lock);
-
+ /* Buffers stayed mapped while the hw op ran; release them now
+ * that the item goes back on the free queue. Clear the stale
+ * pointers after kfree() so a recycled queue item can never
+ * double-free or re-unmap them.
+ */
+ if (pitem->config.src_dma_cfg.dma_used) {
+ ge2d_dma_buffer_unmap(pitem->config.src_dma_cfg.dma_cfg);
+ kfree(pitem->config.src_dma_cfg.dma_cfg);
+ pitem->config.src_dma_cfg.dma_cfg = NULL;
+ pitem->config.src_dma_cfg.dma_used = 0;
+ }
+ if (pitem->config.src2_dma_cfg.dma_used) {
+ ge2d_dma_buffer_unmap(pitem->config.src2_dma_cfg.dma_cfg);
+ kfree(pitem->config.src2_dma_cfg.dma_cfg);
+ pitem->config.src2_dma_cfg.dma_cfg = NULL;
+ pitem->config.src2_dma_cfg.dma_used = 0;
+ }
+ if (pitem->config.dst_dma_cfg.dma_used) {
+ ge2d_dma_buffer_unmap(pitem->config.dst_dma_cfg.dma_cfg);
+ kfree(pitem->config.dst_dma_cfg.dma_cfg);
+ pitem->config.dst_dma_cfg.dma_cfg = NULL;
+ pitem->config.dst_dma_cfg.dma_used = 0;
+ }
pitem = (struct ge2d_queue_item_s *)pos;
} while (pos != head);
ge2d_manager.last_wq = wq;
return IRQ_HANDLED;
}
+/**
+ * ge2d_wq_get_dma_cfg - select the per-context dma cfg slot for a plane
+ * @wq: ge2d context that owns the config
+ * @data_type: AML_GE2D_SRC / AML_GE2D_SRC2 / AML_GE2D_DST selector
+ *
+ * Return: pointer to the matching dma cfg slot inside @wq->config,
+ * or NULL (with an error log) for an unknown @data_type.
+ */
+struct ge2d_dma_cfg_s *ge2d_wq_get_dma_cfg(struct ge2d_context_s *wq,
+ unsigned int data_type)
+{
+ struct ge2d_dma_cfg_s *dma_cfg = NULL;
+
+ switch (data_type) {
+ case AML_GE2D_SRC:
+ dma_cfg = &wq->config.src_dma_cfg;
+ break;
+ case AML_GE2D_SRC2:
+ dma_cfg = &wq->config.src2_dma_cfg;
+ break;
+ case AML_GE2D_DST:
+ dma_cfg = &wq->config.dst_dma_cfg;
+ break;
+ default:
+ ge2d_log_err("wrong data_type\n");
+ break;
+ }
+
+ return dma_cfg;
+}
+
struct ge2d_src1_data_s *ge2d_wq_get_src_data(struct ge2d_context_s *wq)
{
return &wq->config.src1_data;
}
static int build_ge2d_addr_config_dma(
+ struct ge2d_context_s *context,
struct config_planes_ion_s *plane,
unsigned int format,
unsigned int *addr,
unsigned int *stride,
- unsigned int dir
+ unsigned int dir,
+ unsigned int data_type
)
{
int ret = -1;
bpp_value);
if (plane) {
if (plane[0].shared_fd) {
- struct aml_dma_cfg cfg;
+ struct ge2d_dma_cfg_s *cfg = NULL;
+ struct aml_dma_cfg *dma_cfg = NULL;
- cfg.fd = plane[0].shared_fd;
- cfg.dev = &(ge2d_manager.pdev->dev);
- cfg.dir = dir;
- ret = ge2d_dma_buffer_get_phys(&cfg, &addr_temp);
+ cfg = ge2d_wq_get_dma_cfg(context, data_type);
+ if (!cfg)
+ return -1;
+ /* kzalloc() can fail; do not dereference it unchecked */
+ dma_cfg = kzalloc(sizeof(*dma_cfg), GFP_KERNEL);
+ if (!dma_cfg)
+ return -1;
+ dma_cfg->fd = plane[0].shared_fd;
+ dma_cfg->dev = &(ge2d_manager.pdev->dev);
+ dma_cfg->dir = dir;
+ /* publish the mapping only once fully set up, so cleanup
+ * never sees dma_used == 1 with a NULL dma_cfg
+ */
+ cfg->dma_cfg = dma_cfg;
+ cfg->dma_used = 1;
+ ret = ge2d_dma_buffer_get_phys(dma_cfg, &addr_temp);
if (ret != 0)
return ret;
}
}
return ret;
}
-static int build_ge2d_config_ex_dma(struct config_planes_ion_s *plane,
+static int build_ge2d_config_ex_dma(struct ge2d_context_s *context,
+ struct config_planes_ion_s *plane,
unsigned int format,
unsigned int *canvas_index,
int index,
unsigned int *r_offset,
- unsigned int dir)
+ unsigned int dir,
+ unsigned int data_type)
{
int bpp_value = bpp(format);
int ret = -1;
index &= 0xff;
if (plane) {
if (plane[0].shared_fd) {
- struct aml_dma_cfg cfg;
+ struct ge2d_dma_cfg_s *cfg = NULL;
+ struct aml_dma_cfg *dma_cfg = NULL;
- cfg.fd = plane[0].shared_fd;
- cfg.dev = &(ge2d_manager.pdev->dev);
- cfg.dir = dir;
- ret = ge2d_dma_buffer_get_phys(&cfg, &addr);
+ cfg = ge2d_wq_get_dma_cfg(context, data_type);
+ if (!cfg)
+ return -1;
+ /* kzalloc() can fail; do not dereference it unchecked */
+ dma_cfg = kzalloc(sizeof(*dma_cfg), GFP_KERNEL);
+ if (!dma_cfg)
+ return -1;
+ dma_cfg->fd = plane[0].shared_fd;
+ dma_cfg->dev = &(ge2d_manager.pdev->dev);
+ dma_cfg->dir = dir;
+ /* publish the mapping only once fully set up, so cleanup
+ * never sees dma_used == 1 with a NULL dma_cfg
+ */
+ cfg->dma_cfg = dma_cfg;
+ cfg->dma_used = 1;
+ ret = ge2d_dma_buffer_get_phys(dma_cfg, &addr);
ge2d_log_info("phys: addr=%lx\n", addr);
if (ret != 0)
return ret;
} else if (ge2d_config_mem->src1_mem_alloc_type ==
AML_GE2D_MEM_DMABUF) {
if (build_ge2d_addr_config_dma(
+ context,
&ge2d_config->src_planes[0],
ge2d_config->src_para.format,
&src_addr,
&src_stride,
- DMA_TO_DEVICE) < 0)
+ DMA_TO_DEVICE,
+ AML_GE2D_SRC) < 0)
return -1;
ge2d_log_dbg("ge2d dma alloc phy_addr:0x%x,stride=0x%x,format:0x%x\n",
src_addr,
} else if (ge2d_config_mem->src1_mem_alloc_type ==
AML_GE2D_MEM_DMABUF) {
if (build_ge2d_config_ex_dma(
+ context,
&ge2d_config->src_planes[0],
ge2d_config->src_para.format,
&index,
ALLOC_CANVAS_INDEX +
alloc_canvas_offset,
&alloc_canvas_offset,
- DMA_TO_DEVICE) < 0)
+ DMA_TO_DEVICE,
+ AML_GE2D_SRC) < 0)
return -1;
ge2d_config->src_para.canvas_index = index;
ge2d_log_dbg("ge2d dma alloc canvas index:0x%x, format:0x%x\n",
} else if (ge2d_config_mem->src2_mem_alloc_type ==
AML_GE2D_MEM_DMABUF) {
if (build_ge2d_addr_config_dma(
+ context,
&ge2d_config->src2_planes[0],
ge2d_config->src2_para.format,
&src2_addr,
&src2_stride,
- DMA_TO_DEVICE) < 0)
+ DMA_TO_DEVICE,
+ AML_GE2D_SRC2) < 0)
return -1;
ge2d_log_dbg("ge2d dma alloc phy_addr:0x%x,stride=0x%x,format:0x%x\n",
src2_addr,
} else if (ge2d_config_mem->src2_mem_alloc_type ==
AML_GE2D_MEM_DMABUF) {
if (build_ge2d_config_ex_dma(
+ context,
&ge2d_config->src2_planes[0],
ge2d_config->src2_para.format,
&index,
ALLOC_CANVAS_INDEX +
alloc_canvas_offset,
&alloc_canvas_offset,
- DMA_TO_DEVICE) < 0)
+ DMA_TO_DEVICE,
+ AML_GE2D_SRC2) < 0)
return -1;
ge2d_config->src2_para.canvas_index = index;
ge2d_log_dbg("ge2d src2 dma alloc, canvas index:0x%x,format:0x%x\n",
} else if (ge2d_config_mem->dst_mem_alloc_type ==
AML_GE2D_MEM_DMABUF) {
if (build_ge2d_addr_config_dma(
+ context,
&ge2d_config->dst_planes[0],
ge2d_config->dst_para.format,
&dst_addr,
&dst_stride,
- DMA_FROM_DEVICE) < 0)
+ DMA_FROM_DEVICE,
+ AML_GE2D_DST) < 0)
return -1;
ge2d_log_dbg("ge2d dma alloc phy_addr:0x%x,stride=0x%x,format:0x%x\n",
dst_addr,
} else if (ge2d_config_mem->dst_mem_alloc_type ==
AML_GE2D_MEM_DMABUF) {
if (build_ge2d_config_ex_dma(
+ context,
&ge2d_config->dst_planes[0],
ge2d_config->dst_para.format,
&index,
ALLOC_CANVAS_INDEX +
alloc_canvas_offset,
&alloc_canvas_offset,
- DMA_FROM_DEVICE) < 0)
+ DMA_FROM_DEVICE,
+ AML_GE2D_DST) < 0)
return -1;
ge2d_config->dst_para.canvas_index = index;
ge2d_log_dbg("ge2d: dst dma alloc, index:0x%x, format:0x%x\n",
*addr = PFN_PHYS(page_to_pfn(page));
ret = 0;
}
- gdc_dma_buffer_unmap(cfg);
return ret;
}
{
long ret = -1;
unsigned long addr;
- struct aml_dma_cfg cfg;
+ struct aml_dma_cfg *cfg = NULL;
struct gdc_cmd_s *gdc_cmd = &fh->gdc_cmd;
struct gdc_config_s *gc = &gdc_cmd->gdc_config;
switch (gc->format) {
case NV12:
if (gs_ex->input_buffer.plane_number == 1) {
- cfg.fd = gs_ex->input_buffer.y_base_fd;
- cfg.dev = &fh->gdev->pdev->dev;
- cfg.dir = DMA_TO_DEVICE;
- ret = gdc_dma_buffer_get_phys(&cfg, &addr);
+ cfg = &fh->dma_cfg.input_cfg_plane1;
+ cfg->fd = gs_ex->input_buffer.y_base_fd;
+ cfg->dev = &fh->gdev->pdev->dev;
+ cfg->dir = DMA_TO_DEVICE;
+
+ ret = gdc_dma_buffer_get_phys(cfg, &addr);
if (ret < 0) {
gdc_log(LOG_ERR,
"dma import input fd %d failed\n",
gdc_log(LOG_INFO, "1 plane get input addr=%x\n",
gdc_cmd->y_base_addr);
} else if (gs_ex->input_buffer.plane_number == 2) {
- cfg.fd = gs_ex->input_buffer.y_base_fd;
- cfg.dev = &fh->gdev->pdev->dev;
- cfg.dir = DMA_TO_DEVICE;
- ret = gdc_dma_buffer_get_phys(&cfg, &addr);
+ cfg = &fh->dma_cfg.input_cfg_plane1;
+ cfg->fd = gs_ex->input_buffer.y_base_fd;
+ cfg->dev = &fh->gdev->pdev->dev;
+ cfg->dir = DMA_TO_DEVICE;
+ ret = gdc_dma_buffer_get_phys(cfg, &addr);
if (ret < 0) {
gdc_log(LOG_ERR,
"dma import input fd %d failed\n",
return -EINVAL;
}
gdc_cmd->y_base_addr = addr;
-
- cfg.fd = gs_ex->input_buffer.uv_base_fd;
- cfg.dev = &fh->gdev->pdev->dev;
- cfg.dir = DMA_TO_DEVICE;
- ret = gdc_dma_buffer_get_phys(&cfg, &addr);
+ cfg = &fh->dma_cfg.input_cfg_plane2;
+ cfg->fd = gs_ex->input_buffer.uv_base_fd;
+ cfg->dev = &fh->gdev->pdev->dev;
+ cfg->dir = DMA_TO_DEVICE;
+ ret = gdc_dma_buffer_get_phys(cfg, &addr);
if (ret < 0) {
gdc_log(LOG_ERR,
"dma import input fd %d failed\n",
}
break;
case Y_GREY:
- cfg.fd = gs_ex->input_buffer.y_base_fd;
- cfg.dev = &(fh->gdev->pdev->dev);
- cfg.dir = DMA_TO_DEVICE;
- ret = gdc_dma_buffer_get_phys(&cfg, &addr);
+ cfg = &fh->dma_cfg.input_cfg_plane1;
+ cfg->fd = gs_ex->input_buffer.y_base_fd;
+ cfg->dev = &(fh->gdev->pdev->dev);
+ cfg->dir = DMA_TO_DEVICE;
+ ret = gdc_dma_buffer_get_phys(cfg, &addr);
if (ret < 0) {
gdc_log(LOG_ERR,
"dma import input fd %d failed\n",
long ret;
unsigned long addr = 0;
size_t len;
- struct aml_dma_cfg cfg;
+ struct aml_dma_cfg *cfg = NULL;
struct gdc_cmd_s *gdc_cmd = &fh->gdc_cmd;
if (fh == NULL || gs_ex == NULL) {
}
} else if (gs_ex->output_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF) {
/* dma alloc */
- cfg.fd = gs_ex->output_buffer.y_base_fd;
- cfg.dev = &(gdc_manager.gdc_dev->pdev->dev);
- cfg.dir = DMA_FROM_DEVICE;
- ret = gdc_dma_buffer_get_phys(&cfg, &addr);
+ cfg = &fh->dma_cfg.output_cfg;
+ cfg->fd = gs_ex->output_buffer.y_base_fd;
+ cfg->dev = &(gdc_manager.gdc_dev->pdev->dev);
+ cfg->dir = DMA_FROM_DEVICE;
+ ret = gdc_dma_buffer_get_phys(cfg, &addr);
if (ret < 0) {
gdc_log(LOG_ERR, "dma import out fd %d failed\n",
gs_ex->output_buffer.y_base_fd);
}
} else if (gs_ex->config_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF) {
/* dma alloc */
- cfg.fd = gs_ex->config_buffer.y_base_fd;
- cfg.dev = &(gdc_manager.gdc_dev->pdev->dev);
- cfg.dir = DMA_TO_DEVICE;
- ret = gdc_dma_buffer_get_phys(&cfg, &addr);
+ cfg = &fh->dma_cfg.config_cfg;
+ cfg->fd = gs_ex->config_buffer.y_base_fd;
+ cfg->dev = &(gdc_manager.gdc_dev->pdev->dev);
+ cfg->dir = DMA_TO_DEVICE;
+ ret = gdc_dma_buffer_get_phys(cfg, &addr);
if (ret < 0) {
gdc_log(LOG_ERR, "dma import config fd %d failed\n",
gs_ex->config_buffer.shared_fd);
gdc_log(LOG_INFO, "%s, input addr=%x\n",
__func__, fh->gdc_cmd.y_base_addr);
mutex_lock(&fh->gdev->d_mutext);
- #if 1
+
if (gs_ex->config_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF)
gdc_buffer_dma_flush(gs_ex->config_buffer.shared_fd);
- #endif
+
ret = gdc_run(gdc_cmd);
if (ret < 0)
gdc_log(LOG_ERR, "gdc process failed ret = %ld\n", ret);
gdc_log(LOG_ERR, "gdc timeout\n");
gdc_stop(gdc_cmd);
+ mutex_unlock(&fh->gdev->d_mutext);
#if 0
if (gs_ex->output_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF)
gdc_buffer_cache_flush(gs_ex->output_buffer.shared_fd);
#endif
- mutex_unlock(&fh->gdev->d_mutext);
+ if (gs_ex->input_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF) {
+ gdc_dma_buffer_unmap(&fh->dma_cfg.input_cfg_plane1);
+ if (gs_ex->input_buffer.plane_number == 2)
+ gdc_dma_buffer_unmap(&fh->dma_cfg.input_cfg_plane2);
+ }
+ if (gs_ex->config_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF)
+ gdc_dma_buffer_unmap(&fh->dma_cfg.config_cfg);
+ if (gs_ex->output_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF)
+ gdc_dma_buffer_unmap(&fh->dma_cfg.output_cfg);
return 0;
}
#include <linux/miscdevice.h>
#include "system_gdc_io.h"
#include "gdc_api.h"
-
+#include "gdc_dmabuf.h"
struct gdc_cmd_s;
struct gdc_manager_s {
struct miscdevice misc_dev;
};
+/* Per-filehandle dmabuf mappings, held until the gdc op finishes and
+ * released via gdc_dma_buffer_unmap() after gdc_run()/gdc_stop().
+ */
+struct gdc_dma_cfg_t {
+ struct aml_dma_cfg input_cfg_plane1; /* input Y (or single plane) */
+ struct aml_dma_cfg input_cfg_plane2; /* input UV, 2-plane formats */
+ struct aml_dma_cfg config_cfg; /* gdc config buffer */
+ struct aml_dma_cfg output_cfg; /* output buffer */
+};
+
struct mgdc_fh_s {
struct list_head list;
wait_queue_head_t irq_queue;
unsigned long c_len;
struct gdc_dma_cfg y_dma_cfg;
struct gdc_dma_cfg uv_dma_cfg;
+ struct gdc_dma_cfg_t dma_cfg;
};
irqreturn_t interrupt_handler_next(int irq, void *param);
unsigned char hang_flag;
};
+/* Tracks one in-flight dmabuf mapping for a ge2d data plane. The
+ * mapping is created at config time and torn down in the irq handler.
+ */
+struct ge2d_dma_cfg_s {
+ int dma_used; /* 1 while dma_cfg holds a live mapping */
+ void *dma_cfg; /* struct aml_dma_cfg *, kfree'd after unmap */
+};
+
struct ge2d_config_s {
struct ge2d_gen_s gen;
struct ge2d_src1_data_s src1_data;
unsigned int v_scale_coef_type;
unsigned int h_scale_coef_type;
unsigned int update_flag;
+ struct ge2d_dma_cfg_s src_dma_cfg;
+ struct ge2d_dma_cfg_s src2_dma_cfg;
+ struct ge2d_dma_cfg_s dst_dma_cfg;
};
struct ge2d_dma_buf_s {
int len;
};
+/* Selects which dma cfg slot of a ge2d config a caller refers to;
+ * consumed by ge2d_wq_get_dma_cfg().
+ */
+enum ge2d_data_type_e {
+ AML_GE2D_SRC,
+ AML_GE2D_SRC2,
+ AML_GE2D_DST,
+ AML_GE2D_TYPE_INVALID,
+};
+
enum ge2d_src_dst_e {
OSD0_OSD0 = 0,
OSD0_OSD1,