#include <linux/scatterlist.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include "ge2d_log.h"
#include "ge2d_dmabuf.h"
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index);
+
+static void *aml_mm_vmap(phys_addr_t phys, unsigned long size)
+{
+ u32 offset, npages;
+ struct page **pages = NULL;
+ pgprot_t pgprot = PAGE_KERNEL;
+ void *vaddr;
+ int i;
+
+ offset = offset_in_page(phys);
+ npages = DIV_ROUND_UP(size + offset, PAGE_SIZE);
+
+ pages = vmalloc(sizeof(struct page *) * npages);
+ if (!pages)
+ return NULL;
+	for (i = 0; i < npages; i++)
+		pages[i] = phys_to_page(phys + ((phys_addr_t)i << PAGE_SHIFT));
+ /* pgprot = pgprot_writecombine(PAGE_KERNEL); */
+
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ if (!vaddr) {
+		pr_err("vmap failed, size: %u\n",
+			npages << PAGE_SHIFT);
+ vfree(pages);
+ return NULL;
+ }
+ vfree(pages);
+ ge2d_log_dbg("[HIGH-MEM-MAP] pa(%lx) to va(%p), size: %d\n",
+ (unsigned long)phys, vaddr, npages << PAGE_SHIFT);
+ return vaddr;
+}
+
+static void *aml_map_phyaddr_to_virt(dma_addr_t phys, unsigned long size)
+{
+ void *vaddr = NULL;
+
+ if (!PageHighMem(phys_to_page(phys)))
+ return phys_to_virt(phys);
+ vaddr = aml_mm_vmap(phys, size);
+ return vaddr;
+}
+
/* dma free*/
static void aml_dma_put(void *buf_priv)
{
struct aml_dma_buf *buf = buf_priv;
+ struct page *cma_pages = NULL;
+ void *vaddr = (void *)(PAGE_MASK & (ulong)buf->vaddr);
if (!atomic_dec_and_test(&buf->refcount)) {
- ge2d_log_dbg("aml_dma_put, refcont=%d\n",
+ ge2d_log_dbg("ge2d aml_dma_put, refcont=%d\n",
atomic_read(&buf->refcount));
return;
}
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
+ cma_pages = phys_to_page(buf->dma_addr);
+ if (is_vmalloc_or_module_addr(vaddr))
+ vunmap(vaddr);
+
+ if (!dma_release_from_contiguous(buf->dev, cma_pages,
+ buf->size >> PAGE_SHIFT)) {
+ pr_err("failed to release cma buffer\n");
}
- dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- buf->attrs);
+ buf->vaddr = NULL;
+ clear_dma_buffer((struct aml_dma_buffer *)buf->priv, buf->index);
put_device(buf->dev);
kfree(buf);
- ge2d_log_dbg("aml_dma_put free!\n");
+ ge2d_log_dbg("ge2d free:aml_dma_buf=0x%p,buf->index=%d\n",
+ buf, buf->index);
}
static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
gfp_t gfp_flags)
{
struct aml_dma_buf *buf;
+ struct page *cma_pages = NULL;
+ dma_addr_t paddr = 0;
if (WARN_ON(!dev))
return (void *)(-EINVAL);
- buf = kzalloc(sizeof(struct aml_dma_buf), GFP_KERNEL);
+	buf = kzalloc(sizeof(struct aml_dma_buf), GFP_KERNEL);
if (!buf)
return NULL;
if (attrs)
buf->attrs = attrs;
- buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- gfp_flags, buf->attrs);
- if (!buf->cookie) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
+	cma_pages = dma_alloc_from_contiguous(dev,
+		size >> PAGE_SHIFT, 0);
+	if (cma_pages) {
+		paddr = page_to_phys(cma_pages);
+	} else {
+		pr_err("failed to alloc cma pages.\n");
+		kfree(buf);
return NULL;
}
-
- if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
- buf->vaddr = buf->cookie;
-
- /* Prevent the device from being released while the buffer is used */
+ buf->vaddr = aml_map_phyaddr_to_virt(paddr, size);
buf->dev = get_device(dev);
buf->size = size;
buf->dma_dir = dma_dir;
-
+ buf->dma_addr = paddr;
atomic_inc(&buf->refcount);
- ge2d_log_dbg("aml_dma_alloc, refcont=%d\n",
- atomic_read(&buf->refcount));
+ ge2d_log_dbg("aml_dma_buf=0x%p, refcont=%d\n",
+ buf, atomic_read(&buf->refcount));
return buf;
}
static int aml_dma_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct aml_dma_buf *buf = buf_priv;
- int ret;
+	unsigned long pfn = 0;
+	unsigned long vsize;
+	int ret = -1;
-	if (!buf) {
-		pr_err("No buffer to map\n");
+	if (!buf || !vma) {
+		pr_err("No memory to map\n");
		return -EINVAL;
	}
+	vsize = vma->vm_end - vma->vm_start;
- /*
- * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
- * map whole buffer
- */
- vma->vm_pgoff = 0;
-
- ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, buf->attrs);
-
+	pfn = buf->dma_addr >> PAGE_SHIFT;
+	if (vsize > buf->size)
+		return -EINVAL;
+	ret = remap_pfn_range(vma, vma->vm_start, pfn,
+			      vsize, vma->vm_page_prot);
if (ret) {
- pr_err("Remapping memory failed, error: %d\n", ret);
+ pr_err("Remapping memory, error: %d\n", ret);
return ret;
}
+ vma->vm_flags |= VM_DONTEXPAND;
ge2d_log_dbg("mapped dma addr 0x%08lx at 0x%08lx, size %d\n",
(unsigned long)buf->dma_addr, vma->vm_start,
buf->size);
struct dma_buf_attachment *dbuf_attach)
{
struct aml_attachment *attach;
- unsigned int i;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt;
struct aml_dma_buf *buf = dbuf->priv;
+ int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ phys_addr_t phys = buf->dma_addr;
+ unsigned int i;
int ret;
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
/* Copy the buf->base_sgt scatter list to the attachment, as we can't
* map the same scatter list to multiple attachments at the same time.
*/
- ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
if (ret) {
kfree(attach);
return -ENOMEM;
}
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ struct page *page = phys_to_page(phys);
- rd = buf->sgt_base->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
+ if (!page) {
+ sg_free_table(sgt);
+ kfree(attach);
+ return -ENOMEM;
+ }
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ phys += PAGE_SIZE;
}
attach->dma_dir = DMA_NONE;
attach->dma_dir);
attach->dma_dir = DMA_NONE;
}
-
/* mapping to the client with new direction */
sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
dma_dir);
.release = aml_dmabuf_ops_release,
};
-static struct sg_table *get_base_sgt(struct aml_dma_buf *buf)
-{
- int ret;
- struct sg_table *sgt;
-
- sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!sgt)
- return NULL;
-
- ret = dma_get_sgtable(buf->dev, sgt, buf->cookie,
- buf->dma_addr, buf->size);
- if (ret < 0) {
- dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
- kfree(sgt);
- return NULL;
- }
- return sgt;
-}
-
static struct dma_buf *get_dmabuf(void *buf_priv, unsigned long flags)
{
struct aml_dma_buf *buf = buf_priv;
exp_info.size = buf->size;
exp_info.flags = flags;
exp_info.priv = buf;
-
- if (!buf->sgt_base)
- buf->sgt_base = get_base_sgt(buf);
-
- if (WARN_ON(!buf->sgt_base))
+ if (WARN_ON(!buf->vaddr))
return NULL;
dbuf = dma_buf_export(&exp_info);
return -1;
}
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index)
+{
+	if (!buffer)
+		return;
+	mutex_lock(&(buffer->lock));
+	buffer->gd_buffer[index].mem_priv = NULL;
+	buffer->gd_buffer[index].index = 0;
+	buffer->gd_buffer[index].alloc = 0;
+	mutex_unlock(&(buffer->lock));
+}
+
void *ge2d_dma_buffer_create(void)
{
int i;
struct ge2d_dmabuf_req_s *ge2d_req_buf)
{
void *buf;
+ struct aml_dma_buf *dma_buf;
unsigned int size;
int index;
size = PAGE_ALIGN(ge2d_req_buf->len);
if (size == 0)
return (-EINVAL);
-
- index = find_empty_dma_buffer(buffer);
- if ((index < 0) || (index >= AML_MAX_DMABUF)) {
- pr_err("no empty buffer found\n");
- return (-ENOMEM);
- }
-
buf = aml_dma_alloc(dev, 0, size, ge2d_req_buf->dma_dir,
GFP_HIGHUSER | __GFP_ZERO);
if (!buf)
return (-ENOMEM);
-
mutex_lock(&(buffer->lock));
+ index = find_empty_dma_buffer(buffer);
+ if ((index < 0) || (index >= AML_MAX_DMABUF)) {
+ pr_err("no empty buffer found\n");
+ mutex_unlock(&(buffer->lock));
+ aml_dma_put(buf);
+ return (-ENOMEM);
+ }
+ ((struct aml_dma_buf *)buf)->priv = buffer;
+ ((struct aml_dma_buf *)buf)->index = index;
buffer->gd_buffer[index].mem_priv = buf;
buffer->gd_buffer[index].index = index;
buffer->gd_buffer[index].alloc = 1;
mutex_unlock(&(buffer->lock));
ge2d_req_buf->index = index;
+ dma_buf = (struct aml_dma_buf *)buf;
+ if (dma_buf->dma_dir == DMA_FROM_DEVICE)
+ dma_sync_single_for_cpu(dma_buf->dev,
+ dma_buf->dma_addr,
+ dma_buf->size, DMA_FROM_DEVICE);
return 0;
}
return (-EINVAL);
}
aml_dma_put(buf);
- buffer->gd_buffer[index].alloc = 0;
return 0;
}
dir = cfg->dir;
dbuf = dma_buf_get(fd);
- if (dbuf == NULL) {
+ if (IS_ERR(dbuf)) {
pr_err("failed to get dma buffer");
return -EINVAL;
}
d_att = dma_buf_attach(dbuf, dev);
- if (d_att == NULL) {
+ if (IS_ERR(d_att)) {
pr_err("failed to set dma attach");
goto attach_err;
}
sg = dma_buf_map_attachment(d_att, dir);
- if (sg == NULL) {
+ if (IS_ERR(sg)) {
pr_err("failed to get dma sg");
goto map_attach_err;
}
cfg->attach = d_att;
cfg->vaddr = vaddr;
cfg->sg = sg;
- ge2d_log_dbg("%s\n", __func__);
+ ge2d_log_dbg("%s, dbuf=0x%p\n", __func__, dbuf);
return ret;
vmap_err:
dma_buf_put(dbuf);
- ge2d_log_dbg("%s\n", __func__);
+ ge2d_log_dbg("%s, dbuf=0x%p\n", __func__, dbuf);
}
void ge2d_dma_buffer_dma_flush(struct device *dev, int fd)
pr_err("error input param");
return;
}
- if (buf->size > 0)
+ if ((buf->size > 0) && (buf->dev == dev))
dma_sync_single_for_device(buf->dev, buf->dma_addr,
buf->size, DMA_TO_DEVICE);
dma_buf_put(dmabuf);
pr_err("error input param");
return;
}
- if (buf->size > 0)
- dma_sync_single_for_device(buf->dev, buf->dma_addr,
+ if ((buf->size > 0) && (buf->dev == dev))
+ dma_sync_single_for_cpu(buf->dev, buf->dma_addr,
buf->size, DMA_FROM_DEVICE);
dma_buf_put(dmabuf);
}
unsigned int index;
dma_addr_t dma_addr;
atomic_t refcount;
- struct sg_table *sgt_base;
/* DMABUF related */
struct dma_buf_attachment *db_attach;
+ void *priv;
};
struct aml_dma_buf_priv {
dma_cfg->dir = dir;
cfg->dma_cfg = dma_cfg;
ret = ge2d_dma_buffer_get_phys(dma_cfg, &addr);
- ge2d_log_info("phys: addr=%lx\n", addr);
if (ret != 0)
return ret;
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <api/gdc_api.h>
+#include <linux/dma-contiguous.h>
#include "system_log.h"
#include "gdc_dmabuf.h"
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index);
/* dma free*/
static void aml_dma_put(void *buf_priv)
{
struct aml_dma_buf *buf = buf_priv;
+ struct page *cma_pages = NULL;
if (!atomic_dec_and_test(&buf->refcount)) {
- gdc_log(LOG_INFO, "aml_dma_put, refcont=%d\n",
+ gdc_log(LOG_INFO, "gdc aml_dma_put, refcont=%d\n",
atomic_read(&buf->refcount));
return;
}
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
+	cma_pages = phys_to_page(buf->dma_addr);
+ if (!dma_release_from_contiguous(buf->dev, cma_pages,
+ buf->size >> PAGE_SHIFT)) {
+ pr_err("failed to release cma buffer\n");
}
- dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- buf->attrs);
+ buf->vaddr = NULL;
+ clear_dma_buffer((struct aml_dma_buffer *)buf->priv, buf->index);
put_device(buf->dev);
kfree(buf);
- gdc_log(LOG_INFO, "aml_dma_put free!\n");
+ gdc_log(LOG_INFO, "gdc free:aml_dma_buf=0x%p,buf->index=%d\n",
+ buf, buf->index);
}
static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
gfp_t gfp_flags)
{
struct aml_dma_buf *buf;
+ struct page *cma_pages = NULL;
+ dma_addr_t paddr = 0;
if (WARN_ON(!dev))
return (void *)(-EINVAL);
if (attrs)
buf->attrs = attrs;
- buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- gfp_flags, buf->attrs);
- if (!buf->cookie) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
+	cma_pages = dma_alloc_from_contiguous(dev,
+		size >> PAGE_SHIFT, 0);
+	if (cma_pages) {
+		paddr = page_to_phys(cma_pages);
+	} else {
+		pr_err("failed to alloc cma pages.\n");
+		kfree(buf);
return NULL;
}
-
- if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
- buf->vaddr = buf->cookie;
-
- /* Prevent the device from being released while the buffer is used */
+ buf->vaddr = phys_to_virt(paddr);
buf->dev = get_device(dev);
buf->size = size;
buf->dma_dir = dma_dir;
-
+ buf->dma_addr = paddr;
atomic_inc(&buf->refcount);
- gdc_log(LOG_INFO, "aml_dma_alloc, refcont=%d\n",
- atomic_read(&buf->refcount));
+ gdc_log(LOG_INFO, "aml_dma_buf=0x%p, refcont=%d\n",
+ buf, atomic_read(&buf->refcount));
return buf;
}
static int aml_dma_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct aml_dma_buf *buf = buf_priv;
- int ret;
+	unsigned long pfn = 0;
+	unsigned long vsize;
+	int ret = -1;
-	if (!buf) {
-		pr_err("No buffer to map\n");
+	if (!buf || !vma) {
+		pr_err("No memory to map\n");
		return -EINVAL;
	}
+	vsize = vma->vm_end - vma->vm_start;
- /*
- * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
- * map whole buffer
- */
- vma->vm_pgoff = 0;
-
- ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, buf->attrs);
-
+	pfn = virt_to_phys(buf->vaddr) >> PAGE_SHIFT;
+	if (vsize > buf->size)
+		return -EINVAL;
+	ret = remap_pfn_range(vma, vma->vm_start, pfn,
+			      vsize, vma->vm_page_prot);
if (ret) {
- pr_err("Remapping memory failed, error: %d\n", ret);
+ pr_err("Remapping memory, error: %d\n", ret);
return ret;
}
+ vma->vm_flags |= VM_DONTEXPAND;
+
gdc_log(LOG_INFO, "mapped dma addr 0x%08lx at 0x%08lx, size %d\n",
(unsigned long)buf->dma_addr, vma->vm_start,
buf->size);
struct dma_buf_attachment *dbuf_attach)
{
struct aml_attachment *attach;
- unsigned int i;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt;
struct aml_dma_buf *buf = dbuf->priv;
+ int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ void *vaddr = buf->vaddr;
+ unsigned int i;
int ret;
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
/* Copy the buf->base_sgt scatter list to the attachment, as we can't
* map the same scatter list to multiple attachments at the same time.
*/
- ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
if (ret) {
kfree(attach);
return -ENOMEM;
}
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ struct page *page = virt_to_page(vaddr);
- rd = buf->sgt_base->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
+ if (!page) {
+ sg_free_table(sgt);
+ kfree(attach);
+ return -ENOMEM;
+ }
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ vaddr += PAGE_SIZE;
}
attach->dma_dir = DMA_NONE;
.release = aml_dmabuf_ops_release,
};
-static struct sg_table *get_base_sgt(struct aml_dma_buf *buf)
-{
- int ret;
- struct sg_table *sgt;
-
- sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!sgt)
- return NULL;
-
- ret = dma_get_sgtable(buf->dev, sgt, buf->cookie,
- buf->dma_addr, buf->size);
- if (ret < 0) {
- dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
- kfree(sgt);
- return NULL;
- }
- return sgt;
-}
-
static struct dma_buf *get_dmabuf(void *buf_priv, unsigned long flags)
{
struct aml_dma_buf *buf = buf_priv;
exp_info.flags = flags;
exp_info.priv = buf;
- if (!buf->sgt_base)
- buf->sgt_base = get_base_sgt(buf);
-
- if (WARN_ON(!buf->sgt_base))
+ if (WARN_ON(!buf->vaddr))
return NULL;
dbuf = dma_buf_export(&exp_info);
return -1;
}
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index)
+{
+	if (!buffer)
+		return;
+	mutex_lock(&(buffer->lock));
+	buffer->gd_buffer[index].mem_priv = NULL;
+	buffer->gd_buffer[index].index = 0;
+	buffer->gd_buffer[index].alloc = 0;
+	mutex_unlock(&(buffer->lock));
+}
+
void *gdc_dma_buffer_create(void)
{
int i;
struct gdc_dmabuf_req_s *gdc_req_buf)
{
void *buf;
+ struct aml_dma_buf *dma_buf;
unsigned int size;
int index;
size = PAGE_ALIGN(gdc_req_buf->len);
if (size == 0)
return (-EINVAL);
-
- index = find_empty_dma_buffer(buffer);
- if ((index < 0) || (index >= AML_MAX_DMABUF)) {
- pr_err("no empty buffer found\n");
- return (-ENOMEM);
- }
-
buf = aml_dma_alloc(dev, 0, size, gdc_req_buf->dma_dir,
GFP_HIGHUSER | __GFP_ZERO);
if (!buf)
return (-ENOMEM);
-
mutex_lock(&(buffer->lock));
+ index = find_empty_dma_buffer(buffer);
+ if ((index < 0) || (index >= AML_MAX_DMABUF)) {
+ pr_err("no empty buffer found\n");
+ mutex_unlock(&(buffer->lock));
+ aml_dma_put(buf);
+ return (-ENOMEM);
+ }
+ ((struct aml_dma_buf *)buf)->priv = buffer;
+ ((struct aml_dma_buf *)buf)->index = index;
buffer->gd_buffer[index].mem_priv = buf;
buffer->gd_buffer[index].index = index;
buffer->gd_buffer[index].alloc = 1;
mutex_unlock(&(buffer->lock));
gdc_req_buf->index = index;
+ dma_buf = (struct aml_dma_buf *)buf;
+ if (dma_buf->dma_dir == DMA_FROM_DEVICE)
+ dma_sync_single_for_cpu(dma_buf->dev,
+ dma_buf->dma_addr,
+ dma_buf->size, DMA_FROM_DEVICE);
return 0;
}
return (-EINVAL);
}
aml_dma_put(buf);
- buffer->gd_buffer[index].alloc = 0;
return 0;
}
pr_err("aml_dma_buf is null\n");
return (-EINVAL);
}
-
dbuf = get_dmabuf(buf, flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
pr_err("failed to export buffer %d\n", index);
dma_buf_put(dbuf);
return ret;
}
-
gdc_log(LOG_INFO, "buffer %d,exported as %d descriptor\n",
index, ret);
gdc_exp_buf->fd = ret;
struct device *dev = NULL;
enum dma_data_direction dir;
+
if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
pr_err("error input param");
return -EINVAL;
fd = cfg->fd;
dev = cfg->dev;
dir = cfg->dir;
-
dbuf = dma_buf_get(fd);
- if (dbuf == NULL) {
+ if (IS_ERR(dbuf)) {
pr_err("failed to get dma buffer");
return -EINVAL;
}
-
d_att = dma_buf_attach(dbuf, dev);
- if (d_att == NULL) {
+ if (IS_ERR(d_att)) {
pr_err("failed to set dma attach");
goto attach_err;
}
sg = dma_buf_map_attachment(d_att, dir);
- if (sg == NULL) {
+ if (IS_ERR(sg)) {
pr_err("failed to get dma sg");
goto map_attach_err;
}
cfg->attach = d_att;
cfg->vaddr = vaddr;
cfg->sg = sg;
+ gdc_log(LOG_INFO, "gdc_dma_buffer_map, dbuf=0x%p\n", dbuf);
return ret;
attach_err:
dma_buf_put(dbuf);
-
return ret;
}
vaddr = cfg->vaddr;
d_att = cfg->attach;
sg = cfg->sg;
-
dma_buf_vunmap(dbuf, vaddr);
dma_buf_end_cpu_access(dbuf, dir);
dma_buf_detach(dbuf, d_att);
dma_buf_put(dbuf);
+ gdc_log(LOG_INFO, "gdc_dma_buffer_unmap, dbuf=0x%p\n", dbuf);
}
void gdc_dma_buffer_dma_flush(struct device *dev, int fd)
pr_err("error input param");
return;
}
- if (buf->size > 0)
+ if ((buf->size > 0) && (buf->dev == dev))
dma_sync_single_for_device(buf->dev, buf->dma_addr,
buf->size, DMA_TO_DEVICE);
dma_buf_put(dmabuf);
pr_err("error input param");
return;
}
- if (buf->size > 0)
- dma_sync_single_for_device(buf->dev, buf->dma_addr,
+ if ((buf->size > 0) && (buf->dev == dev))
+ dma_sync_single_for_cpu(buf->dev, buf->dma_addr,
buf->size, DMA_FROM_DEVICE);
dma_buf_put(dmabuf);
}
unsigned int index;
dma_addr_t dma_addr;
atomic_t refcount;
- struct sg_table *sgt_base;
/* DMABUF related */
struct dma_buf_attachment *db_attach;
+ void *priv;
};
struct aml_dma_buf_priv {
{} };
MODULE_DEVICE_TABLE(of, gdc_dt_match);
+static void meson_gdc_cache_flush(struct device *dev,
+ dma_addr_t addr,
+ size_t size);
//////
static int meson_gdc_open(struct inode *inode, struct file *file)
fh->gdev = gdc_dev;
- gdc_log(LOG_CRIT, "Success open\n");
+ gdc_log(LOG_INFO, "Success open\n");
return rc;
}
fh = NULL;
if (ret == 0)
- gdc_log(LOG_CRIT, "Success release\n");
+ gdc_log(LOG_INFO, "Success release\n");
else
gdc_log(LOG_ERR, "Error release\n");
fh->o_paddr = page_to_phys(cma_pages);
fh->o_kaddr = phys_to_virt(fh->o_paddr);
fh->o_len = len;
+ meson_gdc_cache_flush(&fh->gdev->pdev->dev,
+ fh->o_paddr, fh->o_len);
break;
case CONFIG_BUFF_TYPE:
if (fh->c_paddr != 0 && fh->c_kaddr != NULL)
}
gdc_log(LOG_INFO, "1 plane get input addr=%x\n",
gdc_cmd->y_base_addr);
+ meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+ gdc_cmd->y_base_addr,
+ gc->input_y_stride * gc->input_height);
} else if (gs_ex->input_buffer.plane_number == 2) {
cfg = &fh->dma_cfg.input_cfg_plane1;
cfg->fd = gs_ex->input_buffer.y_base_fd;
return -EINVAL;
}
gdc_cmd->y_base_addr = addr;
+ meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+ gdc_cmd->y_base_addr,
+ gc->input_y_stride * gc->input_height);
cfg = &fh->dma_cfg.input_cfg_plane2;
cfg->fd = gs_ex->input_buffer.uv_base_fd;
cfg->dev = &fh->gdev->pdev->dev;
return -EINVAL;
}
gdc_cmd->uv_base_addr = addr;
+ meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+ gdc_cmd->uv_base_addr,
+ gc->input_y_stride * gc->input_height / 2);
gdc_log(LOG_INFO, "2 plane get input addr=%x\n",
gdc_cmd->y_base_addr);
gdc_log(LOG_INFO, "2 plane get input addr=%x\n",
}
gdc_cmd->y_base_addr = addr;
gdc_cmd->uv_base_addr = 0;
+ meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+ gdc_cmd->y_base_addr,
+ gc->input_y_stride * gc->input_height);
break;
default:
gdc_log(LOG_ERR, "Error image format");
}
} else if (gs_ex->input_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF) {
/* dma alloc */
- gdc_process_input_dma_info(fh, gs_ex);
+ ret = gdc_process_input_dma_info(fh, gs_ex);
+ if (ret < 0)
+ return -EINVAL;
}
gdc_log(LOG_INFO, "%s, input addr=%x\n",
__func__, fh->gdc_cmd.y_base_addr);
gdc_log(LOG_ERR, "copy from user failed\n");
memcpy(&gdc_cmd->gdc_config, &gs_ex.gdc_config,
sizeof(struct gdc_config_s));
- gdc_process_ex_info(fh, &gs_ex);
+ ret = gdc_process_ex_info(fh, &gs_ex);
break;
case GDC_REQUEST_DMA_BUFF:
ret = copy_from_user(&gdc_req_buf, argp,