gdc: ge2d: fix ge2d dma-buf no free memory issue [1/1]
authorPengcheng Chen <pengcheng.chen@amlogic.com>
Mon, 4 Mar 2019 04:43:49 +0000 (12:43 +0800)
committerNick Xie <nick@khadas.com>
Mon, 5 Aug 2019 07:11:27 +0000 (15:11 +0800)
PD#SWPL-5685

Problem:
gdc & ge2d dma-buf memory leak

Solution:
clear the dmabuf alloc flag only when the dmabuf is actually freed

Verify:
verified by w400

Change-Id: I914d93d795311fd6e3e34ca5bd64aaf4db3d4c7a
Signed-off-by: Pengcheng Chen <pengcheng.chen@amlogic.com>
gdc: fix gdc buffer index error caused crash issue [1/1]

PD#SWPL-5685

Problem:
gdc buffer index error when dma alloc is called concurrently

Solution:
add lock to gdc buffer index

Verify:
verified by w400

Change-Id: Ia5e41562675b594029fd5a0166561c0069739536
Signed-off-by: Pengcheng Chen <pengcheng.chen@amlogic.com>
ge2d: fix ge2d buffer index error caused crash issue [2/2]

PD#SWPL-5685

Problem:
ge2d buffer index error when dma alloc is called concurrently

Solution:
add lock to ge2d buffer index

Verify:
verified by w400

Change-Id: I1efa4127fbcb939ade457a890769d59a146a0798
Signed-off-by: Pengcheng Chen <pengcheng.chen@amlogic.com>
gdc: ge2d: output dma_buf need flush when alloc [1/1]

PD#SWPL-5685

Problem:
gdc output mismatch

Solution:
output dma_buf needs to be flushed when allocated

Verify:
verified by w400

Change-Id: Ie206b4f51bd1338420f63e0e06563b67d6d63c88
Signed-off-by: Pengcheng Chen <pengcheng.chen@amlogic.com>
ge2d: change dma_buf to cacheable [1/2]

PD#SWPL-5685

Problem:
ge2d output dmabuf has high variance

Solution:
change dma_buf to cacheable

Verify:
verified by w400

Change-Id: Iff9356dd256ce69bd87e7e5a2b1feb9e74c49744
Signed-off-by: Pengcheng Chen <pengcheng.chen@amlogic.com>
gdc: change dma_buf to cacheable [2/2]

PD#SWPL-5685

Problem:
gdc output dmabuf has high variance

Solution:
change dma_buf to cacheable

Verify:
verified by w400

Change-Id: Ide8cea975c7dd39bb9185fbb9ba0694d859c74e6
Signed-off-by: Pengcheng Chen <pengcheng.chen@amlogic.com>
ge2d: add ge2d dmabuf 32bit compatible [1/2]

PD#SWPL-5685

Problem:
ge2d dmabuf does not work under 32-bit

Solution:
add ge2d dmabuf 32bit compatible

Verify:
verified by w400

Change-Id: I03011620fc67cdaf251f5ca1c7b0b512a8fc9a76
Signed-off-by: Pengcheng Chen <pengcheng.chen@amlogic.com>
drivers/amlogic/media/common/ge2d/ge2d_dmabuf.c
drivers/amlogic/media/common/ge2d/ge2d_dmabuf.h
drivers/amlogic/media/common/ge2d/ge2d_wq.c
drivers/amlogic/media/gdc/app/gdc_dmabuf.c
drivers/amlogic/media/gdc/app/gdc_dmabuf.h
drivers/amlogic/media/gdc/app/gdc_module.c

index 046cb1e..3ed7684 100644 (file)
 #include <linux/scatterlist.h>
 #include <linux/pagemap.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 
 #include "ge2d_log.h"
 #include "ge2d_dmabuf.h"
 
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index);
+
+static void *aml_mm_vmap(phys_addr_t phys, unsigned long size)
+{
+       u32 offset, npages;
+       struct page **pages = NULL;
+       pgprot_t pgprot = PAGE_KERNEL;
+       void *vaddr;
+       int i;
+
+       offset = offset_in_page(phys);
+       npages = DIV_ROUND_UP(size + offset, PAGE_SIZE);
+
+       pages = vmalloc(sizeof(struct page *) * npages);
+       if (!pages)
+               return NULL;
+       for (i = 0; i < npages; i++) {
+               pages[i] = phys_to_page(phys);
+               phys += PAGE_SIZE;
+       }
+       /* pgprot = pgprot_writecombine(PAGE_KERNEL); */
+
+       vaddr = vmap(pages, npages, VM_MAP, pgprot);
+       if (!vaddr) {
+               pr_err("vmaped fail, size: %d\n",
+                       npages << PAGE_SHIFT);
+               vfree(pages);
+               return NULL;
+       }
+       vfree(pages);
+       ge2d_log_dbg("[HIGH-MEM-MAP] pa(%lx) to va(%p), size: %d\n",
+               (unsigned long)phys, vaddr, npages << PAGE_SHIFT);
+       return vaddr;
+}
+
+static void *aml_map_phyaddr_to_virt(dma_addr_t phys, unsigned long size)
+{
+       void *vaddr = NULL;
+
+       if (!PageHighMem(phys_to_page(phys)))
+               return phys_to_virt(phys);
+       vaddr = aml_mm_vmap(phys, size);
+       return vaddr;
+}
+
 /* dma free*/
 static void aml_dma_put(void *buf_priv)
 {
        struct aml_dma_buf *buf = buf_priv;
+       struct page *cma_pages = NULL;
+       void *vaddr = (void *)(PAGE_MASK & (ulong)buf->vaddr);
 
        if (!atomic_dec_and_test(&buf->refcount)) {
-               ge2d_log_dbg("aml_dma_put, refcont=%d\n",
+               ge2d_log_dbg("ge2d aml_dma_put, refcont=%d\n",
                        atomic_read(&buf->refcount));
                return;
        }
-       if (buf->sgt_base) {
-               sg_free_table(buf->sgt_base);
-               kfree(buf->sgt_base);
+       cma_pages = phys_to_page(buf->dma_addr);
+       if (is_vmalloc_or_module_addr(vaddr))
+               vunmap(vaddr);
+
+       if (!dma_release_from_contiguous(buf->dev, cma_pages,
+                                        buf->size >> PAGE_SHIFT)) {
+               pr_err("failed to release cma buffer\n");
        }
-       dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
-                      buf->attrs);
+       buf->vaddr = NULL;
+       clear_dma_buffer((struct aml_dma_buffer *)buf->priv, buf->index);
        put_device(buf->dev);
        kfree(buf);
-       ge2d_log_dbg("aml_dma_put free!\n");
+       ge2d_log_dbg("ge2d free:aml_dma_buf=0x%p,buf->index=%d\n",
+               buf, buf->index);
 }
 
 static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
@@ -59,35 +112,34 @@ static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
                          gfp_t gfp_flags)
 {
        struct aml_dma_buf *buf;
+       struct page *cma_pages = NULL;
+       dma_addr_t paddr = 0;
 
        if (WARN_ON(!dev))
                return (void *)(-EINVAL);
 
-       buf = kzalloc(sizeof(struct aml_dma_buf), GFP_KERNEL);
+       buf = kzalloc(sizeof(struct aml_dma_buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return NULL;
 
        if (attrs)
                buf->attrs = attrs;
-       buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
-                        gfp_flags, buf->attrs);
-       if (!buf->cookie) {
-               dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
-               kfree(buf);
+       cma_pages = dma_alloc_from_contiguous(dev,
+               size >> PAGE_SHIFT, 0);
+       if (cma_pages) {
+               paddr = page_to_phys(cma_pages);
+       } else {
+               pr_err("failed to alloc cma pages.\n");
                return NULL;
        }
-
-       if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
-               buf->vaddr = buf->cookie;
-
-       /* Prevent the device from being released while the buffer is used */
+       buf->vaddr = aml_map_phyaddr_to_virt(paddr, size);
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;
-
+       buf->dma_addr = paddr;
        atomic_inc(&buf->refcount);
-       ge2d_log_dbg("aml_dma_alloc, refcont=%d\n",
-               atomic_read(&buf->refcount));
+       ge2d_log_dbg("aml_dma_buf=0x%p, refcont=%d\n",
+               buf, atomic_read(&buf->refcount));
 
        return buf;
 }
@@ -95,26 +147,23 @@ static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
 static int aml_dma_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
        struct aml_dma_buf *buf = buf_priv;
-       int ret;
+       unsigned long pfn = 0;
+       unsigned long vsize = vma->vm_end - vma->vm_start;
+       int ret = -1;
 
-       if (!buf) {
-               pr_err("No buffer to map\n");
+       if (!buf || !vma) {
+               pr_err("No memory to map\n");
                return -EINVAL;
        }
 
-       /*
-        * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
-        * map whole buffer
-        */
-       vma->vm_pgoff = 0;
-
-       ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
-               buf->dma_addr, buf->size, buf->attrs);
-
+       pfn = buf->dma_addr >> PAGE_SHIFT;
+       ret = remap_pfn_range(vma, vma->vm_start, pfn,
+               vsize, vma->vm_page_prot);
        if (ret) {
-               pr_err("Remapping memory failed, error: %d\n", ret);
+               pr_err("Remapping memory, error: %d\n", ret);
                return ret;
        }
+       vma->vm_flags |= VM_DONTEXPAND;
        ge2d_log_dbg("mapped dma addr 0x%08lx at 0x%08lx, size %d\n",
                (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);
@@ -133,10 +182,12 @@ static int aml_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct aml_attachment *attach;
-       unsigned int i;
-       struct scatterlist *rd, *wr;
-       struct sg_table *sgt;
        struct aml_dma_buf *buf = dbuf->priv;
+       int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+       struct sg_table *sgt;
+       struct scatterlist *sg;
+       phys_addr_t phys = buf->dma_addr;
+       unsigned int i;
        int ret;
 
        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
@@ -147,18 +198,21 @@ static int aml_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        /* Copy the buf->base_sgt scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
-       ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+       ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               struct page *page = phys_to_page(phys);
 
-       rd = buf->sgt_base->sgl;
-       wr = sgt->sgl;
-       for (i = 0; i < sgt->orig_nents; ++i) {
-               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-               rd = sg_next(rd);
-               wr = sg_next(wr);
+               if (!page) {
+                       sg_free_table(sgt);
+                       kfree(attach);
+                       return -ENOMEM;
+               }
+               sg_set_page(sg, page, PAGE_SIZE, 0);
+               phys += PAGE_SIZE;
        }
 
        attach->dma_dir = DMA_NONE;
@@ -211,7 +265,6 @@ static struct sg_table *aml_dmabuf_ops_map(
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }
-
        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
@@ -271,25 +324,6 @@ static struct dma_buf_ops ge2d_dmabuf_ops = {
        .release = aml_dmabuf_ops_release,
 };
 
-static struct sg_table *get_base_sgt(struct aml_dma_buf *buf)
-{
-       int ret;
-       struct sg_table *sgt;
-
-       sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!sgt)
-               return NULL;
-
-       ret = dma_get_sgtable(buf->dev, sgt, buf->cookie,
-               buf->dma_addr, buf->size);
-       if (ret < 0) {
-               dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
-               kfree(sgt);
-               return NULL;
-       }
-       return sgt;
-}
-
 static struct dma_buf *get_dmabuf(void *buf_priv, unsigned long flags)
 {
        struct aml_dma_buf *buf = buf_priv;
@@ -300,11 +334,7 @@ static struct dma_buf *get_dmabuf(void *buf_priv, unsigned long flags)
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;
-
-       if (!buf->sgt_base)
-               buf->sgt_base = get_base_sgt(buf);
-
-       if (WARN_ON(!buf->sgt_base))
+       if (WARN_ON(!buf->vaddr))
                return NULL;
 
        dbuf = dma_buf_export(&exp_info);
@@ -339,6 +369,15 @@ static int find_empty_dma_buffer(struct aml_dma_buffer *buffer)
                return -1;
 }
 
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index)
+{
+       mutex_lock(&(buffer->lock));
+       buffer->gd_buffer[index].mem_priv = NULL;
+       buffer->gd_buffer[index].index = 0;
+       buffer->gd_buffer[index].alloc = 0;
+       mutex_unlock(&(buffer->lock));
+}
+
 void *ge2d_dma_buffer_create(void)
 {
        int i;
@@ -367,6 +406,7 @@ int ge2d_dma_buffer_alloc(struct aml_dma_buffer *buffer,
        struct ge2d_dmabuf_req_s *ge2d_req_buf)
 {
        void *buf;
+       struct aml_dma_buf *dma_buf;
        unsigned int size;
        int index;
 
@@ -380,24 +420,30 @@ int ge2d_dma_buffer_alloc(struct aml_dma_buffer *buffer,
        size = PAGE_ALIGN(ge2d_req_buf->len);
        if (size == 0)
                return (-EINVAL);
-
-       index = find_empty_dma_buffer(buffer);
-       if ((index < 0) || (index >= AML_MAX_DMABUF)) {
-               pr_err("no empty buffer found\n");
-               return (-ENOMEM);
-       }
-
        buf = aml_dma_alloc(dev, 0, size, ge2d_req_buf->dma_dir,
                GFP_HIGHUSER | __GFP_ZERO);
        if (!buf)
                return (-ENOMEM);
-
        mutex_lock(&(buffer->lock));
+       index = find_empty_dma_buffer(buffer);
+       if ((index < 0) || (index >= AML_MAX_DMABUF)) {
+               pr_err("no empty buffer found\n");
+               mutex_unlock(&(buffer->lock));
+               aml_dma_put(buf);
+               return (-ENOMEM);
+       }
+       ((struct aml_dma_buf *)buf)->priv = buffer;
+       ((struct aml_dma_buf *)buf)->index = index;
        buffer->gd_buffer[index].mem_priv = buf;
        buffer->gd_buffer[index].index = index;
        buffer->gd_buffer[index].alloc = 1;
        mutex_unlock(&(buffer->lock));
        ge2d_req_buf->index = index;
+       dma_buf = (struct aml_dma_buf *)buf;
+       if (dma_buf->dma_dir == DMA_FROM_DEVICE)
+               dma_sync_single_for_cpu(dma_buf->dev,
+                       dma_buf->dma_addr,
+                       dma_buf->size, DMA_FROM_DEVICE);
        return 0;
 }
 
@@ -416,7 +462,6 @@ int ge2d_dma_buffer_free(struct aml_dma_buffer *buffer, int index)
                return (-EINVAL);
        }
        aml_dma_put(buf);
-       buffer->gd_buffer[index].alloc = 0;
        return 0;
 }
 
@@ -483,19 +528,19 @@ int ge2d_dma_buffer_map(struct aml_dma_cfg *cfg)
        dir = cfg->dir;
 
        dbuf = dma_buf_get(fd);
-       if (dbuf == NULL) {
+       if (IS_ERR(dbuf)) {
                pr_err("failed to get dma buffer");
                return -EINVAL;
        }
 
        d_att = dma_buf_attach(dbuf, dev);
-       if (d_att == NULL) {
+       if (IS_ERR(d_att)) {
                pr_err("failed to set dma attach");
                goto attach_err;
        }
 
        sg = dma_buf_map_attachment(d_att, dir);
-       if (sg == NULL) {
+       if (IS_ERR(sg)) {
                pr_err("failed to get dma sg");
                goto map_attach_err;
        }
@@ -515,7 +560,7 @@ int ge2d_dma_buffer_map(struct aml_dma_cfg *cfg)
        cfg->attach = d_att;
        cfg->vaddr = vaddr;
        cfg->sg = sg;
-       ge2d_log_dbg("%s\n", __func__);
+       ge2d_log_dbg("%s, dbuf=0x%p\n", __func__, dbuf);
        return ret;
 
 vmap_err:
@@ -587,7 +632,7 @@ void ge2d_dma_buffer_unmap(struct aml_dma_cfg *cfg)
 
        dma_buf_put(dbuf);
 
-       ge2d_log_dbg("%s\n", __func__);
+       ge2d_log_dbg("%s, dbuf=0x%p\n", __func__, dbuf);
 }
 
 void ge2d_dma_buffer_dma_flush(struct device *dev, int fd)
@@ -606,7 +651,7 @@ void ge2d_dma_buffer_dma_flush(struct device *dev, int fd)
                pr_err("error input param");
                return;
        }
-       if (buf->size > 0)
+       if ((buf->size > 0) && (buf->dev == dev))
                dma_sync_single_for_device(buf->dev, buf->dma_addr,
                        buf->size, DMA_TO_DEVICE);
        dma_buf_put(dmabuf);
@@ -628,8 +673,8 @@ void ge2d_dma_buffer_cache_flush(struct device *dev, int fd)
                pr_err("error input param");
                return;
        }
-       if (buf->size > 0)
-               dma_sync_single_for_device(buf->dev, buf->dma_addr,
+       if ((buf->size > 0) && (buf->dev == dev))
+               dma_sync_single_for_cpu(buf->dev, buf->dma_addr,
                        buf->size, DMA_FROM_DEVICE);
        dma_buf_put(dmabuf);
 }
index 85dc94b..c3c03d3 100644 (file)
@@ -37,9 +37,9 @@ struct aml_dma_buf {
        unsigned int            index;
        dma_addr_t                      dma_addr;
        atomic_t                        refcount;
-       struct sg_table         *sgt_base;
        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
+       void                *priv;
 };
 
 struct aml_dma_buf_priv {
index e2c4209..d55c4c4 100644 (file)
@@ -1244,7 +1244,6 @@ static int build_ge2d_config_ex_dma(struct ge2d_context_s *context,
                        dma_cfg->dir = dir;
                        cfg->dma_cfg = dma_cfg;
                        ret = ge2d_dma_buffer_get_phys(dma_cfg, &addr);
-                       ge2d_log_info("phys: addr=%lx\n", addr);
                        if (ret != 0)
                                return ret;
 
index 390874a..9cc9523 100644 (file)
 #include <linux/pagemap.h>
 #include <linux/dma-mapping.h>
 #include <api/gdc_api.h>
+#include <linux/dma-contiguous.h>
 
 #include "system_log.h"
 #include "gdc_dmabuf.h"
 
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index);
 /* dma free*/
 static void aml_dma_put(void *buf_priv)
 {
        struct aml_dma_buf *buf = buf_priv;
+       struct page *cma_pages = NULL;
 
        if (!atomic_dec_and_test(&buf->refcount)) {
-               gdc_log(LOG_INFO, "aml_dma_put, refcont=%d\n",
+               gdc_log(LOG_INFO, "gdc aml_dma_put, refcont=%d\n",
                        atomic_read(&buf->refcount));
                return;
        }
-       if (buf->sgt_base) {
-               sg_free_table(buf->sgt_base);
-               kfree(buf->sgt_base);
+       cma_pages = virt_to_page(buf->vaddr);
+       if (!dma_release_from_contiguous(buf->dev, cma_pages,
+                                        buf->size >> PAGE_SHIFT)) {
+               pr_err("failed to release cma buffer\n");
        }
-       dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
-                      buf->attrs);
+       buf->vaddr = NULL;
+       clear_dma_buffer((struct aml_dma_buffer *)buf->priv, buf->index);
        put_device(buf->dev);
        kfree(buf);
-       gdc_log(LOG_INFO, "aml_dma_put free!\n");
+       gdc_log(LOG_INFO, "gdc free:aml_dma_buf=0x%p,buf->index=%d\n",
+               buf, buf->index);
 }
 
 static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
@@ -60,6 +65,8 @@ static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
                          gfp_t gfp_flags)
 {
        struct aml_dma_buf *buf;
+       struct page *cma_pages = NULL;
+       dma_addr_t paddr = 0;
 
        if (WARN_ON(!dev))
                return (void *)(-EINVAL);
@@ -70,25 +77,22 @@ static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
 
        if (attrs)
                buf->attrs = attrs;
-       buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
-                        gfp_flags, buf->attrs);
-       if (!buf->cookie) {
-               dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
-               kfree(buf);
+       cma_pages = dma_alloc_from_contiguous(dev,
+               size >> PAGE_SHIFT, 0);
+       if (cma_pages) {
+               paddr = page_to_phys(cma_pages);
+       } else {
+               pr_err("failed to alloc cma pages.\n");
                return NULL;
        }
-
-       if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
-               buf->vaddr = buf->cookie;
-
-       /* Prevent the device from being released while the buffer is used */
+       buf->vaddr = phys_to_virt(paddr);
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;
-
+       buf->dma_addr = paddr;
        atomic_inc(&buf->refcount);
-       gdc_log(LOG_INFO, "aml_dma_alloc, refcont=%d\n",
-               atomic_read(&buf->refcount));
+       gdc_log(LOG_INFO, "aml_dma_buf=0x%p, refcont=%d\n",
+               buf, atomic_read(&buf->refcount));
 
        return buf;
 }
@@ -96,26 +100,24 @@ static void *aml_dma_alloc(struct device *dev, unsigned long attrs,
 static int aml_dma_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
        struct aml_dma_buf *buf = buf_priv;
-       int ret;
+       unsigned long pfn = 0;
+       unsigned long vsize = vma->vm_end - vma->vm_start;
+       int ret = -1;
 
-       if (!buf) {
-               pr_err("No buffer to map\n");
+       if (!buf || !vma) {
+               pr_err("No memory to map\n");
                return -EINVAL;
        }
 
-       /*
-        * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
-        * map whole buffer
-        */
-       vma->vm_pgoff = 0;
-
-       ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
-               buf->dma_addr, buf->size, buf->attrs);
-
+       pfn = virt_to_phys(buf->vaddr) >> PAGE_SHIFT;
+       ret = remap_pfn_range(vma, vma->vm_start, pfn,
+               vsize, vma->vm_page_prot);
        if (ret) {
-               pr_err("Remapping memory failed, error: %d\n", ret);
+               pr_err("Remapping memory, error: %d\n", ret);
                return ret;
        }
+       vma->vm_flags |= VM_DONTEXPAND;
+
        gdc_log(LOG_INFO, "mapped dma addr 0x%08lx at 0x%08lx, size %d\n",
                (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);
@@ -134,10 +136,12 @@ static int aml_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct aml_attachment *attach;
-       unsigned int i;
-       struct scatterlist *rd, *wr;
-       struct sg_table *sgt;
        struct aml_dma_buf *buf = dbuf->priv;
+       int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+       struct sg_table *sgt;
+       struct scatterlist *sg;
+       void *vaddr = buf->vaddr;
+       unsigned int i;
        int ret;
 
        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
@@ -148,18 +152,21 @@ static int aml_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        /* Copy the buf->base_sgt scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
-       ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+       ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               struct page *page = virt_to_page(vaddr);
 
-       rd = buf->sgt_base->sgl;
-       wr = sgt->sgl;
-       for (i = 0; i < sgt->orig_nents; ++i) {
-               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-               rd = sg_next(rd);
-               wr = sg_next(wr);
+               if (!page) {
+                       sg_free_table(sgt);
+                       kfree(attach);
+                       return -ENOMEM;
+               }
+               sg_set_page(sg, page, PAGE_SIZE, 0);
+               vaddr += PAGE_SIZE;
        }
 
        attach->dma_dir = DMA_NONE;
@@ -272,25 +279,6 @@ static struct dma_buf_ops gdc_dmabuf_ops = {
        .release = aml_dmabuf_ops_release,
 };
 
-static struct sg_table *get_base_sgt(struct aml_dma_buf *buf)
-{
-       int ret;
-       struct sg_table *sgt;
-
-       sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!sgt)
-               return NULL;
-
-       ret = dma_get_sgtable(buf->dev, sgt, buf->cookie,
-               buf->dma_addr, buf->size);
-       if (ret < 0) {
-               dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
-               kfree(sgt);
-               return NULL;
-       }
-       return sgt;
-}
-
 static struct dma_buf *get_dmabuf(void *buf_priv, unsigned long flags)
 {
        struct aml_dma_buf *buf = buf_priv;
@@ -302,10 +290,7 @@ static struct dma_buf *get_dmabuf(void *buf_priv, unsigned long flags)
        exp_info.flags = flags;
        exp_info.priv = buf;
 
-       if (!buf->sgt_base)
-               buf->sgt_base = get_base_sgt(buf);
-
-       if (WARN_ON(!buf->sgt_base))
+       if (WARN_ON(!buf->vaddr))
                return NULL;
 
        dbuf = dma_buf_export(&exp_info);
@@ -341,6 +326,15 @@ static int find_empty_dma_buffer(struct aml_dma_buffer *buffer)
                return -1;
 }
 
+static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index)
+{
+       mutex_lock(&(buffer->lock));
+       buffer->gd_buffer[index].mem_priv = NULL;
+       buffer->gd_buffer[index].index = 0;
+       buffer->gd_buffer[index].alloc = 0;
+       mutex_unlock(&(buffer->lock));
+}
+
 void *gdc_dma_buffer_create(void)
 {
        int i;
@@ -369,6 +363,7 @@ int gdc_dma_buffer_alloc(struct aml_dma_buffer *buffer,
        struct gdc_dmabuf_req_s *gdc_req_buf)
 {
        void *buf;
+       struct aml_dma_buf *dma_buf;
        unsigned int size;
        int index;
 
@@ -382,24 +377,30 @@ int gdc_dma_buffer_alloc(struct aml_dma_buffer *buffer,
        size = PAGE_ALIGN(gdc_req_buf->len);
        if (size == 0)
                return (-EINVAL);
-
-       index = find_empty_dma_buffer(buffer);
-       if ((index < 0) || (index >= AML_MAX_DMABUF)) {
-               pr_err("no empty buffer found\n");
-               return (-ENOMEM);
-       }
-
        buf = aml_dma_alloc(dev, 0, size, gdc_req_buf->dma_dir,
                GFP_HIGHUSER | __GFP_ZERO);
        if (!buf)
                return (-ENOMEM);
-
        mutex_lock(&(buffer->lock));
+       index = find_empty_dma_buffer(buffer);
+       if ((index < 0) || (index >= AML_MAX_DMABUF)) {
+               pr_err("no empty buffer found\n");
+               mutex_unlock(&(buffer->lock));
+               aml_dma_put(buf);
+               return (-ENOMEM);
+       }
+       ((struct aml_dma_buf *)buf)->priv = buffer;
+       ((struct aml_dma_buf *)buf)->index = index;
        buffer->gd_buffer[index].mem_priv = buf;
        buffer->gd_buffer[index].index = index;
        buffer->gd_buffer[index].alloc = 1;
        mutex_unlock(&(buffer->lock));
        gdc_req_buf->index = index;
+       dma_buf = (struct aml_dma_buf *)buf;
+       if (dma_buf->dma_dir == DMA_FROM_DEVICE)
+               dma_sync_single_for_cpu(dma_buf->dev,
+                       dma_buf->dma_addr,
+                       dma_buf->size, DMA_FROM_DEVICE);
        return 0;
 }
 
@@ -418,7 +419,6 @@ int gdc_dma_buffer_free(struct aml_dma_buffer *buffer, int index)
                return (-EINVAL);
        }
        aml_dma_put(buf);
-       buffer->gd_buffer[index].alloc = 0;
        return 0;
 }
 
@@ -445,7 +445,6 @@ int gdc_dma_buffer_export(struct aml_dma_buffer *buffer,
                pr_err("aml_dma_buf is null\n");
                return (-EINVAL);
        }
-
        dbuf = get_dmabuf(buf, flags & O_ACCMODE);
        if (IS_ERR_OR_NULL(dbuf)) {
                pr_err("failed to export buffer %d\n", index);
@@ -458,7 +457,6 @@ int gdc_dma_buffer_export(struct aml_dma_buffer *buffer,
                dma_buf_put(dbuf);
                return ret;
        }
-
        gdc_log(LOG_INFO, "buffer %d,exported as %d descriptor\n",
                index, ret);
        gdc_exp_buf->fd = ret;
@@ -476,6 +474,7 @@ int gdc_dma_buffer_map(struct aml_dma_cfg *cfg)
        struct device *dev = NULL;
        enum dma_data_direction dir;
 
+
        if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
                pr_err("error input param");
                return -EINVAL;
@@ -483,21 +482,19 @@ int gdc_dma_buffer_map(struct aml_dma_cfg *cfg)
        fd = cfg->fd;
        dev = cfg->dev;
        dir = cfg->dir;
-
        dbuf = dma_buf_get(fd);
-       if (dbuf == NULL) {
+       if (IS_ERR(dbuf)) {
                pr_err("failed to get dma buffer");
                return -EINVAL;
        }
-
        d_att = dma_buf_attach(dbuf, dev);
-       if (d_att == NULL) {
+       if (IS_ERR(d_att)) {
                pr_err("failed to set dma attach");
                goto attach_err;
        }
 
        sg = dma_buf_map_attachment(d_att, dir);
-       if (sg == NULL) {
+       if (IS_ERR(sg)) {
                pr_err("failed to get dma sg");
                goto map_attach_err;
        }
@@ -517,6 +514,7 @@ int gdc_dma_buffer_map(struct aml_dma_cfg *cfg)
        cfg->attach = d_att;
        cfg->vaddr = vaddr;
        cfg->sg = sg;
+       gdc_log(LOG_INFO, "gdc_dma_buffer_map, dbuf=0x%p\n", dbuf);
 
        return ret;
 
@@ -531,7 +529,6 @@ map_attach_err:
 
 attach_err:
        dma_buf_put(dbuf);
-
        return ret;
 }
 
@@ -579,7 +576,6 @@ void gdc_dma_buffer_unmap(struct aml_dma_cfg *cfg)
        vaddr = cfg->vaddr;
        d_att = cfg->attach;
        sg = cfg->sg;
-
        dma_buf_vunmap(dbuf, vaddr);
 
        dma_buf_end_cpu_access(dbuf, dir);
@@ -589,6 +585,7 @@ void gdc_dma_buffer_unmap(struct aml_dma_cfg *cfg)
        dma_buf_detach(dbuf, d_att);
 
        dma_buf_put(dbuf);
+       gdc_log(LOG_INFO, "gdc_dma_buffer_unmap, dbuf=0x%p\n", dbuf);
 }
 
 void gdc_dma_buffer_dma_flush(struct device *dev, int fd)
@@ -607,7 +604,7 @@ void gdc_dma_buffer_dma_flush(struct device *dev, int fd)
                pr_err("error input param");
                return;
        }
-       if (buf->size > 0)
+       if ((buf->size > 0) && (buf->dev == dev))
                dma_sync_single_for_device(buf->dev, buf->dma_addr,
                        buf->size, DMA_TO_DEVICE);
        dma_buf_put(dmabuf);
@@ -629,8 +626,8 @@ void gdc_dma_buffer_cache_flush(struct device *dev, int fd)
                pr_err("error input param");
                return;
        }
-       if (buf->size > 0)
-               dma_sync_single_for_device(buf->dev, buf->dma_addr,
+       if ((buf->size > 0) && (buf->dev == dev))
+               dma_sync_single_for_cpu(buf->dev, buf->dma_addr,
                        buf->size, DMA_FROM_DEVICE);
        dma_buf_put(dmabuf);
 }
index 6de1d57..a6e988d 100644 (file)
@@ -35,9 +35,9 @@ struct aml_dma_buf {
        unsigned int            index;
        dma_addr_t                      dma_addr;
        atomic_t                        refcount;
-       struct sg_table         *sgt_base;
        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
+       void                *priv;
 };
 
 struct aml_dma_buf_priv {
index e698baf..14dcf10 100644 (file)
@@ -53,6 +53,9 @@ static const struct of_device_id gdc_dt_match[] = {
        {} };
 
 MODULE_DEVICE_TABLE(of, gdc_dt_match);
+static void meson_gdc_cache_flush(struct device *dev,
+                                       dma_addr_t addr,
+                                       size_t size);
 
 //////
 static int meson_gdc_open(struct inode *inode, struct file *file)
@@ -80,7 +83,7 @@ static int meson_gdc_open(struct inode *inode, struct file *file)
 
        fh->gdev = gdc_dev;
 
-       gdc_log(LOG_CRIT, "Success open\n");
+       gdc_log(LOG_INFO, "Success open\n");
 
        return rc;
 }
@@ -146,7 +149,7 @@ static int meson_gdc_release(struct inode *inode, struct file *file)
        fh = NULL;
 
        if (ret == 0)
-               gdc_log(LOG_CRIT, "Success release\n");
+               gdc_log(LOG_INFO, "Success release\n");
        else
                gdc_log(LOG_ERR, "Error release\n");
 
@@ -181,6 +184,8 @@ static long meson_gdc_set_buff(void *f_fh,
                fh->o_paddr = page_to_phys(cma_pages);
                fh->o_kaddr = phys_to_virt(fh->o_paddr);
                fh->o_len = len;
+               meson_gdc_cache_flush(&fh->gdev->pdev->dev,
+                       fh->o_paddr, fh->o_len);
        break;
        case CONFIG_BUFF_TYPE:
                if (fh->c_paddr != 0 && fh->c_kaddr != NULL)
@@ -551,6 +556,9 @@ static long gdc_process_input_dma_info(struct mgdc_fh_s *fh,
                        }
                        gdc_log(LOG_INFO, "1 plane get input addr=%x\n",
                                gdc_cmd->y_base_addr);
+                       meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+                               gdc_cmd->y_base_addr,
+                               gc->input_y_stride * gc->input_height);
                } else if (gs_ex->input_buffer.plane_number == 2) {
                        cfg = &fh->dma_cfg.input_cfg_plane1;
                        cfg->fd = gs_ex->input_buffer.y_base_fd;
@@ -564,6 +572,9 @@ static long gdc_process_input_dma_info(struct mgdc_fh_s *fh,
                                return -EINVAL;
                        }
                        gdc_cmd->y_base_addr = addr;
+                       meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+                               gdc_cmd->y_base_addr,
+                               gc->input_y_stride * gc->input_height);
                        cfg = &fh->dma_cfg.input_cfg_plane2;
                        cfg->fd = gs_ex->input_buffer.uv_base_fd;
                        cfg->dev = &fh->gdev->pdev->dev;
@@ -576,6 +587,9 @@ static long gdc_process_input_dma_info(struct mgdc_fh_s *fh,
                                return -EINVAL;
                        }
                        gdc_cmd->uv_base_addr = addr;
+                       meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+                               gdc_cmd->uv_base_addr,
+                               gc->input_y_stride * gc->input_height / 2);
                        gdc_log(LOG_INFO, "2 plane get input addr=%x\n",
                                gdc_cmd->y_base_addr);
                        gdc_log(LOG_INFO, "2 plane get input addr=%x\n",
@@ -596,6 +610,9 @@ static long gdc_process_input_dma_info(struct mgdc_fh_s *fh,
                }
                gdc_cmd->y_base_addr = addr;
                gdc_cmd->uv_base_addr = 0;
+               meson_gdc_dma_flush(&fh->gdev->pdev->dev,
+                       gdc_cmd->y_base_addr,
+                       gc->input_y_stride * gc->input_height);
        break;
        default:
                gdc_log(LOG_ERR, "Error image format");
@@ -691,7 +708,9 @@ static long gdc_process_ex_info(struct mgdc_fh_s *fh,
                }
        } else if (gs_ex->input_buffer.mem_alloc_type == AML_GDC_MEM_DMABUF) {
                /* dma alloc */
-               gdc_process_input_dma_info(fh, gs_ex);
+               ret = gdc_process_input_dma_info(fh, gs_ex);
+               if (ret < 0)
+                       return -EINVAL;
        }
        gdc_log(LOG_INFO, "%s, input addr=%x\n",
                __func__, fh->gdc_cmd.y_base_addr);
@@ -923,7 +942,7 @@ static long meson_gdc_ioctl(struct file *file, unsigned int cmd,
                        gdc_log(LOG_ERR, "copy from user failed\n");
                memcpy(&gdc_cmd->gdc_config, &gs_ex.gdc_config,
                        sizeof(struct gdc_config_s));
-               gdc_process_ex_info(fh, &gs_ex);
+               ret = gdc_process_ex_info(fh, &gs_ex);
                break;
        case GDC_REQUEST_DMA_BUFF:
                ret = copy_from_user(&gdc_req_buf, argp,