[PORT FROM R2] atomisp: Fix memory leak on page allocation failure
author ziyux.jiang <ziyux.jiang@intel.com>
Tue, 8 Nov 2011 17:53:07 +0000 (01:53 +0800)
committer buildbot <buildbot@intel.com>
Mon, 19 Dec 2011 13:30:22 +0000 (05:30 -0800)
BZ: 17272

In the atomisp driver's memory allocation paths, a failure partway
through page allocation left everything acquired up to that point
unreleased. Free the partially allocated pages and the related
bookkeeping structures on such failures before returning the error.
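
For reference, the cleanup pattern the patch converges on, as an
illustrative sketch only (not the driver code; the struct and
function names here are hypothetical):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct page_array {
            struct page **pages;
    };

    /*
     * One cleanup label undoes all partial work, so every failure
     * path frees exactly what has been allocated so far.
     */
    static int alloc_sketch(struct page_array *obj, int n)
    {
            int i, ret = 0;

            obj->pages = kzalloc(sizeof(*obj->pages) * n, GFP_KERNEL);
            if (!obj->pages)
                    return -ENOMEM;         /* nothing to unwind yet */

            for (i = 0; i < n; i++) {
                    obj->pages[i] = alloc_page(GFP_KERNEL);
                    if (!obj->pages[i]) {
                            ret = -ENOMEM;
                            goto cleanup;   /* pages[0..i-1] are live */
                    }
            }
            return 0;

    cleanup:
            while (--i >= 0)                /* free what was allocated */
                    __free_page(obj->pages[i]);
            kfree(obj->pages);              /* and the page array itself */
            return ret;
    }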

Change-Id: Ie4071da207f5c52a175209b4889425097f1b4ff2
Orig-Change-Id: I5c51b7e07c6bd3713c669c3bd47de16365ce9bea
Signed-off-by: ziyux.jiang <ziyux.jiang@intel.com>
Reviewed-on: http://android.intel.com:8080/23551
Reviewed-by: buildbot <buildbot@intel.com>
Reviewed-by: Kruger, Jozef <jozef.kruger@intel.com>
Reviewed-by: Wang, Wen W <wen.w.wang@intel.com>
Reviewed-by: Cohen, David A <david.a.cohen@intel.com>
Tested-by: Koski, Anttu <anttu.koski@intel.com>
Reviewed-by: Hu, Gang A <gang.a.hu@intel.com>
Reviewed-on: http://android.intel.com:8080/28005
Reviewed-by: Tuominen, TeemuX <teemux.tuominen@intel.com>
Reviewed-by: Koski, Anttu <anttu.koski@intel.com>
Tested-by: buildbot <buildbot@intel.com>
drivers/media/video/atomisp/hmm/hmm_bo.c
drivers/media/video/atomisp/mmu/isp_mmu.c

diff --git a/drivers/media/video/atomisp/hmm/hmm_bo.c b/drivers/media/video/atomisp/hmm/hmm_bo.c
index 7957667..b5c5cbf 100644
--- a/drivers/media/video/atomisp/hmm/hmm_bo.c
+++ b/drivers/media/video/atomisp/hmm/hmm_bo.c
@@ -351,7 +351,7 @@ static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
        bo->pages = kzalloc(sizeof(struct page *) * pgnr, GFP_KERNEL);
        if (unlikely(!bo->pages)) {
                v4l2_err(&atomisp_dev, "out of memory for bo->pages\n");
-               goto out_of_mem;
+               return -ENOMEM;
        }
 
        i = 0;
@@ -371,7 +371,7 @@ retry:
                        if (order == HMM_MIN_ORDER) {
                                v4l2_err(&atomisp_dev,
                                         "out of memory in alloc_pages\n");
-                               goto out_of_mem;
+                               goto cleanup;
                        }
                        v4l2_warn(&atomisp_dev,
                                  "allocate order=%d pages failed."
@@ -409,21 +409,17 @@ retry:
                                        v4l2_err(&atomisp_dev,
                                                     "set page uncacheable"
                                                        "failed.\n");
-                                       goto set_uc_mem_fail;
+                                       goto cleanup;
                                }
                        }
                }
        }
 
        return 0;
-set_uc_mem_fail:
-       /* FIX ME: select one better */
-       ret = -ENOMEM;
-       goto cleanup;
 out_of_mem:
-       ret = -ENOMEM;
-       goto cleanup;
+       __free_pages(pages, order);
 cleanup:
+       ret = -ENOMEM;
        while (!list_empty(&bo->pgblocks)) {
                pgblk = list_first_entry(&bo->pgblocks,
                                         struct page_block, list);
@@ -438,6 +434,7 @@ cleanup:
                __free_pages(pgblk->pages, pgblk->order);
                kfree(pgblk);
        }
+       kfree(bo->pages);
 
        return ret;
 }
@@ -594,6 +591,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
        up_read(&current->mm->mmap_sem);
        if (vma == NULL) {
                v4l2_err(&atomisp_dev, "find_vma failed\n");
+               kfree(bo->pages);
                return -EFAULT;
        }
        mutex_lock(&bo->mutex);
@@ -626,7 +624,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
                                "get_user_pages err: bo->pgnr = %d, "
                                "pgnr actually pinned = %d.\n",
                                bo->pgnr, page_nr);
-               return -ENOMEM;
+               goto out_of_mem;
        }
 
        pgblk = kzalloc(sizeof(*pgblk) * bo->pgnr, GFP_KERNEL);
@@ -656,6 +654,11 @@ out_of_mem:
                kfree(pgblk);
        }
 
+       if (bo->mem_type == HMM_BO_MEM_TYPE_USER)
+               for (i = 0; i < page_nr; i++)
+                       put_page(bo->pages[i]);
+       kfree(bo->pages);
+
        return ret;
 }
 
@@ -866,6 +869,13 @@ int hmm_bo_bind(struct hmm_buffer_object *bo)
        return 0;
 
 map_err:
+       /* unbind the physical pages with related virtual address space */
+       virt = bo->vm_node->start;
+       for ( ; i > 0; i--) {
+               isp_mmu_unmap(&bdev->mmu, virt, 1);
+               virt += pgnr_to_size(1);
+       }
+
        mutex_unlock(&bo->mutex);
        v4l2_err(&atomisp_dev,
                        "setup MMU address mapping failed.\n");
diff --git a/drivers/media/video/atomisp/mmu/isp_mmu.c b/drivers/media/video/atomisp/mmu/isp_mmu.c
index 05ef59d..c107b6d 100644
--- a/drivers/media/video/atomisp/mmu/isp_mmu.c
+++ b/drivers/media/video/atomisp/mmu/isp_mmu.c
@@ -39,6 +39,9 @@
 #include "mmu/isp_mmu.h"
 #include "atomisp_internal.h"
 
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+                               unsigned int end_isp_virt);
+
 static unsigned int atomisp_get_pte(unsigned int pt, unsigned int idx)
 {
        unsigned int pt_virt = (unsigned int)phys_to_virt(pt);
@@ -194,10 +197,16 @@ static int mmu_l2_map(struct isp_mmu *mmu, unsigned int l1_pt,
 
                pte = atomisp_get_pte(l2_pt, idx);
 
-               if (ISP_PTE_VALID(mmu, pte))
+               if (ISP_PTE_VALID(mmu, pte)) {
                        mmu_remap_error(mmu, l1_pt, l1_idx,
                                          l2_pt, idx, ptr, pte, phys);
 
+                       /* free all mapped pages */
+                       free_mmu_map(mmu, start, ptr);
+
+                       return -EINVAL;
+               }
+
                pte = isp_pgaddr_to_pte_valid(mmu, phys);
 
                atomisp_set_pte(l2_pt, idx, pte);
@@ -237,6 +246,10 @@ static int mmu_l1_map(struct isp_mmu *mmu, unsigned int l1_pt,
                        if (l2_pt == NULL_PAGE) {
                                v4l2_err(&atomisp_dev,
                                             "alloc page table fail.\n");
+
+                               /* free all mapped pages */
+                               free_mmu_map(mmu, start, ptr);
+
                                return -ENOMEM;
                        }
 
@@ -264,7 +277,11 @@ static int mmu_l1_map(struct isp_mmu *mmu, unsigned int l1_pt,
                if (ret) {
                        v4l2_err(&atomisp_dev,
                                    "setup mapping in L2PT fail.\n");
-                       return ret;
+
+                       /* free all mapped pages */
+                       free_mmu_map(mmu, start, ptr);
+
+                       return -EINVAL;
                }
        } while (ptr < end && idx < ISP_L1PT_PTES - 1);
 
@@ -424,6 +441,22 @@ static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
        mmu_l1_unmap(mmu, l1_pt, start, end);
 }
 
+/*
+ * Free page tables according to isp start virtual address and end virtual
+ * address.
+ */
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+                               unsigned int end_isp_virt)
+{
+       unsigned int pgnr;
+       unsigned int start, end;
+
+       start = (start_isp_virt) & ISP_PAGE_MASK;
+       end = (end_isp_virt) & ISP_PAGE_MASK;
+       pgnr = (end - start) >> ISP_PAGE_OFFSET;
+       mmu_unmap(mmu, start, pgnr);
+}
+
 int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
                unsigned int phys, unsigned int pgnr)
 {