bo->pages = kzalloc(sizeof(struct page *) * pgnr, GFP_KERNEL);
if (unlikely(!bo->pages)) {
v4l2_err(&atomisp_dev, "out of memory for bo->pages\n");
- goto out_of_mem;
+ return -ENOMEM;
}
i = 0;
if (order == HMM_MIN_ORDER) {
v4l2_err(&atomisp_dev,
"out of memory in alloc_pages\n");
- goto out_of_mem;
+ goto cleanup;
}
v4l2_warn(&atomisp_dev,
"allocate order=%d pages failed."
v4l2_err(&atomisp_dev,
"set page uncacheable"
"failed.\n");
- goto set_uc_mem_fail;
+ goto cleanup;
}
}
}
}
return 0;
-set_uc_mem_fail:
- /* FIX ME: select one better */
- ret = -ENOMEM;
- goto cleanup;
out_of_mem:
- ret = -ENOMEM;
- goto cleanup;
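+ /* the just-allocated block is not on bo->pgblocks yet, so free it here */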
+ __free_pages(pages, order);
cleanup:
+ ret = -ENOMEM;
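+ /* release every page block queued on bo->pgblocks, then the page array */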
while (!list_empty(&bo->pgblocks)) {
pgblk = list_first_entry(&bo->pgblocks,
struct page_block, list);
list_del(&pgblk->list);
__free_pages(pgblk->pages, pgblk->order);
kfree(pgblk);
}
+ kfree(bo->pages);
return ret;
}
up_read(&current->mm->mmap_sem);
if (vma == NULL) {
v4l2_err(&atomisp_dev, "find_vma failed\n");
+ kfree(bo->pages);
return -EFAULT;
}
mutex_lock(&bo->mutex);
"get_user_pages err: bo->pgnr = %d, "
"pgnr actually pinned = %d.\n",
bo->pgnr, page_nr);
- return -ENOMEM;
+ goto out_of_mem;
}
pgblk = kzalloc(sizeof(*pgblk) * bo->pgnr, GFP_KERNEL);
kfree(pgblk);
}
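+ /* for user memory, drop the references taken on the pinned pages, then free the page array */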
+ if (bo->mem_type == HMM_BO_MEM_TYPE_USER)
+ for (i = 0; i < page_nr; i++)
+ put_page(bo->pages[i]);
+ kfree(bo->pages);
+
return ret;
}
return 0;
map_err:
+ /* unbind the physical pages from the ISP virtual address range mapped so far */
+ virt = bo->vm_node->start;
+ for ( ; i > 0; i--) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
mutex_unlock(&bo->mutex);
v4l2_err(&atomisp_dev,
"setup MMU address mapping failed.\n");
#include "mmu/isp_mmu.h"
#include "atomisp_internal.h"
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt);
+
static unsigned int atomisp_get_pte(unsigned int pt, unsigned int idx)
{
unsigned int pt_virt = (unsigned int)phys_to_virt(pt);
pte = atomisp_get_pte(l2_pt, idx);
- if (ISP_PTE_VALID(mmu, pte))
+ if (ISP_PTE_VALID(mmu, pte)) {
mmu_remap_error(mmu, l1_pt, l1_idx,
l2_pt, idx, ptr, pte, phys);
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
+ }
+
pte = isp_pgaddr_to_pte_valid(mmu, phys);
atomisp_set_pte(l2_pt, idx, pte);
if (l2_pt == NULL_PAGE) {
v4l2_err(&atomisp_dev,
"alloc page table fail.\n");
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
return -ENOMEM;
}
if (ret) {
v4l2_err(&atomisp_dev,
"setup mapping in L2PT fail.\n");
- return ret;
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
}
} while (ptr < end && idx < ISP_L1PT_PTES - 1);
mmu_l1_unmap(mmu, l1_pt, start, end);
}
+/*
+ * Unmap the ISP virtual address range from start_isp_virt to end_isp_virt
+ * (both rounded down to page boundaries).
+ */
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt)
+{
+ unsigned int pgnr;
+ unsigned int start, end;
+
+ start = (start_isp_virt) & ISP_PAGE_MASK;
+ end = (end_isp_virt) & ISP_PAGE_MASK;
+ pgnr = (end - start) >> ISP_PAGE_OFFSET;
+ mmu_unmap(mmu, start, pgnr);
+}
+
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
unsigned int phys, unsigned int pgnr)
{