unsigned int kmap_cnt;
};
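+/*
+ * Cached buffers that do not use explicit sync are mapped into userspace
+ * by faulting pages in one at a time.
+ */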
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+ return ((buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+}
+
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
return ERR_PTR(-ENOMEM);
buffer->heap = heap;
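+ /* set flags early: the heap's allocate() now checks ion_buffer_fault_user_mappings(buffer) */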
+ buffer->flags = flags;
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
buffer->dev = dev;
buffer->size = len;
- buffer->flags = flags;
table = heap->ops->map_dma(heap, buffer);
if (IS_ERR_OR_NULL(table)) {
return ERR_PTR(PTR_ERR(table));
}
buffer->sg_table = table;
- if (buffer->flags & ION_FLAG_CACHED &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
+ if (ion_buffer_fault_user_mappings(buffer)) {
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
i) {
if (sg_dma_len(sg) == PAGE_SIZE)
continue;
- pr_err("%s: cached mappings must have pagewise "
- "sg_lists\n", __func__);
+ pr_err("%s: cached mappings that will be faulted in "
+ "must have pagewise sg_lists\n", __func__);
ret = -EINVAL;
goto err;
}
pr_debug("%s: syncing for device %s\n", __func__,
dev ? dev_name(dev) : "null");
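+ /* only buffers that get faulted into userspace need this sync */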
- if (!(buffer->flags & ION_FLAG_CACHED) ||
- (buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC))
+ if (!ion_buffer_fault_user_mappings(buffer))
return;
mutex_lock(&buffer->lock);
return -EINVAL;
}
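+ /* faultable buffers are mapped on demand through ion_vma_ops */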
- if (buffer->flags & ION_FLAG_CACHED &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
+ if (ion_buffer_fault_user_mappings(buffer)) {
vma->vm_private_data = buffer;
vma->vm_ops = &ion_vma_ops;
ion_vm_open(vma);
struct list_head list;
};
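+/*
+ * Allocate the largest chunk (order 8, 4, then 0) that still fits in the
+ * remaining size.  If the buffer will be faulted in, split the allocation
+ * so each page can be mapped and freed individually.
+ */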
-static struct page_info *alloc_largest_available(unsigned long size)
+static struct page_info *alloc_largest_available(unsigned long size,
+ bool split_pages)
{
static unsigned int orders[] = {8, 4, 0};
struct page *page;
__GFP_NOWARN | __GFP_NORETRY, orders[i]);
if (!page)
continue;
- split_page(page, orders[i]);
+ if (split_pages)
+ split_page(page, orders[i]);
info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
info->page = page;
info->order = orders[i];
int ret;
struct list_head pages;
struct page_info *info, *tmp_info;
- int i;
+ int i = 0;
long size_remaining = PAGE_ALIGN(size);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
- info = alloc_largest_available(size_remaining);
+ info = alloc_largest_available(size_remaining, split_pages);
if (!info)
goto err;
list_add_tail(&info->list, &pages);
size_remaining -= (1 << info->order) * PAGE_SIZE;
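+ /* count chunks; used to size the sg_table when pages are not split */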
+ i++;
}
table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
goto err;
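+ /* size the table: one entry per page if split, else one per chunk */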
- ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
+ if (split_pages)
+ ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
+ GFP_KERNEL);
+ else
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+
if (ret)
goto err1;
sg = table->sgl;
list_for_each_entry_safe(info, tmp_info, &pages, list) {
struct page *page = info->page;
- for (i = 0; i < (1 << info->order); i++) {
- sg_set_page(sg, page + i, PAGE_SIZE, 0);
+
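+ /* one entry per page for split chunks, one entry for a whole chunk */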
+ if (split_pages) {
+ for (i = 0; i < (1 << info->order); i++) {
+ sg_set_page(sg, page + i, PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ }
+ } else {
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
+ 0);
sg = sg_next(sg);
}
list_del(&info->list);
- memset(info, 0, sizeof(struct page_info));
kfree(info);
}
kfree(table);
err:
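+ /* free any chunks already allocated: page by page if they were split */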
list_for_each_entry_safe(info, tmp_info, &pages, list) {
- for (i = 0; i < (1 << info->order); i++)
- __free_page(info->page + i);
+ if (split_pages)
+ for (i = 0; i < (1 << info->order); i++)
+ __free_page(info->page + i);
+ else
+ __free_pages(info->page, info->order);
+
kfree(info);
}
return -ENOMEM;