struct ion_handle *handle;
handle = ion_alloc(client, data.allocation.len,
- data.allocation.align,
data.allocation.heap_id_mask,
data.allocation.flags);
if (IS_ERR(handle))
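For reference, this is how the ION_IOC_ALLOC call site reads once the single-line removal above is applied; the IS_ERR() check and the rest of the handler are untouched:

	handle = ion_alloc(client, data.allocation.len,
			   data.allocation.heap_id_mask,
			   data.allocation.flags);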
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
unsigned long len,
- unsigned long align,
unsigned long flags)
{
struct ion_buffer *buffer;
buffer->flags = flags;
kref_init(&buffer->ref);
- ret = heap->ops->allocate(heap, buffer, len, align, flags);
+ ret = heap->ops->allocate(heap, buffer, len, flags);
if (ret) {
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
ion_heap_freelist_drain(heap, 0);
- ret = heap->ops->allocate(heap, buffer, len, align,
- flags);
+ ret = heap->ops->allocate(heap, buffer, len, flags);
if (ret)
goto err2;
}
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
+ unsigned int heap_id_mask,
unsigned int flags)
{
struct ion_handle *handle;
struct ion_heap *heap;
int ret;
- pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
- len, align, heap_id_mask, flags);
+ pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
+ len, heap_id_mask, flags);
/*
* traverse the list of heaps available in this system in priority
* order. If the heap type is supported by the client, and matches the
/* if the caller didn't specify this heap id */
if (!((1 << heap->id) & heap_id_mask))
continue;
- buffer = ion_buffer_create(heap, dev, len, align, flags);
+ buffer = ion_buffer_create(heap, dev, len, flags);
if (!IS_ERR(buffer))
break;
}
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
- * @align: required alignment in physical memory if applicable
* @priv: private info passed from the board file
*
* Provided by the board file.
* ion_alloc - allocate ion memory
* @client: the client
* @len: size of the allocation
- * @align: requested allocation alignment, lots of hardware blocks
- * have alignment requirements of some kind
* @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
* heaps will be tried in order from highest to lowest
* id
* an opaque handle to it.
*/
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
+ unsigned int heap_id_mask,
unsigned int flags);
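As a usage illustration only (the client, the size, the heap id and the flag value below are assumptions, not part of this patch, and my_heap_id is a hypothetical variable), an in-kernel caller of the narrowed ion_alloc() now passes just a length, a heap id mask and flags:

	struct ion_handle *handle;

	/*
	 * my_heap_id is whatever id the target heap was registered with;
	 * SZ_1M and ION_FLAG_CACHED are just example values.
	 */
	handle = ion_alloc(client, SZ_1M, 1 << my_heap_id, ION_FLAG_CACHED);
	if (IS_ERR(handle))
		return PTR_ERR(handle);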
/**
};
static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+ unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct sg_table *table;
ion_phys_addr_t paddr;
int ret;
- if (align > PAGE_SIZE)
- return -EINVAL;
-
table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
if (ret)
goto err_free;
- paddr = ion_carveout_allocate(heap, size, align);
+ paddr = ion_carveout_allocate(heap, size);
if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
ret = -ENOMEM;
goto err_free_table;
static int ion_chunk_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
unsigned long num_chunks;
unsigned long allocated_size;
- if (align > chunk_heap->chunk_size)
- return -EINVAL;
-
allocated_size = ALIGN(size, chunk_heap->chunk_size);
num_chunks = allocated_size / chunk_heap->chunk_size;
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ pr_debug("%s: base %lu size %zu \n", __func__,
+ chunk_heap->base, heap_data->size);
return &chunk_heap->heap;
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
+ unsigned long len,
unsigned long flags)
{
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
if (buffer->flags & ION_FLAG_CACHED)
return -EINVAL;
- if (align > PAGE_SIZE)
- return -EINVAL;
-
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
struct ion_heap_ops {
int (*allocate)(struct ion_heap *heap,
struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
+ unsigned long flags);
void (*free)(struct ion_buffer *buffer);
void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
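For heap drivers, the only interface change is the narrowed allocate() op. A hypothetical out-of-tree heap (the names and the stub bodies below are illustrative, not taken from the patch) would now wire it up as:

static int example_heap_allocate(struct ion_heap *heap,
				 struct ion_buffer *buffer,
				 unsigned long len,
				 unsigned long flags)
{
	/*
	 * No align argument any more: allocations are implicitly page
	 * aligned, matching what the in-tree heaps already did.
	 * A real heap builds the buffer's sg_table here.
	 */
	return -ENOMEM;
}

static void example_heap_free(struct ion_buffer *buffer)
{
}

static struct ion_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
	.free = example_heap_free,
};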
static int ion_system_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
- if (align > PAGE_SIZE)
- return -EINVAL;
-
if (size / PAGE_SIZE > totalram_pages / 2)
return -ENOMEM;
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long len,
- unsigned long align,
unsigned long flags)
{
int order = get_order(len);
unsigned long i;
int ret;
- if (align > (PAGE_SIZE << order))
- return -EINVAL;
-
page = alloc_pages(low_order_gfp_flags, order);
if (!page)
return -ENOMEM;