};
/**
- * ion_handle - a client local reference to an buffer
+ * ion_handle - a client local reference to a buffer
* @ref: reference count
* @client: back pointer to the client the buffer resides in
* @buffer: pointer to the buffer
* @node: node in the client's handle rbtree
- * @map_cnt: count of times this client has mapped this buffer
- * @addr: return from map
- * @vaddr: return from map_kernel
+ * @kmap_cnt: count of times this client has mapped this buffer to the kernel
+ * @dmap_cnt: count of times this client has mapped this buffer for dma
+ * @usermap_cnt: count of times this client has mapped this buffer to userspace
*
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client. Other fields are never changed after initialization.
+ * Modifications to node, the *_cnt fields or mapping should be protected
+ * by the lock in the client. Other fields are never changed after
+ * initialization.
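As a minimal sketch of the rule above (the locking shape is assumed, not taken from the patch), any update to one of the handle's counters happens with the owning client's lock held:

	mutex_lock(&client->lock);
	handle->kmap_cnt++;	/* likewise dmap_cnt and usermap_cnt */
	mutex_unlock(&client->lock);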
else if (buffer > entry)
p = &(*p)->rb_right;
else
- WARN(1, "%s: buffer already found.", __func__);
+ pr_err("%s: buffer already found.", __func__);
+ BUG();
}
rb_link_node(&buffer->node, parent, p);
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
- if (ret)
+ if (ret) {
+ kfree(buffer);
return ERR_PTR(ret);
+ }
buffer->dev = dev;
buffer->size = len;
mutex_init(&buffer->lock);
if (!((1 << heap->type) & client->heap_mask))
continue;
- /* if the caller didn't specify this heap type */
+ /* if the caller didn't specify this heap id */
- if (!((1 << heap->prio) & flags))
+ if (!((1 << heap->id) & flags))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
if (!IS_ERR_OR_NULL(buffer))
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
+ mutex_unlock(&client->lock);
return -EINVAL;
}
if (!buffer->heap->ops->phys) {
pr_err("%s: ion_phys is not implemented by this heap.\n",
__func__);
+ mutex_unlock(&client->lock);
return -ENODEV;
}
mutex_unlock(&client->lock);
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
+ mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
if (!handle->buffer->heap->ops->map_kernel) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_dma.\n",
__func__);
+ mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
if (!handle->buffer->heap->ops->map_dma) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
atomic_read(&buffer->ref.refcount));
}
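The hunks above fix leaked locks by adding a mutex_unlock() before each early return. A sketch of the equivalent, more idiomatic kernel shape, where every exit funnels through a single unlock site (this function is illustrative, not part of the patch, and omits the buffer lock and the *_cnt bookkeeping):

static void *ion_map_kernel_sketch(struct ion_client *client,
				   struct ion_handle *handle)
{
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		vaddr = ERR_PTR(-EINVAL);
		goto out;
	}
	if (!handle->buffer->heap->ops->map_kernel) {
		vaddr = ERR_PTR(-ENODEV);
		goto out;
	}
	vaddr = handle->buffer->heap->ops->map_kernel(handle->buffer->heap,
						      handle->buffer);
out:
	mutex_unlock(&client->lock);	/* every path drops the lock */
	return vaddr;
}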
-#if 0
-static int ion_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_client *client;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* this indicates the client is gone, nothing to do here */
- if (!handle)
- return;
- client = handle->client;
-}
-#endif
-
static struct vm_operations_struct ion_vm_ops = {
.open = ion_vma_open,
.close = ion_vma_close,
mutex_lock(&buffer->lock);
/* now map it to userspace */
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
if (ret) {
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
goto err1;
}
- mutex_unlock(&buffer->lock);
vma->vm_ops = &ion_vm_ops;
/* move the handle into the vm_private_data so we can access it from
case ION_IOC_FREE:
{
struct ion_handle_data data;
+ bool valid;
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_handle_data)))
return -EFAULT;
mutex_lock(&client->lock);
- if (!ion_handle_validate(client, data.handle))
- return -EINVAL;
+ valid = ion_handle_validate(client, data.handle);
mutex_unlock(&client->lock);
+ if (!valid)
+ return -EINVAL;
ion_free(client, data.handle);
break;
}
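Note the shape of this fix: the handle is validated with client->lock held, but the lock is dropped before ion_free() runs, since ion_free() acquires client->lock itself and calling it with the lock already held would deadlock.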
if (!ion_handle_validate(client, data.handle)) {
pr_err("%s: invalid handle passed to share ioctl.\n",
__func__);
+ mutex_unlock(&client->lock);
return -EINVAL;
}
data.fd = ion_ioctl_share(filp, client, data.handle);
parent = *p;
entry = rb_entry(parent, struct ion_heap, node);
- if (heap->prio < entry->prio) {
+ if (heap->id < entry->id) {
p = &(*p)->rb_left;
- } else if (heap->prio > entry->prio) {
+ } else if (heap->id > entry->id) {
p = &(*p)->rb_right;
} else {
pr_err("%s: can not insert multiple heaps with "
- "priority %d\n", __func__, heap->prio);
+ "id %d\n", __func__, heap->id);
goto end;
}
}
};
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+ unsigned long size,
+ unsigned long align)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
gen_pool_free(carveout_heap->pool, addr, size);
}
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_carveout_heap *carveout_heap;
+
carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
if (!carveout_heap)
return ERR_PTR(-ENOMEM);
return ERR_PTR(-EINVAL);
}
if (IS_ERR_OR_NULL(heap))
- pr_err("%s: error creating heap %s type %d base %llu size %u\n",
+ pr_err("%s: error creating heap %s type %d base %lu size %u\n",
__func__, heap_data->name, heap_data->type,
heap_data->base, heap_data->size);
heap->name = heap_data->name;
- heap->prio = heap_data->prio;
+ heap->id = heap_data->id;
return heap;
}
* @heap: back pointer to the heap the buffer came from
* @flags: buffer specific flags
* @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffer's cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sglist: the scatterlist for the buffer if dmap_cnt is not zero
*/
struct ion_buffer {
struct kref ref;
- * @phys get physical address of a buffer (only define on
+ * @phys get physical address of a buffer (only defined on
* physically contiguous heaps)
* @map_dma map the memory for dma to a scatterlist
+ * @unmap_dma unmap the memory for dma
* @map_kernel map memory to the kernel
+ * @unmap_kernel unmap memory from the kernel
* @map_user map memory to userspace
*/
struct ion_heap_ops {
* @dev: back pointer to the ion_device
* @type: type of heap
* @ops: ops struct as above
- * @prio: priority (lower numbers first) of this heap when
+ * @id: id of the heap, which also indicates its priority when
* allocating. These are specified by platform data and
* MUST be unique
- * @priv: private data used by the heap implementation
+ * @name: used for debugging
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
struct ion_device *dev;
enum ion_heap_type type;
struct ion_heap_ops *ops;
- int prio;
+ int id;
const char *name;
};
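Putting the ops documentation together, a heap implementation fills in a table like the following sketch (the my_heap_* callbacks are hypothetical placeholders; .phys is only supplied by physically contiguous heaps):

static struct ion_heap_ops my_heap_ops = {
	.allocate     = my_heap_allocate,
	.free         = my_heap_free,
	.phys         = my_heap_phys,
	.map_dma      = my_heap_map_dma,
	.unmap_dma    = my_heap_unmap_dma,
	.map_kernel   = my_heap_map_kernel,
	.unmap_kernel = my_heap_unmap_kernel,
	.map_user     = my_heap_map_user,
};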
/**
* ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
*
- * returns a valid device or -PTR_ERR
+ * returns a valid device or an ERR_PTR on failure
*/
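A sketch of the custom_ioctl hook an architecture might pass in (my_custom_ioctl and its commands are hypothetical):

static long my_custom_ioctl(struct ion_client *client,
			    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	/* arch specific commands are decoded here */
	default:
		return -ENOTTY;
	}
}

At init time the hook is handed to ion_device_create(my_custom_ioctl); passing NULL, as the generic probe code later in this patch does, is also valid when no custom commands are needed.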
*/
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-/* CREATE HEAPS */
+/**
+ * functions for creating and destroying the built-in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
-
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this sentinel indicates that an allocation failed.
+ */
#define ION_CARVEOUT_ALLOCATE_FAIL -1
#endif /* _ION_PRIV_H */
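A sketch of the allocate/free pairing from inside a hypothetical custom heap built on the carveout pool, using the sentinel documented above (len and align are assumed to come from the caller):

	ion_phys_addr_t paddr;

	paddr = ion_carveout_allocate(heap, len, align);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;
	/* ... hand the physically contiguous region at paddr to hardware ... */
	ion_carveout_free(heap, paddr, len);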
heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
idev = ion_device_create(NULL);
- if (IS_ERR_OR_NULL(idev))
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
return PTR_ERR(idev);
+ }
/* create the heaps as specified in the board file */
for (i = 0; i < num_heaps; i++) {
* enum ion_heap_types - list of all possible types of heaps
* @ION_HEAP_SYSTEM: memory allocated via vmalloc
* @ION_HEAP_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
- * @ION_HEAP_END: helper for iterating over heaps
+ * @ION_NUM_HEAPS: helper for iterating over heaps
*/
enum ion_heap_type {
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
- ION_HEAP_END,
+ ION_NUM_HEAPS,
};
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_SYSTEM)
do not accept phys_addr_t's that would have to */
#define ion_phys_addr_t unsigned long
-#define ION_NUM_HEAPS ION_HEAP_END
-
/**
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
- * @prio: priority of the heap when allocating (lower numbers will be
- * allocated from first), MUST be unique
+ * @id: unique identifier for the heap; when allocating, heaps with
+ * lower ids are tried first
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
*/
struct ion_platform_heap {
enum ion_heap_type type;
- unsigned int prio;
+ unsigned int id;
const char *name;
ion_phys_addr_t base;
size_t size;
/**
* struct ion_platform_data - array of platform heaps passed from board file
- * @heaps: array of platform_heap structions
* @nr: number of structures in the array
+ * @heaps: array of platform_heap structures
*
* Provided by the board file in the form of platform data to a platform device.
*/
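A sketch of the board file side (heap names, ids, and the base/size values are illustrative assumptions; ION_HEAP_TYPE_SYSTEM is inferred from the enum above):

static struct ion_platform_heap my_ion_heaps[] = {
	{
		.type = ION_HEAP_TYPE_SYSTEM,
		.id   = 0,
		.name = "system",
	},
	{
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id   = 1,
		.name = "carveout",
		.base = 0x80000000,	/* reserved at boot */
		.size = SZ_8M,
	},
};

static struct ion_platform_data my_ion_pdata = {
	.nr    = ARRAY_SIZE(my_ion_heaps),
	.heaps = my_ion_heaps,
};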
* @len: size of the allocation
* @align: requested allocation alignment, lots of hardware blocks have
* alignment requirements of some kind
- * @flags: flags to pass along to heaps
+ * @flags: mask of heaps to allocate from; if multiple bits are set,
+ * heaps will be tried in order from lowest to highest order bit
*
* Allocate memory in one of the heaps provided in heap mask and return
* an opaque handle to it.
struct ion_handle *handle;
};
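On the userspace side the flags field selects heaps by id bit; a sketch, assuming an open ion device fd and the heap ids from the hypothetical board file above:

	struct ion_allocation_data alloc_data = {
		.len   = 4096,
		.align = 4096,
		.flags = (1 << 0) | (1 << 1),	/* heap id 0 first, then id 1 */
	};

	if (ioctl(fd, ION_IOC_ALLOC, &alloc_data) < 0)
		perror("ION_IOC_ALLOC");
	/* on success, alloc_data.handle refers to the new buffer */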
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
struct ion_custom_data {
unsigned int cmd;
unsigned long arg;