From cd983e21aadf5ad8f99e2838d25a1079888f0abf Mon Sep 17 00:00:00 2001
From: Rebecca Schultz Zavin
Date: Thu, 30 Jun 2011 12:19:55 -0700
Subject: [PATCH] gpu: ion: Several fixes

Fix some cases where locks were not released on error paths
Change heap->prio to heap->id to make meaning clearer
Fix kernel doc to match sources
---
 drivers/gpu/ion/ion.c               | 55 ++++++++++++++++++-------------------
 drivers/gpu/ion/ion_carveout_heap.c |  7 +++--
 drivers/gpu/ion/ion_heap.c          |  4 +--
 drivers/gpu/ion/ion_priv.h          | 34 +++++++++++++++++++----
 drivers/gpu/ion/tegra/tegra_ion.c   |  4 ++-
 include/linux/ion.h                 | 26 ++++++++++++------
 6 files changed, 84 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 688582b..108469b 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -82,14 +82,14 @@ struct ion_client {
 };
 
 /**
- * ion_handle - a client local reference to an buffer
+ * ion_handle - a client local reference to a buffer
  * @ref:		reference count
  * @client:		back pointer to the client the buffer resides in
  * @buffer:		pointer to the buffer
  * @node:		node in the client's handle rbtree
- * @map_cnt:		count of times this client has mapped this buffer
- * @addr:		return from map
- * @vaddr:		return from map_kernel
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @dmap_cnt:		count of times this client has mapped for dma
+ * @usermap_cnt:	count of times this client has mapped for userspace
  *
  * Modifications to node, map_cnt or mapping should be protected by the
  * lock in the client.  Other fields are never changed after initialization.
@@ -121,7 +121,9 @@ static void ion_buffer_add(struct ion_device *dev,
 		else if (buffer > entry)
 			p = &(*p)->rb_right;
-		else
-			WARN(1, "%s: buffer already found.", __func__);
+		else {
+			pr_err("%s: buffer already found.", __func__);
+			BUG();
+		}
 
 	rb_link_node(&buffer->node, parent, p);
@@ -146,8 +147,10 @@ struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	kref_init(&buffer->ref);
 
 	ret = heap->ops->allocate(heap, buffer, len, align, flags);
-	if (ret)
+	if (ret) {
+		kfree(buffer);
 		return ERR_PTR(ret);
+	}
 	buffer->dev = dev;
 	buffer->size = len;
 	mutex_init(&buffer->lock);
@@ -295,7 +298,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 		if (!((1 << heap->type) & client->heap_mask))
 			continue;
 		/* if the caller didn't specify this heap type */
-		if (!((1 << heap->prio) & flags))
+		if (!((1 << heap->id) & flags))
 			continue;
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
 		if (!IS_ERR_OR_NULL(buffer))
@@ -374,6 +377,7 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
 	if (!ion_handle_validate(client, handle)) {
 		pr_err("%s: invalid handle passed to map_kernel.\n",
 		       __func__);
+		mutex_unlock(&client->lock);
 		return -EINVAL;
 	}
@@ -382,6 +386,7 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
 	if (!buffer->heap->ops->phys) {
 		pr_err("%s: ion_phys is not implemented by this heap.\n",
 		       __func__);
+		mutex_unlock(&client->lock);
 		return -ENODEV;
 	}
 	mutex_unlock(&client->lock);
@@ -398,6 +403,7 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
 	if (!ion_handle_validate(client, handle)) {
 		pr_err("%s: invalid handle passed to map_kernel.\n",
 		       __func__);
+		mutex_unlock(&client->lock);
 		return ERR_PTR(-EINVAL);
 	}
@@ -407,6 +413,8 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
 	if (!handle->buffer->heap->ops->map_kernel) {
 		pr_err("%s: map_kernel is not implemented by this heap.\n",
 		       __func__);
+		mutex_unlock(&buffer->lock);
+		mutex_unlock(&client->lock);
 		return ERR_PTR(-ENODEV);
 	}
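
[Annotation] The hunks above all fix one bug class: an early error return taken
while client->lock (and sometimes buffer->lock) is still held.  A minimal sketch
of the alternative single-exit shape, with hypothetical example_* names standing
in for the real ion symbols, shows how such leaks can be made hard to reintroduce:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical stand-ins for illustration only -- not the real ion types. */
struct example_buffer {
	void *(*map_kernel)(struct example_buffer *buf);
};

struct example_client {
	struct mutex lock;
};

static void *example_map_kernel(struct example_client *client,
				struct example_buffer *buf)
{
	void *vaddr;

	mutex_lock(&client->lock);
	if (!buf) {				/* invalid handle */
		vaddr = ERR_PTR(-EINVAL);
		goto out;
	}
	if (!buf->map_kernel) {			/* op not implemented */
		vaddr = ERR_PTR(-ENODEV);
		goto out;
	}
	vaddr = buf->map_kernel(buf);
out:
	mutex_unlock(&client->lock);		/* single exit: the lock cannot leak */
	return vaddr;
}

The patch instead keeps explicit per-path unlocks, which stays closer to the
existing code but relies on every new return remembering to drop the locks.
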
@@ -433,6 +441,7 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
 	if (!ion_handle_validate(client, handle)) {
 		pr_err("%s: invalid handle passed to map_dma.\n",
 		       __func__);
+		mutex_unlock(&client->lock);
 		return ERR_PTR(-EINVAL);
 	}
 	buffer = handle->buffer;
@@ -441,6 +450,8 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
 	if (!handle->buffer->heap->ops->map_dma) {
 		pr_err("%s: map_kernel is not implemented by this heap.\n",
 		       __func__);
+		mutex_unlock(&buffer->lock);
+		mutex_unlock(&client->lock);
 		return ERR_PTR(-ENODEV);
 	}
 	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
@@ -778,21 +789,6 @@ static void ion_vma_close(struct vm_area_struct *vma)
 		atomic_read(&buffer->ref.refcount));
 }
 
-#if 0
-static int ion_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct ion_handle *handle = vma->vm_private_data;
-	struct ion_buffer *buffer = vma->vm_file->private_data;
-	struct ion_client *client;
-
-	pr_debug("%s: %d\n", __func__, __LINE__);
-	/* this indicates the client is gone, nothing to do here */
-	if (!handle)
-		return;
-	client = handle->client;
-}
-#endif
-
 static struct vm_operations_struct ion_vm_ops = {
 	.open = ion_vma_open,
 	.close = ion_vma_close,
@@ -842,12 +838,12 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
 	mutex_lock(&buffer->lock);
 	/* now map it to userspace */
 	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	mutex_unlock(&buffer->lock);
 	if (ret) {
 		pr_err("%s: failure mapping buffer to userspace\n",
 		       __func__);
 		goto err1;
 	}
-	mutex_unlock(&buffer->lock);
 
 	vma->vm_ops = &ion_vm_ops;
 	/* move the handle into the vm_private_data so we can access it from
@@ -918,14 +914,16 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case ION_IOC_FREE:
 	{
 		struct ion_handle_data data;
+		bool valid;
 
 		if (copy_from_user(&data, (void __user *)arg,
 				   sizeof(struct ion_handle_data)))
 			return -EFAULT;
 		mutex_lock(&client->lock);
-		if (!ion_handle_validate(client, data.handle))
-			return -EINVAL;
+		valid = ion_handle_validate(client, data.handle);
 		mutex_unlock(&client->lock);
+		if (!valid)
+			return -EINVAL;
 		ion_free(client, data.handle);
 		break;
 	}
@@ -940,6 +938,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (!ion_handle_validate(client, data.handle)) {
 			pr_err("%s: invalid handle passed to share ioctl.\n",
 			       __func__);
+			mutex_unlock(&client->lock);
 			return -EINVAL;
 		}
 		data.fd = ion_ioctl_share(filp, client, data.handle);
@@ -1090,13 +1089,13 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 		parent = *p;
 		entry = rb_entry(parent, struct ion_heap, node);
 
-		if (heap->prio < entry->prio) {
+		if (heap->id < entry->id) {
 			p = &(*p)->rb_left;
-		} else if (heap->prio > entry->prio) {
+		} else if (heap->id > entry->id) {
 			p = &(*p)->rb_right;
 		} else {
 			pr_err("%s: can not insert multiple heaps with "
-				"priority %d\n", __func__, heap->prio);
+				"id %d\n", __func__, heap->id);
 			goto end;
 		}
 	}
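
[Annotation] The ION_IOC_FREE hunk is the one place the patch restructures
control flow instead of adding an unlock on the error path: the validation
result is captured under the lock, the lock is dropped unconditionally, and only
then does the handler decide.  A hedged sketch of that shape (the example_*
names are hypothetical stand-ins, not the real ion functions):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_client {
	struct mutex lock;
	/* in ion, the handle rbtree this validates against lives here */
};

/* Hypothetical helpers standing in for ion_handle_validate()/ion_free(). */
bool example_handle_validate(struct example_client *client, void *handle);
void example_free(struct example_client *client, void *handle);

static long example_ioc_free(struct example_client *client, void *handle)
{
	bool valid;

	/* take the answer while locked, then drop the lock unconditionally */
	mutex_lock(&client->lock);
	valid = example_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid)
		return -EINVAL;
	example_free(client, handle);	/* presumably retakes the lock itself */
	return 0;
}

This shape is presumably required because ion_free() takes client->lock
internally, so the handler cannot simply hold the lock across the whole call.
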
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index eeb5c53..245d813 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -34,8 +34,8 @@ struct ion_carveout_heap {
 };
 
 ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
-				       unsigned long size,
-				       unsigned long align)
+				      unsigned long size,
+				      unsigned long align)
 {
 	struct ion_carveout_heap *carveout_heap =
 		container_of(heap, struct ion_carveout_heap, heap);
@@ -53,6 +53,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 	struct ion_carveout_heap *carveout_heap =
 		container_of(heap, struct ion_carveout_heap, heap);
 
+	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+		return;
 	gen_pool_free(carveout_heap->pool, addr, size);
 }
@@ -130,6 +132,7 @@ static struct ion_heap_ops carveout_heap_ops = {
 struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_carveout_heap *carveout_heap;
+
 	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
 	if (!carveout_heap)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 7db9537..7c93577 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -38,12 +38,12 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 		return ERR_PTR(-EINVAL);
 	}
 	if (IS_ERR_OR_NULL(heap))
-		pr_err("%s: error creating heap %s type %d base %llu size %u\n",
+		pr_err("%s: error creating heap %s type %d base %lu size %u\n",
 		       __func__, heap_data->name, heap_data->type,
 		       heap_data->base, heap_data->size);
 
 	heap->name = heap_data->name;
-	heap->prio = heap_data->prio;
+	heap->id = heap_data->id;
 	return heap;
 }
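
[Annotation] The carveout fix above guards gen_pool_free() against the
allocation-failure sentinel.  The carveout heap hands out raw physical
addresses, and since 0 can be a legitimate physical address, failure is
signalled by ION_CARVEOUT_ALLOCATE_FAIL (-1) rather than by 0 or NULL.  A sketch
of both halves of that convention, assuming only the stock genalloc API
(EXAMPLE_ALLOCATE_FAIL and the example_* functions are hypothetical):

#include <linux/genalloc.h>

/* mirrors ION_CARVEOUT_ALLOCATE_FAIL */
#define EXAMPLE_ALLOCATE_FAIL ((unsigned long)-1)

static unsigned long example_carveout_allocate(struct gen_pool *pool,
					       unsigned long size)
{
	unsigned long offset = gen_pool_alloc(pool, size);

	/*
	 * gen_pool_alloc() returns 0 on failure, but 0 may be a valid
	 * physical address in a carveout, so translate to the sentinel.
	 */
	if (!offset)
		return EXAMPLE_ALLOCATE_FAIL;
	return offset;
}

static void example_carveout_free(struct gen_pool *pool, unsigned long addr,
				  unsigned long size)
{
	/* the fix above: never feed the failure sentinel back to the pool */
	if (addr == EXAMPLE_ALLOCATE_FAIL)
		return;
	gen_pool_free(pool, addr, size);
}
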
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index fdbd2be..3323954 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -45,6 +45,15 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
  * @heap:		back pointer to the heap the buffer came from
  * @flags:		buffer specific flags
  * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @priv_phys:		private data to the buffer representable as
+ *			an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:		protects the buffer's cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:		number of times the buffer is mapped for dma
+ * @sglist:		the scatterlist for the buffer if dmap_cnt is not zero
  */
 struct ion_buffer {
 	struct kref ref;
@@ -71,7 +80,9 @@ struct ion_buffer {
  * @phys		get physical address of a buffer (only define on
  *			physically contiguous heaps)
  * @map_dma		map the memory for dma to a scatterlist
+ * @unmap_dma		unmap the memory for dma
  * @map_kernel		map memory to the kernel
+ * @unmap_kernel	unmap memory from the kernel
  * @map_user		map memory to userspace
  */
 struct ion_heap_ops {
@@ -96,10 +107,10 @@ struct ion_heap_ops {
  * @dev:		back pointer to the ion_device
  * @type:		type of heap
  * @ops:		ops struct as above
- * @prio:		priority (lower numbers first) of this heap when
+ * @id:			id of heap, also indicates priority of this heap when
  *			allocating.  These are specified by platform data and
  *			MUST be unique
- * @priv:		private data used by the heap implementation
+ * @name:		used for debugging
  *
  * Represents a pool of memory from which buffers can be made.  In some
  * systems the only heap is regular system memory allocated via vmalloc.
@@ -111,12 +122,13 @@ struct ion_heap {
 	struct ion_device *dev;
 	enum ion_heap_type type;
 	struct ion_heap_ops *ops;
-	int prio;
+	int id;
 	const char *name;
 };
 
 /**
  * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:	arch specific ioctl function if applicable
  *
  * returns a valid device or -PTR_ERR
  */
@@ -138,7 +150,12 @@ void ion_device_destroy(struct ion_device *dev);
  */
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
 
-/* CREATE HEAPS */
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
 struct ion_heap *ion_heap_create(struct ion_platform_heap *);
 void ion_heap_destroy(struct ion_heap *);
@@ -150,11 +167,18 @@ void ion_system_contig_heap_destroy(struct ion_heap *);
 struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
 void ion_carveout_heap_destroy(struct ion_heap *);
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
 ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
 				      unsigned long align);
 void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 		       unsigned long size);
-
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
 #define ION_CARVEOUT_ALLOCATE_FAIL -1
 
 #endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
index 4239320..7af6e16 100644
--- a/drivers/gpu/ion/tegra/tegra_ion.c
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -36,8 +36,10 @@ int tegra_ion_probe(struct platform_device *pdev)
 	heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
 
 	idev = ion_device_create(NULL);
-	if (IS_ERR_OR_NULL(idev))
+	if (IS_ERR_OR_NULL(idev)) {
+		kfree(heaps);
 		return PTR_ERR(idev);
+	}
 
 	/* create the heaps as specified in the board file */
 	for (i = 0; i < num_heaps; i++) {
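
[Annotation] The tegra_ion_probe() hunk is a plain memory-leak fix: the heaps
array is allocated before ion_device_create(), so the device-creation failure
path must free it.  A condensed sketch of the probe-time unwind pattern (the
example_* names are hypothetical; note the sketch also checks the kzalloc()
result, which the original probe does not):

#include <linux/err.h>
#include <linux/slab.h>

struct example_dev;
struct example_dev *example_device_create(void);

static int example_probe(int nr_heaps)
{
	struct example_dev *idev;
	void **heaps;

	heaps = kzalloc(sizeof(*heaps) * nr_heaps, GFP_KERNEL);
	if (!heaps)
		return -ENOMEM;

	idev = example_device_create();
	if (IS_ERR_OR_NULL(idev)) {
		kfree(heaps);		/* the fix: unwind the earlier kzalloc */
		return PTR_ERR(idev);
	}

	/* ... create each heap, unwinding heaps[] and idev on failure ... */
	return 0;
}
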
diff --git a/include/linux/ion.h b/include/linux/ion.h
index b783132..4282315 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -24,6 +24,9 @@ struct ion_handle;
  * enum ion_heap_types - list of all possible types of heaps
  * @ION_HEAP_SYSTEM:		memory allocated via vmalloc
  * @ION_HEAP_SYSTEM_CONTIG:	memory allocated via kmalloc
+ * @ION_HEAP_CARVEOUT:		memory allocated from a prereserved
+ *				carveout heap, allocations are physically
+ *				contiguous
  * @ION_HEAP_END:		helper for iterating over heaps
  */
 enum ion_heap_type {
@@ -32,7 +35,7 @@ enum ion_heap_type {
 	ION_HEAP_TYPE_CARVEOUT,
 	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
 				 are at the end of this enum */
-	ION_HEAP_END,
+	ION_NUM_HEAPS,
 };
 
 #define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_SYSTEM)
@@ -52,13 +55,11 @@ struct ion_buffer;
    do not accept phys_addr_t's that would have to */
 #define ion_phys_addr_t unsigned long
 
-#define ION_NUM_HEAPS ION_HEAP_END
-
 /**
  * struct ion_platform_heap - defines a heap in the given platform
  * @type:	type of the heap from ion_heap_type enum
- * @prio:	priority of the heap when allocating (lower numbers will be
- *		allocated from first), MUST be unique
+ * @id:		unique identifier for heap.  When allocating, heaps with
+ *		lower ids will be allocated from first
  * @name:	used for debug purposes
  * @base:	base address of heap in physical memory if applicable
  * @size:	size of the heap in bytes if applicable
@@ -67,7 +68,7 @@ struct ion_buffer;
  */
 struct ion_platform_heap {
 	enum ion_heap_type type;
-	unsigned int prio;
+	unsigned int id;
 	const char *name;
 	ion_phys_addr_t base;
 	size_t size;
@@ -75,8 +76,8 @@ struct ion_platform_heap {
 
 /**
  * struct ion_platform_data - array of platform heaps passed from board file
- * @heaps:	array of platform_heap structions
  * @nr:		number of structures in the array
+ * @heaps:	array of platform_heap structures
  *
  * Provided by the board file in the form of platform data to a platform device.
  */
@@ -109,7 +110,8 @@ void ion_client_destroy(struct ion_client *client);
  * @len:	size of the allocation
  * @align:	requested allocation alignment, lots of hardware blocks have
  *		alignment requirements of some kind
- * @flags:	flags to pass along to heaps
+ * @flags:	mask of heaps to allocate from; if multiple bits are set,
+ *		heaps will be tried in order from lowest to highest order bit
  *
  * Allocate memory in one of the heaps provided in heap mask and return
  * an opaque handle to it.
@@ -265,6 +267,14 @@ struct ion_handle_data {
 	struct ion_handle *handle;
 };
 
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
 struct ion_custom_data {
 	unsigned int cmd;
 	unsigned long arg;
-- 
2.7.4
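
[Annotation] For callers, the visible effect of the prio-to-id rename is in
ion_alloc()'s flags argument, now documented as a mask of heap ids tried from
the lowest set bit upward.  A hedged usage sketch against the patched header
(the signature follows the ion.h being modified here; the client pointer and
the id values 0 and 2 are assumptions for illustration, not part of the patch):

#include <linux/ion.h>
#include <linux/mm.h>	/* PAGE_SIZE */

static struct ion_handle *example_alloc(struct ion_client *client, size_t len)
{
	/*
	 * Bit n selects the heap whose id is n; with several bits set,
	 * lower-order bits (lower ids) are tried first.
	 */
	unsigned int flags = (1 << 0) | (1 << 2);

	return ion_alloc(client, len, PAGE_SIZE, flags);
}
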