Fix sparse warnings in ion.

Mark functions, structures, and variables that are only referenced within
a single file as static, use NULL rather than 0 for pointer returns and
pointer-array initializers, declare the gfp flag masks as gfp_t, and make
the __GFP_HIGHMEM test explicitly boolean before assigning it to a bool.
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
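For reference, the changes below fall into three classes of sparse warning: missing static on file-local symbols, plain integer 0 used as a NULL pointer, and gfp flags held in a plain integer type or truncated into a bool. A minimal standalone sketch of those classes and their fixes follows; all names are made up and nothing here is ion code (in kernel code the flag variable would be gfp_t rather than unsigned int):

    #include <stdbool.h>
    #include <stddef.h>

    #define MY_FLAG_HIGH 0x2u                       /* stand-in for __GFP_HIGHMEM */

    /* sparse: "symbol 'my_helper' was not declared. Should it be static?"
     * Fix: give file-local functions and objects internal linkage. */
    static int my_helper(void)
    {
            return 0;
    }

    /* sparse: "Using plain integer as NULL pointer"
     * Fix: return and initialize pointers with NULL, not 0. */
    static char *my_lookup(bool ok)
    {
            static char buf[4];

            if (!ok)
                    return NULL;                    /* was: return 0; */
            return buf;
    }

    int main(void)
    {
            const char *names[4] = {NULL};          /* was: {0} */
            unsigned int mask = MY_FLAG_HIGH;       /* kernel code: gfp_t */
            bool high = !!(mask & MY_FLAG_HIGH);    /* force 0/1 before the bool */

            (void)names;
            (void)my_lookup(high);
            return my_helper();
    }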
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {0};
+ const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
mutex_lock(&client->lock);
mutex_unlock(&buffer->lock);
}
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
unsigned long pfn;
mutex_unlock(&buffer->lock);
}
-struct vm_operations_struct ion_vma_ops = {
+static struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
mutex_unlock(&buffer->lock);
}
-struct dma_buf_ops dma_buf_ops = {
+static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
struct sg_table *table;
int ret;
return table;
}
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
sg_free_table(buffer->sg_table);
}
kfree(table);
}
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
* This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
*/
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
{
struct page *page = virt_to_page(cpu_addr);
int ret;
return 0;
}
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
return info->table;
}
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
buffer->size);
}
-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
/* kernel memory mapping has been done at allocation time */
struct page **tmp = pages;
if (!pages)
- return 0;
+ return NULL;
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
return total_drained;
}
-int ion_heap_deferred_free(void *data)
+static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
int i;
bool high;
- high = gfp_mask & __GFP_HIGHMEM;
+ high = !!(gfp_mask & __GFP_HIGHMEM);
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
#include "ion.h"
#include "ion_priv.h"
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN | __GFP_NORETRY) &
- ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN);
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
gfp_flags = high_order_gfp_flags;
page = alloc_pages(gfp_flags, order);
if (!page)
- return 0;
+ return NULL;
ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
}
if (!page)
- return 0;
+ return NULL;
return page;
}
return -ENOMEM;
}
-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct ion_system_heap *sys_heap = container_of(heap,
kfree(table);
}
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
return ret;
}
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
return 0;
}
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
}