ion: check return value from remap_pfn_range
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

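/*
 * Map a buffer's scatter-gather pages into a contiguous kernel virtual
 * mapping with vmap().  The mapping is cached or write-combined depending
 * on ION_FLAG_CACHED; ion_heap_unmap_kernel() undoes it with vunmap().
 */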
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (vaddr == NULL)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

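/*
 * Map a buffer into a userspace VMA by walking its sg_table and calling
 * remap_pfn_range() for each run of physically contiguous pages, honouring
 * the mmap offset (vma->vm_pgoff) and stopping at vma->vm_end.
 */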
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

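/*
 * Zero a batch of pages through a temporary vm_map_ram() mapping so the
 * memset is done with the requested cacheability attributes.
 */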
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

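/*
 * Zero all the pages backing a buffer.  Pages are gathered into small
 * batches and cleared via ion_heap_clear_pages() to bound the size of
 * each temporary mapping.
 */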
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;
        struct scatterlist *sg;
        int i, j, ret = 0;
        struct page *pages[32];
        int k = 0;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long len = sg->length;

                for (j = 0; j < len / PAGE_SIZE; j++) {
                        pages[k++] = page + j;
                        if (k == ARRAY_SIZE(pages)) {
                                ret = ion_heap_clear_pages(pages, k, pgprot);
                                if (ret)
                                        goto end;
                                k = 0;
                        }
                }
        }
        if (k)
                ret = ion_heap_clear_pages(pages, k, pgprot);
end:
        return ret;
}

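/*
 * Allocate (and free) pages for a buffer.  When the buffer faults in its
 * user mappings, high-order allocations are split into order-0 pages so
 * they can be mapped and freed individually.
 */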
struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
                                  unsigned int order)
{
        struct page *page = alloc_pages(gfp_flags, order);

        if (!page)
                return page;

        if (ion_buffer_fault_user_mappings(buffer))
                split_page(page, order);

        return page;
}

void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
                         unsigned int order)
{
        int i;

        if (!ion_buffer_fault_user_mappings(buffer)) {
                __free_pages(page, order);
                return;
        }
        for (i = 0; i < (1 << order); i++)
                __free_page(page + i);
}

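/*
 * Deferred-free support: freed buffers are queued on a per-heap freelist
 * under heap->lock and destroyed later by the heap's kthread.
 */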
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        rt_mutex_lock(&heap->lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        rt_mutex_unlock(&heap->lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        rt_mutex_lock(&heap->lock);
        size = heap->free_list_size;
        rt_mutex_unlock(&heap->lock);

        return size;
}

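/*
 * Drain up to @size bytes from the heap's freelist (everything if @size
 * is zero), destroying the buffers immediately.  Returns the number of
 * bytes actually drained.
 */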
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        struct ion_buffer *buffer, *tmp;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        rt_mutex_lock(&heap->lock);
        if (size == 0)
                size = heap->free_list_size;

        list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
                if (total_drained >= size)
                        break;
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                total_drained += buffer->size;
                ion_buffer_destroy(buffer);
        }
        rt_mutex_unlock(&heap->lock);

        return total_drained;
}

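/*
 * Kthread body for deferred freeing: sleep until buffers appear on the
 * freelist, then pop and destroy them one at a time, dropping heap->lock
 * before each ion_buffer_destroy() call.
 */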
int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                rt_mutex_lock(&heap->lock);
                if (list_empty(&heap->free_list)) {
                        rt_mutex_unlock(&heap->lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                rt_mutex_unlock(&heap->lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

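/*
 * Set up the freelist, its lock and waitqueue, and start the per-heap
 * deferred-free thread at SCHED_IDLE priority.
 */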
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        heap->free_list_size = 0;
        rt_mutex_init(&heap->lock);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_RET(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

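/*
 * Instantiate a heap of the type described by the platform data and fill
 * in its name and id.  Returns ERR_PTR(-EINVAL) on any failure.
 */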
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}

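/*
 * Tear down a heap created by ion_heap_create(); a NULL heap is ignored.
 */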
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}