staging: ion: fix common struct sg_table related issues
[platform/kernel/linux-starfive.git] drivers/staging/android/ion/ion_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator generic heap helpers
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include "ion.h"

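/*
 * Map every page of a buffer's sg_table into a contiguous kernel virtual
 * range with vmap().  Cached buffers get a normal kernel mapping, uncached
 * buffers a write-combined one.
 */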
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct sg_page_iter piter;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(array_size(npages,
                                                 sizeof(struct page *)));
        struct page **tmp = pages;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sgtable_page(table, &piter, 0) {
                BUG_ON(tmp - pages >= npages);
                *tmp++ = sg_page_iter_page(&piter);
        }

        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

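/*
 * Map a buffer into a userspace VMA one page at a time, starting at the
 * VMA's page offset and stopping once the end of the VMA is reached.
 */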
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_page_iter piter;
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        int ret;

        for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
                struct page *page = sg_page_iter_page(&piter);

                ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
                if (addr >= vma->vm_end)
                        return 0;
        }

        return 0;
}

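/*
 * Zero a batch of pages through a temporary vmap() mapping; used below to
 * clear whole buffers in fixed-size chunks.
 */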
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vmap(pages, num, VM_MAP, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vunmap(addr);

        return 0;
}

static int ion_heap_sglist_zero(struct sg_table *sgt, pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sgtable_page(sgt, &piter, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

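/*
 * Zero every page of a buffer, using a kernel mapping whose cache
 * attributes match the buffer's flags.
 */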
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table, pgprot);
}

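/*
 * Deferred free support: freed buffers are queued on a per-heap free list
 * and destroyed later by the heap's kernel thread, so the actual free path
 * stays fast.  ion_heap_freelist_add() queues a buffer and wakes that
 * thread.
 */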
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

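/*
 * Drain up to @size bytes from the free list (everything if @size is 0).
 * With @skip_pools set, buffers are marked ION_PRIV_FLAG_SHRINKER_FREE so
 * the heap frees their pages outright instead of recycling them into a
 * page pool.  The spinlock is dropped around ion_buffer_destroy(), which
 * may sleep.
 */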
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

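/*
 * Per-heap kernel thread: sleep until the free list is non-empty, then pop
 * and destroy buffers one at a time, holding the lock only while the list
 * is manipulated.
 */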
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

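/*
 * Set up the free list, waitqueue and deferred-free thread for a heap.
 * The thread runs with the SCHED_IDLE policy so deferred frees only use
 * CPU time nothing else wants.
 */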
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);

        return 0;
}

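/*
 * Shrinker callbacks: report how many pages could be reclaimed (free-list
 * pages plus whatever the heap's own shrink op says it can give back) and
 * reclaim them on demand.
 */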
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;

        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);

        return total;
}

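/*
 * Reclaim up to sc->nr_to_scan pages and return the number actually freed.
 */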
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);

        return freed;
}

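/*
 * Register the heap with the core shrinker infrastructure so its free-list
 * and pooled pages can be reclaimed under memory pressure.
 */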
int ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;

        return register_shrinker(&heap->shrinker);
}
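
/*
 * Typical usage (illustrative sketch only, not part of this file): a heap
 * created with ION_HEAP_FLAG_DEFER_FREE is expected to have
 * ion_heap_init_deferred_free() called for it, after which the ION core
 * queues freed buffers via ion_heap_freelist_add() instead of destroying
 * them synchronously; a heap that implements ops->shrink should likewise
 * be registered with ion_heap_init_shrinker(), e.g.:
 *
 *      ret = ion_heap_init_deferred_free(heap);
 *      if (!ret)
 *              ret = ion_heap_init_shrinker(heap);
 */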