// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator generic heap helpers
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include "ion.h"
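
/*
 * Map every page backing the buffer's sg_table into one contiguous kernel
 * virtual range.  Cached buffers are mapped with PAGE_KERNEL; uncached
 * buffers get a write-combining mapping.
 */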
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct sg_page_iter piter;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(array_size(npages,
                                                 sizeof(struct page *)));
        struct page **tmp = pages;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sgtable_page(table, &piter, 0) {
                BUG_ON(tmp - pages >= npages);
                *tmp++ = sg_page_iter_page(&piter);
        }

        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}
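
/*
 * Map the buffer into a userspace VMA one page at a time, honouring the
 * page offset requested in vma->vm_pgoff.
 */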
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_page_iter piter;
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        int ret;

        for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
                struct page *page = sg_page_iter_page(&piter);

                ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
                if (addr >= vma->vm_end)
                        return 0;
        }

        return 0;
}
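
/*
 * Zeroing helpers: pages are vmap()'d in small batches and cleared with
 * memset() so buffers never hand stale data back to userspace.
 */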
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vmap(pages, num, VM_MAP, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vunmap(addr);

        return 0;
}

static int ion_heap_sglist_zero(struct sg_table *sgt, pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sgtable_page(sgt, &piter, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table, pgprot);
}
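
/*
 * Deferred-free list: freed buffers are queued on heap->free_list and
 * destroyed later, either by the per-heap kthread or by the shrinker.
 */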
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}
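
/*
 * Drain up to @size bytes from the free list (all of it when @size is 0).
 * With @skip_pools set, buffers are flagged ION_PRIV_FLAG_SHRINKER_FREE so
 * heaps bypass any page pools and release the memory to the system.
 */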
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}
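
/*
 * Body of the deferred-free kthread: sleep until the free list is
 * non-empty, then destroy queued buffers one at a time.
 */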
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);

        return 0;
}
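
/*
 * Shrinker callbacks: report and reclaim freeable pages, covering both the
 * deferred-free list and whatever the heap's own ->shrink() op can release.
 */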
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;

        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);

        return total;
}

static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * Shrink the free list first; there is no point in zeroing memory
         * we are just going to reclaim. Also, skip any possible page
         * pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);

        return freed;
}

int ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;

        return register_shrinker(&heap->shrinker);
}