1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2011 Google, Inc.
8 #include <linux/debugfs.h>
9 #include <linux/device.h>
10 #include <linux/dma-buf.h>
11 #include <linux/err.h>
12 #include <linux/export.h>
13 #include <linux/file.h>
14 #include <linux/freezer.h>
16 #include <linux/kthread.h>
17 #include <linux/list.h>
18 #include <linux/miscdevice.h>
20 #include <linux/mm_types.h>
21 #include <linux/rbtree.h>
22 #include <linux/sched/task.h>
23 #include <linux/slab.h>
24 #include <linux/uaccess.h>
25 #include <linux/vmalloc.h>
/*
 * The single global ION device instance; presumably assigned by
 * ion_device_create() at boot (assignment not visible in this excerpt).
 */
29 static struct ion_device *internal_dev;
32 /* this function should only be called while dev->lock is held */
/*
 * Allocate and initialize a new ion_buffer of @len bytes from @heap.
 * Returns the buffer on success or an ERR_PTR on failure.
 * NOTE(review): this excerpt is missing several original lines (error
 * labels, field assignments, closing braces) — comments below describe
 * only what the visible code establishes.
 */
33 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
34 struct ion_device *dev,
38 struct ion_buffer *buffer;
41 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)
43 return ERR_PTR(-ENOMEM);
46 buffer->flags = flags;
/* First allocation attempt via the heap's own allocator. */
50 ret = heap->ops->allocate(heap, buffer, len, flags);
/*
 * On failure, heaps with deferred free may still have reclaimable
 * memory on their freelist: drain it fully and retry once.
 * Non-deferred-free heaps cannot benefit, so they bail out here.
 */
53 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
56 ion_heap_freelist_drain(heap, 0);
57 ret = heap->ops->allocate(heap, buffer, len, flags);
/* Every heap allocator is required to populate buffer->sg_table. */
62 if (!buffer->sg_table) {
63 WARN_ONCE(1, "This heap needs to set the sgtable");
/* Per-heap accounting: buffer count, bytes, and high-water mark. */
68 spin_lock(&heap->stat_lock);
69 heap->num_of_buffers++;
70 heap->num_of_alloc_bytes += len;
71 if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
72 heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
73 spin_unlock(&heap->stat_lock);
75 INIT_LIST_HEAD(&buffer->attachments);
76 mutex_init(&buffer->lock);
/* Error path: release the heap memory (kfree of buffer presumably
 * follows on a line not shown in this excerpt). */
80 heap->ops->free(buffer);
/*
 * Immediately release a buffer's backing memory and undo the accounting
 * done in ion_buffer_create().  Deferred-free heaps call this from their
 * freelist worker; everyone else reaches it via _ion_buffer_destroy().
 * NOTE(review): trailing lines (kfree of @buffer, closing brace) are not
 * visible in this excerpt.
 */
86 void ion_buffer_destroy(struct ion_buffer *buffer)
/* A leaked kernel mapping at destroy time is a caller bug; warn once
 * and tear the mapping down so the heap memory can be freed safely. */
88 if (buffer->kmap_cnt > 0) {
89 pr_warn_once("%s: buffer still mapped in the kernel\n",
91 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
93 buffer->heap->ops->free(buffer);
/* Reverse the stats bumped at allocation time. */
94 spin_lock(&buffer->heap->stat_lock);
95 buffer->heap->num_of_buffers--;
96 buffer->heap->num_of_alloc_bytes -= buffer->size;
97 spin_unlock(&buffer->heap->stat_lock);
/*
 * Dispose of a buffer: heaps with ION_HEAP_FLAG_DEFER_FREE queue it on
 * their freelist for asynchronous freeing; otherwise it is destroyed
 * synchronously right here.
 */
102 static void _ion_buffer_destroy(struct ion_buffer *buffer)
104 struct ion_heap *heap = buffer->heap;
106 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
107 ion_heap_freelist_add(heap, buffer);
109 ion_buffer_destroy(buffer);
/*
 * Return a kernel virtual address for @buffer, creating the mapping on
 * first use.  Caller must hold buffer->lock.
 * NOTE(review): the kmap_cnt increments presumably sit on lines missing
 * from this excerpt — confirm against the full file.
 */
112 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
/* Mapping already exists: hand back the cached address. */
116 if (buffer->kmap_cnt) {
118 return buffer->vaddr;
120 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
/* map_kernel must return an ERR_PTR, never NULL; normalize a buggy
 * heap's NULL into -EINVAL so callers can use PTR_ERR(). */
121 if (WARN_ONCE(!vaddr,
122 "heap->ops->map_kernel should return ERR_PTR on error"))
123 return ERR_PTR(-EINVAL);
126 buffer->vaddr = vaddr;
/*
 * Drop one reference on the buffer's kernel mapping and tear the mapping
 * down once the count reaches zero.  Caller must hold buffer->lock.
 * NOTE(review): the decrement of kmap_cnt is on a line not visible here.
 */
131 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
134 if (!buffer->kmap_cnt) {
135 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
136 buffer->vaddr = NULL;
/*
 * Deep-copy an sg_table so each dma-buf attachment gets a private copy
 * whose dma_address fields it can fill in independently when mapped.
 * Returns the new table or ERR_PTR(-ENOMEM).
 * NOTE(review): the kfree(new_table) on the sg_alloc_table failure path
 * and the final return are on lines missing from this excerpt.
 */
140 static struct sg_table *dup_sg_table(struct sg_table *table)
142 struct sg_table *new_table;
144 struct scatterlist *sg, *new_sg;
146 new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
148 return ERR_PTR(-ENOMEM);
150 ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
153 return ERR_PTR(-ENOMEM);
/* Copy every entry, clearing any stale DMA address from the source. */
156 new_sg = new_table->sgl;
157 for_each_sgtable_sg(table, sg, i) {
158 memcpy(new_sg, sg, sizeof(*sg));
159 new_sg->dma_address = 0;
160 new_sg = sg_next(new_sg);
/* Release a table produced by dup_sg_table() (the kfree of the table
 * struct itself is on a line not shown in this excerpt). */
166 static void free_duped_table(struct sg_table *table)
168 sg_free_table(table);
/*
 * Per-attachment state: a private copy of the buffer's sg_table and a
 * node on the owning buffer's attachments list.
 * NOTE(review): a `struct device *dev` member is referenced elsewhere
 * (a->dev) but its declaration line is missing from this excerpt.
 */
172 struct ion_dma_buf_attachment {
174 struct sg_table *table;
175 struct list_head list;
/*
 * dma_buf_ops.attach: allocate per-attachment state, duplicate the
 * buffer's sg_table for this device, and link it onto the buffer's
 * attachment list (used later for cache maintenance fan-out).
 * NOTE(review): the error-handling lines for kzalloc/dup_sg_table
 * failures are missing from this excerpt.
 */
178 static int ion_dma_buf_attach(struct dma_buf *dmabuf,
179 struct dma_buf_attachment *attachment)
181 struct ion_dma_buf_attachment *a;
182 struct sg_table *table;
183 struct ion_buffer *buffer = dmabuf->priv;
185 a = kzalloc(sizeof(*a), GFP_KERNEL);
189 table = dup_sg_table(buffer->sg_table);
196 a->dev = attachment->dev;
197 INIT_LIST_HEAD(&a->list);
199 attachment->priv = a;
/* buffer->lock protects the attachments list. */
201 mutex_lock(&buffer->lock);
202 list_add(&a->list, &buffer->attachments);
203 mutex_unlock(&buffer->lock);
/*
 * dma_buf_ops.detach counterpart of ion_dma_buf_attach().  (The name's
 * "detatch" typo is historical; it is referenced by the ops table so it
 * must stay.)  The list_del and kfree(a) presumably sit on the lines
 * missing from this excerpt.
 */
208 static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
209 struct dma_buf_attachment *attachment)
211 struct ion_dma_buf_attachment *a = attachment->priv;
212 struct ion_buffer *buffer = dmabuf->priv;
214 mutex_lock(&buffer->lock);
216 mutex_unlock(&buffer->lock);
217 free_duped_table(a->table);
/*
 * dma_buf_ops.map_dma_buf: DMA-map this attachment's private sg_table
 * for @direction.  (The assignment of `table` from a->table and the
 * return paths are on lines missing from this excerpt.)
 */
222 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
223 enum dma_data_direction direction)
225 struct ion_dma_buf_attachment *a = attachment->priv;
226 struct sg_table *table;
231 ret = dma_map_sgtable(attachment->dev, table, direction, 0);
/* dma_buf_ops.unmap_dma_buf: undo ion_map_dma_buf() for this table. */
238 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
239 struct sg_table *table,
240 enum dma_data_direction direction)
242 dma_unmap_sgtable(attachment->dev, table, direction, 0);
/*
 * dma_buf_ops.mmap: map the buffer into userspace via the heap's
 * map_user hook.  Heaps without that hook cannot be mmapped.
 * NOTE(review): error-return lines are missing from this excerpt.
 */
245 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
247 struct ion_buffer *buffer = dmabuf->priv;
250 if (!buffer->heap->ops->map_user) {
251 pr_err("%s: this heap does not define a method for mapping to userspace\n",
/* Uncached buffers are mapped write-combined to userspace. */
256 if (!(buffer->flags & ION_FLAG_CACHED))
257 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
259 mutex_lock(&buffer->lock);
260 /* now map it to userspace */
261 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
262 mutex_unlock(&buffer->lock);
265 pr_err("%s: failure mapping buffer to userspace\n",
/*
 * dma_buf_ops.release: last reference to the dma-buf is gone, so tear
 * down the underlying ion_buffer (deferred or immediate depending on
 * the heap's flags).
 */
271 static void ion_dma_buf_release(struct dma_buf *dmabuf)
273 struct ion_buffer *buffer = dmabuf->priv;
275 _ion_buffer_destroy(buffer);
/*
 * dma_buf_ops.begin_cpu_access: establish a kernel mapping (if the heap
 * supports one) and sync every attached device's sg_table for CPU
 * access before the CPU touches the buffer.
 * NOTE(review): the IS_ERR(vaddr) check and final return are on lines
 * missing from this excerpt.
 */
278 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
279 enum dma_data_direction direction)
281 struct ion_buffer *buffer = dmabuf->priv;
283 struct ion_dma_buf_attachment *a;
287 * TODO: Move this elsewhere because we don't always need a vaddr
289 if (buffer->heap->ops->map_kernel) {
290 mutex_lock(&buffer->lock);
291 vaddr = ion_buffer_kmap_get(buffer);
293 ret = PTR_ERR(vaddr);
296 mutex_unlock(&buffer->lock);
/* Invalidate/sync per-attachment mappings so the CPU sees fresh data. */
299 mutex_lock(&buffer->lock);
300 list_for_each_entry(a, &buffer->attachments, list)
301 dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
304 mutex_unlock(&buffer->lock);
/*
 * dma_buf_ops.end_cpu_access: mirror of begin_cpu_access — drop the
 * kernel-mapping reference and sync every attachment's table back for
 * device access.  (The final `return 0;` is on a missing line.)
 */
308 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
309 enum dma_data_direction direction)
311 struct ion_buffer *buffer = dmabuf->priv;
312 struct ion_dma_buf_attachment *a;
314 if (buffer->heap->ops->map_kernel) {
315 mutex_lock(&buffer->lock);
316 ion_buffer_kmap_put(buffer);
317 mutex_unlock(&buffer->lock);
/* Flush CPU writes so each attached device observes them. */
320 mutex_lock(&buffer->lock);
321 list_for_each_entry(a, &buffer->attachments, list)
322 dma_sync_sgtable_for_device(a->dev, a->table, direction);
323 mutex_unlock(&buffer->lock);
/*
 * dma-buf callbacks for ION-exported buffers.
 * NOTE(review): a `.mmap = ion_mmap` entry and the closing `};` appear
 * to be on lines missing from this excerpt (ion_mmap is defined above
 * but not listed here).
 */
328 static const struct dma_buf_ops dma_buf_ops = {
329 .map_dma_buf = ion_map_dma_buf,
330 .unmap_dma_buf = ion_unmap_dma_buf,
332 .release = ion_dma_buf_release,
333 .attach = ion_dma_buf_attach,
334 .detach = ion_dma_buf_detatch,
335 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
336 .end_cpu_access = ion_dma_buf_end_cpu_access,
/*
 * Allocate a buffer of @len bytes from the highest-priority heap whose
 * id is set in @heap_id_mask, wrap it in a dma-buf, and return an fd
 * for it (negative errno on failure).
 * NOTE(review): several lines are missing from this excerpt — the len==0
 * check, the loop's `continue`/break handling, up_read(&dev->lock), and
 * the fd error path after dma_buf_fd().
 */
339 static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
341 struct ion_device *dev = internal_dev;
342 struct ion_buffer *buffer = NULL;
343 struct ion_heap *heap;
344 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
346 struct dma_buf *dmabuf;
348 pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
349 len, heap_id_mask, flags);
351 * traverse the list of heaps available in this system in priority
352 * order. If the heap type is supported by the client, and matches the
353 * request of the caller allocate from it. Repeat until allocate has
354 * succeeded or all heaps have been tried
356 len = PAGE_ALIGN(len);
/* dev->lock (read) protects the heap plist while we walk it. */
361 down_read(&dev->lock);
362 plist_for_each_entry(heap, &dev->heaps, node) {
363 /* if the caller didn't specify this heap id */
364 if (!((1 << heap->id) & heap_id_mask))
366 buffer = ion_buffer_create(heap, dev, len, flags);
376 return PTR_ERR(buffer);
/* Export the buffer as a dma-buf; the dma-buf now owns the buffer. */
378 exp_info.ops = &dma_buf_ops;
379 exp_info.size = buffer->size;
380 exp_info.flags = O_RDWR;
381 exp_info.priv = buffer;
383 dmabuf = dma_buf_export(&exp_info);
384 if (IS_ERR(dmabuf)) {
385 _ion_buffer_destroy(buffer);
386 return PTR_ERR(dmabuf);
389 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
/*
 * ION_IOC_HEAP_QUERY backend: report the number of registered heaps
 * and, if the user supplied a buffer, copy out one ion_heap_data record
 * per heap (name/type/id).
 * NOTE(review): the "buffer == NULL / cnt >= max_cnt" early-out paths,
 * cnt++ and the up_read/return are on lines missing from this excerpt.
 * Also note strncpy+manual NUL here; newer kernels use strscpy.
 */
396 static int ion_query_heaps(struct ion_heap_query *query)
398 struct ion_device *dev = internal_dev;
399 struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
400 int ret = -EINVAL, cnt = 0, max_cnt;
401 struct ion_heap *heap;
402 struct ion_heap_data hdata;
/* Zero once so padding never leaks kernel stack to userspace. */
404 memset(&hdata, 0, sizeof(hdata));
406 down_read(&dev->lock);
408 query->cnt = dev->heap_cnt;
416 max_cnt = query->cnt;
418 plist_for_each_entry(heap, &dev->heaps, node) {
/* strncpy may leave hdata.name unterminated; force the NUL below. */
419 strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
420 hdata.name[sizeof(hdata.name) - 1] = '\0';
421 hdata.type = heap->type;
422 hdata.heap_id = heap->id;
424 if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
/* One kernel-side staging area sized for the largest ioctl payload. */
441 union ion_ioctl_arg {
442 struct ion_allocation_data allocation;
443 struct ion_heap_query query;
/*
 * Reject ioctl payloads with non-zero reserved fields so they can be
 * repurposed later without ambiguity.  (The switch braces, error return
 * and default case are on lines missing from this excerpt.)
 */
446 static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
449 case ION_IOC_HEAP_QUERY:
450 if (arg->query.reserved0 ||
451 arg->query.reserved1 ||
452 arg->query.reserved2)
/*
 * Top-level ioctl dispatcher: copy the argument in, validate it,
 * dispatch per-command, and copy the result back for _IOC_READ cmds.
 * NOTE(review): the ION_IOC_ALLOC case label, error-path handling for a
 * negative fd, the default case, and the final return are on lines
 * missing from this excerpt.
 */
462 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
465 union ion_ioctl_arg data;
/* Never read past our staging union, whatever size userspace claims. */
467 if (_IOC_SIZE(cmd) > sizeof(data))
471 * The copy_from_user is unconditional here for both read and write
472 * to do the validate. If there is no write for the ioctl, the
475 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
478 ret = validate_ioctl_arg(cmd, &data);
480 pr_warn_once("%s: ioctl validate failed\n", __func__);
/* Read-only ioctls: discard whatever userspace sent after validation. */
484 if (!(_IOC_DIR(cmd) & _IOC_WRITE))
485 memset(&data, 0, sizeof(data));
492 fd = ion_alloc(data.allocation.len,
493 data.allocation.heap_id_mask,
494 data.allocation.flags);
498 data.allocation.fd = fd;
502 case ION_IOC_HEAP_QUERY:
503 ret = ion_query_heaps(&data.query);
/* Copy results back only for ioctls that declare a read direction. */
509 if (_IOC_DIR(cmd) & _IOC_READ) {
510 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
/* File operations for the /dev/ion misc device (closing `};` is on a
 * line not shown in this excerpt). */
516 static const struct file_operations ion_fops = {
517 .owner = THIS_MODULE,
518 .unlocked_ioctl = ion_ioctl,
519 .compat_ioctl = compat_ptr_ioctl,
/*
 * debugfs write hook: force the heap's shrinker to scan.  Counting first
 * sizes the scan to everything currently reclaimable.
 * NOTE(review): the handling of @val (e.g. using it as nr_to_scan when
 * non-zero) and the return are on lines missing from this excerpt.
 */
522 static int debug_shrink_set(void *data, u64 val)
524 struct ion_heap *heap = data;
525 struct shrink_control sc;
528 sc.gfp_mask = GFP_HIGHUSER;
532 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
533 sc.nr_to_scan = objs;
536 heap->shrinker.scan_objects(&heap->shrinker, &sc);
/*
 * debugfs read hook: report how many objects the heap's shrinker could
 * reclaim right now (assignment to *val is on a missing line).
 */
540 static int debug_shrink_get(void *data, u64 *val)
542 struct ion_heap *heap = data;
543 struct shrink_control sc;
546 sc.gfp_mask = GFP_HIGHUSER;
549 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
/* debugfs attribute pairing the shrink get/set hooks above. */
554 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
555 debug_shrink_set, "%llu\n");
/*
 * Register @heap with the global ION device: initialize its locks,
 * deferred-free worker and shrinker as needed, create its debugfs stat
 * nodes, and insert it into the priority-ordered heap list.
 * NOTE(review): the static heap_id counter, the debug_name buffer
 * declaration, dev->heap_cnt++ and the closing brace are on lines
 * missing from this excerpt.
 */
557 void ion_device_add_heap(struct ion_heap *heap)
559 struct ion_device *dev = internal_dev;
561 struct dentry *heap_root;
/* allocate/free are mandatory heap ops; complain loudly if absent. */
564 if (!heap->ops->allocate || !heap->ops->free)
565 pr_err("%s: can not add heap with invalid ops struct.\n",
568 spin_lock_init(&heap->free_lock);
569 spin_lock_init(&heap->stat_lock);
570 heap->free_list_size = 0;
572 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
573 ion_heap_init_deferred_free(heap);
/* Heaps that can give memory back need a registered shrinker. */
575 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
576 ret = ion_heap_init_shrinker(heap);
578 pr_err("%s: Failed to register shrinker\n", __func__);
582 heap->num_of_buffers = 0;
583 heap->num_of_alloc_bytes = 0;
584 heap->alloc_bytes_wm = 0;
/* Per-heap debugfs directory exposing the allocation statistics. */
586 heap_root = debugfs_create_dir(heap->name, dev->debug_root);
587 debugfs_create_u64("num_of_buffers",
589 &heap->num_of_buffers);
590 debugfs_create_u64("num_of_alloc_bytes",
593 &heap->num_of_alloc_bytes);
594 debugfs_create_u64("alloc_bytes_wm",
597 &heap->alloc_bytes_wm);
/* Expose a manual shrink trigger only when the shrinker is wired up. */
599 if (heap->shrinker.count_objects &&
600 heap->shrinker.scan_objects) {
601 snprintf(debug_name, 64, "%s_shrink", heap->name);
602 debugfs_create_file(debug_name,
609 down_write(&dev->lock);
610 heap->id = heap_id++;
612 * use negative heap->id to reverse the priority -- when traversing
613 * the list later attempt higher id numbers first
615 plist_node_init(&heap->node, -heap->id);
616 plist_add(&heap->node, &dev->heaps);
619 up_write(&dev->lock);
621 EXPORT_SYMBOL(ion_device_add_heap);
/*
 * Boot-time constructor (subsys_initcall): allocate the ion_device,
 * register the /dev/ion misc device, and set up debugfs plus the heap
 * list.  NOTE(review): the kfree-on-misc_register-failure path and the
 * `internal_dev = idev; return 0;` lines are missing from this excerpt.
 */
623 static int ion_device_create(void)
625 struct ion_device *idev;
628 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
632 idev->dev.minor = MISC_DYNAMIC_MINOR;
633 idev->dev.name = "ion";
634 idev->dev.fops = &ion_fops;
635 idev->dev.parent = NULL;
636 ret = misc_register(&idev->dev);
638 pr_err("ion: failed to register misc device.\n");
643 idev->debug_root = debugfs_create_dir("ion", NULL);
644 init_rwsem(&idev->lock);
645 plist_head_init(&idev->heaps);
649 subsys_initcall(ion_device_create);