/*
 *  Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		DRM_INFO("%30s: %6dkb BOs (%d)\n",
			 vc4->bo_labels[i].name,
			 vc4->bo_labels[i].size_allocated / 1024,
			 vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			 vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
			 vc4->purgeable.purged_size / 1024,
			 vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	mutex_lock(&vc4->bo_lock);
	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}
	mutex_unlock(&vc4->bo_lock);

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);

	return 0;
}
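/* Per-client BO accounting for debugfs: vc4_debugfs_gem_info() walks
 * every DRM client's object idr and prints one line per BO, using the
 * pid/tgid recorded by vc4_gem_register_pid() at BO creation time.
 */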
struct vc4_gem_info_data {
	struct drm_file *filp;
	struct seq_file *m;
};

static int vc4_gem_one_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = (struct drm_gem_object *)ptr;
	struct vc4_gem_info_data *gem_info_data = data;
	struct vc4_file *vc4file = gem_info_data->filp->driver_priv;
	struct drm_vc4_file_private *file_priv = &vc4file->priv;

	if (!obj) {
		DRM_ERROR("failed to get drm_gem_object\n");
		return -EFAULT;
	}

	drm_gem_object_reference(obj);

	seq_printf(gem_info_data->m,
		   "%5d\t%5d\t%4d\t%4d\t\t%4d\t0x%08zx\t0x%x\t%4d\t%4d\t\t"
		   "%4d\t\t0x%p\t%6d\n",
		   file_priv->pid,
		   file_priv->tgid,
		   id,
		   kref_read(&obj->refcount) - 1,
		   obj->handle_count,
		   obj->size,
		   0, /* flags: not tracked for vc4 BOs */
		   0, /* pfnmap: not tracked for vc4 BOs */
		   obj->dma_buf ? 1 : 0,
		   obj->import_attach ? 1 : 0,
		   obj,
		   obj->name);

	drm_gem_object_unreference(obj);

	return 0;
}
int vc4_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_minor *minor = node->minor;
	struct drm_device *drm_dev = minor->dev;
	struct vc4_gem_info_data gem_info_data;
	struct drm_file *filp;

	gem_info_data.m = m;

	seq_puts(gem_info_data.m,
		 "pid\ttgid\thandle\trefcount\thcount\tsize\t\tflags\t"
		 "pfnmap\texport_to_fd\timport_from_fd\tobj_addr\t\t"
		 "name\n");

	mutex_lock(&drm_dev->struct_mutex);
	list_for_each_entry(filp, &drm_dev->filelist, lhead) {
		gem_info_data.filp = filp;

		spin_lock(&filp->table_lock);
		idr_for_each(&filp->object_idr, vc4_gem_one_info,
			     &gem_info_data);
		spin_unlock(&filp->table_lock);
	}
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;
}
#endif /* CONFIG_DEBUG_FS */
/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;
	}

	return free_slot;
}
static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}
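/* The BO cache buckets BOs by exact page count: a BO of N pages lives
 * in size_list[N - 1].  With a 4KiB PAGE_SIZE, for example, a 16KiB BO
 * maps to bucket index 3.
 */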
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}
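/* Cached BOs sit on two lists at once: a per-size bucket (size_head)
 * for O(1) reuse of an exact-size allocation, and a global
 * time-ordered list (unref_head) so vc4_bo_cache_free_old() can expire
 * anything that has sat unused for about a second.
 */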
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}
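/* Purging releases the BO's backing memory but keeps the GEM object
 * and its mmap offset alive: userspace mappings are shot down here,
 * and any later access faults into vc4_fault(), which delivers SIGBUS.
 */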
static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);
	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}
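/* Allocation strategy: try the kernel BO cache first, then fall back
 * to CMA.  On CMA failure, purge the kernel BO cache and retry, then
 * purge the userspace (madvise) cache and retry once more before
 * giving up.
 */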
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got laying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache which forces users that want to re-use the BO to
		 * restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_dump(vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}
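/* Record the creating task's pid/tgid on first use of this file so the
 * debugfs BO dump can attribute BOs to processes, and warn (at debug
 * level) if later allocations come from a different task.
 */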
static void vc4_gem_register_pid(struct drm_file *file_priv)
{
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_file_private *driver_priv = &vc4file->priv;

	if (!driver_priv->pid && !driver_priv->tgid) {
		driver_priv->pid = task_pid_nr(current);
		driver_priv->tgid = task_tgid_nr(current);
	} else {
		if (driver_priv->pid != task_pid_nr(current))
			DRM_DEBUG_KMS("wrong pid: %ld, %ld\n",
				      (unsigned long)driver_priv->pid,
				      (unsigned long)task_pid_nr(current));
		if (driver_priv->tgid != task_tgid_nr(current))
			DRM_DEBUG_KMS("wrong tgid: %ld, %ld\n",
				      (unsigned long)driver_priv->tgid,
				      (unsigned long)task_tgid_nr(current));
	}
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	if (!ret)
		vc4_gem_register_pid(file_priv);

	return ret;
}
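/* Free cached BOs that have been unused for more than a second,
 * re-arming the timer if newer entries remain.  Called with bo_lock
 * held.
 */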
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}
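/* usecnt counts active users of the BO's contents (e.g. pending execs,
 * scanout, dma-buf exports).  While it is non-zero the BO cannot be
 * purged, even if userspace has marked it VC4_MADV_DONTNEED.
 */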
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when userspace accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}
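/* Thin wrapper around drm_gem_prime_fd_to_handle() that also records
 * the importing task for the debugfs BO dump.
 */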
int vc4_drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
	int ret;

	ret = drm_gem_prime_fd_to_handle(dev, file_priv, prime_fd, handle);
	if (ret)
		return ret;

	vc4_gem_register_pid(file_priv);

	return 0;
}
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
	 *   and this information will be used when we invalidate userspace
	 *   mappings with drm_vma_node_unmap() (called from vc4_bo_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;
	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}
struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	if (!ret)
		vc4_gem_register_pid(file_priv);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory left over from allocating out of
	 * the BO cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users could do things like mmap the shader BO
	 * before it has been validated.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

	if (!ret)
		vc4_gem_register_pid(file_priv);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}
/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}
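/* Usage sketch from userspace (illustrative; drm_fd and bo_handle are
 * placeholders for a real DRM fd and GEM handle):
 *
 *	struct drm_vc4_label_bo label = {
 *		.handle = bo_handle,
 *		.len = strlen("scanout"),
 *		.name = (uintptr_t)"scanout",
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_VC4_LABEL_BO, &label);
 *
 * The label then shows up as its own row in the BO stats dumps above.
 */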