platform/upstream/libdrm.git: linux-core/drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads,
40  * hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those,
44  * we need both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47  * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48  * the list traversal will, in general, need to be restarted.
49  *
50  */
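/*
 * A minimal sketch of the traversal pattern this implies (generic names,
 * not a real code path in this file): take a usage reference under
 * dev->struct_mutex, drop that mutex before taking bo->mutex, and restart
 * the list walk afterwards:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	entry = list_entry(man->lru.next, struct drm_buffer_object, lru);
 *	atomic_inc(&entry->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *
 *	mutex_lock(&entry->mutex);
 *	... operate on entry ...
 *	mutex_unlock(&entry->mutex);
 *	drm_bo_usage_deref_unlocked(&entry);
 *
 *	mutex_lock(&dev->struct_mutex);
 *	... restart the traversal ...
 *	mutex_unlock(&dev->struct_mutex);
 *
 * drm_bo_mem_force_space() below follows this shape.
 */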
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
56
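/*
 * Convert a memory type index (DRM_BO_MEM_*) to its memory-space flag bit.
 * The memory-space flags occupy bits 24 and up of the 64-bit flag word
 * (see DRM_BO_MASK_MEM), so e.g. type 0 (DRM_BO_MEM_LOCAL) maps to bit 24.
 */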
57 static inline uint64_t drm_bo_type_flags(unsigned type)
58 {
59         return (1ULL << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
93 {
94 #ifdef DRM_ODD_MM_COMPAT
95         int ret;
96
97         if (!bo->map_list.map)
98                 return 0;
99
100         ret = drm_bo_lock_kmm(bo);
101         if (ret)
102                 return ret;
103         drm_bo_unmap_virtual(bo);
104         if (old_is_pci)
105                 drm_bo_finish_unmap(bo);
106 #else
107         if (!bo->map_list.map)
108                 return 0;
109
110         drm_bo_unmap_virtual(bo);
111 #endif
112         return 0;
113 }
114
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
116 {
117 #ifdef DRM_ODD_MM_COMPAT
118         int ret;
119
120         if (!bo->map_list.map)
121                 return;
122
123         ret = drm_bo_remap_bound(bo);
124         if (ret) {
125                 DRM_ERROR("Failed to remap a bound buffer object.\n"
126                           "\tThis might cause a sigbus later.\n");
127         }
128         drm_bo_unlock_kmm(bo);
129 #endif
130 }
131
132 /*
133  * Call bo->mutex locked.
134  */
135
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
137 {
138         struct drm_device *dev = bo->dev;
139         int ret = 0;
140         uint32_t page_flags = 0;
141
142         DRM_ASSERT_LOCKED(&bo->mutex);
143         bo->ttm = NULL;
144
145         if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
146                 page_flags |= DRM_TTM_PAGE_WRITE;
147
148         switch (bo->type) {
149         case drm_bo_type_device:
150         case drm_bo_type_kernel:
151                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
152                                          page_flags, dev->bm.dummy_read_page);
153                 if (!bo->ttm)
154                         ret = -ENOMEM;
155                 break;
156         case drm_bo_type_user:
157                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
158                                          page_flags | DRM_TTM_PAGE_USER,
159                                          dev->bm.dummy_read_page);
160                 if (!bo->ttm) {
161                         ret = -ENOMEM;
162                         break;
163                 }
164                 ret = drm_ttm_set_user(bo->ttm, current,
165                                        bo->buffer_start, bo->num_pages);
166                 if (ret)
167                         return ret;
168
169                 break;
170         default:
171                 DRM_ERROR("Illegal buffer object type\n");
172                 ret = -EINVAL;
173                 break;
174         }
175
176         return ret;
177 }
178
179 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
180                                   struct drm_bo_mem_reg *mem,
181                                   int evict, int no_wait)
182 {
183         struct drm_device *dev = bo->dev;
184         struct drm_buffer_manager *bm = &dev->bm;
185         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
186         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
187         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
188         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
189         int ret = 0;
190
191         if (old_is_pci || new_is_pci ||
192             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
193                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
194         if (ret)
195                 return ret;
196
197         /*
198          * Create and bind a ttm if required.
199          */
200
201         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
202                 ret = drm_bo_add_ttm(bo);
203                 if (ret)
204                         goto out_err;
205
206                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
207                         ret = drm_ttm_bind(bo->ttm, mem);
208                         if (ret)
209                                 goto out_err;
210                 }
211
212                 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
213                         
214                         struct drm_bo_mem_reg *old_mem = &bo->mem;
215                         uint64_t save_flags = old_mem->flags;
216                         uint64_t save_proposed_flags = old_mem->proposed_flags;
217                         
218                         *old_mem = *mem;
219                         mem->mm_node = NULL;
220                         old_mem->proposed_flags = save_proposed_flags;
221                         DRM_FLAG_MASKED(save_flags, mem->flags,
222                                         DRM_BO_MASK_MEMTYPE);
223                         goto moved;
224                 }
225                 
226         }
227
228         if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
229             !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))                
230                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
231         else if (dev->driver->bo_driver->move) 
232                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
233         else
234                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
235
236         if (ret)
237                 goto out_err;
238
239 moved:
240         if (old_is_pci || new_is_pci)
241                 drm_bo_vm_post_move(bo);
242
243         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
244                 ret =
245                     dev->driver->bo_driver->invalidate_caches(dev,
246                                                               bo->mem.flags);
247                 if (ret)
248                         DRM_ERROR("Can not flush read caches\n");
249         }
250
251         DRM_FLAG_MASKED(bo->priv_flags,
252                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
253                         _DRM_BO_FLAG_EVICTED);
254
255         if (bo->mem.mm_node)
256                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
257                         bm->man[bo->mem.mem_type].gpu_offset;
258
259
260         return 0;
261
262 out_err:
263         if (old_is_pci || new_is_pci)
264                 drm_bo_vm_post_move(bo);
265
266         new_man = &bm->man[bo->mem.mem_type];
267         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
268                 drm_ttm_unbind(bo->ttm);
269                 drm_ttm_destroy(bo->ttm);
270                 bo->ttm = NULL;
271         }
272
273         return ret;
274 }
275
276 /*
277  * Call bo->mutex locked.
278  * Returns -EBUSY if the buffer is currently being rendered to or from, 0 otherwise.
279  */
280
281 static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
282 {
283         struct drm_fence_object *fence = bo->fence;
284
285         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
286                 return -EBUSY;
287
288         if (fence) {
289                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
290                         drm_fence_usage_deref_unlocked(&bo->fence);
291                         return 0;
292                 }
293                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
294                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
295                         drm_fence_usage_deref_unlocked(&bo->fence);
296                         return 0;
297                 }
298                 return -EBUSY;
299         }
300         return 0;
301 }
302
303 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
304 {
305         int ret;
306
307         mutex_lock(&bo->mutex);
308         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
309         mutex_unlock(&bo->mutex);
310         return ret;
311 }
312
313
314 /*
315  * Call bo->mutex locked.
316  * Wait until the buffer is idle.
317  */
318
319 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
320                 int no_wait, int check_unfenced)
321 {
322         int ret;
323
324         DRM_ASSERT_LOCKED(&bo->mutex);
325         while(unlikely(drm_bo_busy(bo, check_unfenced))) {
326                 if (no_wait)
327                         return -EBUSY;
328
329                 if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
330                         mutex_unlock(&bo->mutex);
331                         wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
332                         mutex_lock(&bo->mutex);
333                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
334                 }
335
336                 if (bo->fence) {
337                         struct drm_fence_object *fence;
338                         uint32_t fence_type = bo->fence_type;
339
340                         drm_fence_reference_unlocked(&fence, bo->fence);
341                         mutex_unlock(&bo->mutex);
342
343                         ret = drm_fence_object_wait(fence, lazy, !interruptible,
344                                                     fence_type);
345
346                         drm_fence_usage_deref_unlocked(&fence);
347                         mutex_lock(&bo->mutex);
348                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
349                         if (ret)
350                                 return ret;
351                 }
352
353         }
354         return 0;
355 }
356 EXPORT_SYMBOL(drm_bo_wait);
357
358 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
359 {
360         struct drm_device *dev = bo->dev;
361         struct drm_buffer_manager *bm = &dev->bm;
362
363         if (bo->fence) {
364                 if (bm->nice_mode) {
365                         unsigned long _end = jiffies + 3 * DRM_HZ;
366                         int ret;
367                         do {
368                                 ret = drm_bo_wait(bo, 0, 0, 0, 0);
369                                 if (ret && allow_errors)
370                                         return ret;
371
372                         } while (ret && !time_after_eq(jiffies, _end));
373
374                         if (bo->fence) {
375                                 bm->nice_mode = 0;
376                                 DRM_ERROR("Detected GPU lockup or "
377                                           "fence driver was taken down. "
378                                           "Evicting buffer.\n");
379                         }
380                 }
381                 if (bo->fence)
382                         drm_fence_usage_deref_unlocked(&bo->fence);
383         }
384         return 0;
385 }
386
387 /*
388  * Call dev->struct_mutex locked.
389  * Attempts to remove all private references to a buffer by expiring its
390  * fence object and removing it from lru lists and memory managers.
391  */
392
393 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
394 {
395         struct drm_device *dev = bo->dev;
396         struct drm_buffer_manager *bm = &dev->bm;
397
398         DRM_ASSERT_LOCKED(&dev->struct_mutex);
399
400         atomic_inc(&bo->usage);
401         mutex_unlock(&dev->struct_mutex);
402         mutex_lock(&bo->mutex);
403
404         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
405
406         if (bo->fence && drm_fence_object_signaled(bo->fence,
407                                                    bo->fence_type))
408                 drm_fence_usage_deref_unlocked(&bo->fence);
409
410         if (bo->fence && remove_all)
411                 (void)drm_bo_expire_fence(bo, 0);
412
413         mutex_lock(&dev->struct_mutex);
414
415         if (!atomic_dec_and_test(&bo->usage))
416                 goto out;
417
418         if (!bo->fence) {
419                 list_del_init(&bo->lru);
420                 if (bo->mem.mm_node) {
421                         drm_memrange_put_block(bo->mem.mm_node);
422                         if (bo->pinned_node == bo->mem.mm_node)
423                                 bo->pinned_node = NULL;
424                         bo->mem.mm_node = NULL;
425                 }
426                 list_del_init(&bo->pinned_lru);
427                 if (bo->pinned_node) {
428                         drm_memrange_put_block(bo->pinned_node);
429                         bo->pinned_node = NULL;
430                 }
431                 list_del_init(&bo->ddestroy);
432                 mutex_unlock(&bo->mutex);
433                 drm_bo_destroy_locked(bo);
434                 return;
435         }
436
437         if (list_empty(&bo->ddestroy)) {
438                 drm_fence_object_flush(bo->fence, bo->fence_type);
439                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
440                 schedule_delayed_work(&bm->wq,
441                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
442         }
443
444 out:
445         mutex_unlock(&bo->mutex);
446         return;
447 }
448
449 /*
450  * Verify that refcount is 0 and that there are no internal references
451  * to the buffer object. Then destroy it.
452  */
453
454 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
455 {
456         struct drm_device *dev = bo->dev;
457         struct drm_buffer_manager *bm = &dev->bm;
458
459         DRM_ASSERT_LOCKED(&dev->struct_mutex);
460
461         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
462             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
463             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
464                 if (bo->fence != NULL) {
465                         DRM_ERROR("Fence was non-zero.\n");
466                         drm_bo_cleanup_refs(bo, 0);
467                         return;
468                 }
469
470 #ifdef DRM_ODD_MM_COMPAT
471                 BUG_ON(!list_empty(&bo->vma_list));
472                 BUG_ON(!list_empty(&bo->p_mm_list));
473 #endif
474
475                 if (bo->ttm) {
476                         drm_ttm_unbind(bo->ttm);
477                         drm_ttm_destroy(bo->ttm);
478                         bo->ttm = NULL;
479                 }
480
481                 atomic_dec(&bm->count);
482
483                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
484
485                 return;
486         }
487
488         /*
489          * Some stuff is still trying to reference the buffer object.
490          * Get rid of those references.
491          */
492
493         drm_bo_cleanup_refs(bo, 0);
494
495         return;
496 }
497
498 /*
499  * Call dev->struct_mutex locked.
500  */
501
502 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
503 {
504         struct drm_buffer_manager *bm = &dev->bm;
505
506         struct drm_buffer_object *entry, *nentry;
507         struct list_head *list, *next;
508
509         list_for_each_safe(list, next, &bm->ddestroy) {
510                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
511
512                 nentry = NULL;
513                 if (next != &bm->ddestroy) {
514                         nentry = list_entry(next, struct drm_buffer_object,
515                                             ddestroy);
516                         atomic_inc(&nentry->usage);
517                 }
518
519                 drm_bo_cleanup_refs(entry, remove_all);
520
521                 if (nentry)
522                         atomic_dec(&nentry->usage);
523         }
524 }
525
526 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
527 static void drm_bo_delayed_workqueue(void *data)
528 #else
529 static void drm_bo_delayed_workqueue(struct work_struct *work)
530 #endif
531 {
532 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
533         struct drm_device *dev = (struct drm_device *) data;
534         struct drm_buffer_manager *bm = &dev->bm;
535 #else
536         struct drm_buffer_manager *bm =
537             container_of(work, struct drm_buffer_manager, wq.work);
538         struct drm_device *dev = container_of(bm, struct drm_device, bm);
539 #endif
540
541         DRM_DEBUG("Delayed delete Worker\n");
542
543         mutex_lock(&dev->struct_mutex);
544         if (!bm->initialized) {
545                 mutex_unlock(&dev->struct_mutex);
546                 return;
547         }
548         drm_bo_delayed_delete(dev, 0);
549         if (bm->initialized && !list_empty(&bm->ddestroy)) {
550                 schedule_delayed_work(&bm->wq,
551                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
552         }
553         mutex_unlock(&dev->struct_mutex);
554 }
555
556 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
557 {
558         struct drm_buffer_object *tmp_bo = *bo;
559         *bo = NULL;
560
561         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
562
563         if (atomic_dec_and_test(&tmp_bo->usage))
564                 drm_bo_destroy_locked(tmp_bo);
565 }
566 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
567
568 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
569                                      struct drm_user_object *uo)
570 {
571         struct drm_buffer_object *bo =
572             drm_user_object_entry(uo, struct drm_buffer_object, base);
573
574         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
575
576         drm_bo_takedown_vm_locked(bo);
577         drm_bo_usage_deref_locked(&bo);
578 }
579
580 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
581 {
582         struct drm_buffer_object *tmp_bo = *bo;
583         struct drm_device *dev = tmp_bo->dev;
584
585         *bo = NULL;
586         if (atomic_dec_and_test(&tmp_bo->usage)) {
587                 mutex_lock(&dev->struct_mutex);
588                 if (atomic_read(&tmp_bo->usage) == 0)
589                         drm_bo_destroy_locked(tmp_bo);
590                 mutex_unlock(&dev->struct_mutex);
591         }
592 }
593 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
594
595 void drm_putback_buffer_objects(struct drm_device *dev)
596 {
597         struct drm_buffer_manager *bm = &dev->bm;
598         struct list_head *list = &bm->unfenced;
599         struct drm_buffer_object *entry, *next;
600
601         mutex_lock(&dev->struct_mutex);
602         list_for_each_entry_safe(entry, next, list, lru) {
603                 atomic_inc(&entry->usage);
604                 mutex_unlock(&dev->struct_mutex);
605
606                 mutex_lock(&entry->mutex);
607                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
608                 mutex_lock(&dev->struct_mutex);
609
610                 list_del_init(&entry->lru);
611                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
612                 wake_up_all(&entry->event_queue);
613
614                 /*
615                  * FIXME: Might want to put back on head of list
616                  * instead of tail here.
617                  */
618
619                 drm_bo_add_to_lru(entry);
620                 mutex_unlock(&entry->mutex);
621                 drm_bo_usage_deref_locked(&entry);
622         }
623         mutex_unlock(&dev->struct_mutex);
624 }
625 EXPORT_SYMBOL(drm_putback_buffer_objects);
626
627
628 /*
629  * Note. The caller has to register (if applicable)
630  * and deregister fence object usage.
631  */
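/*
 * A typical calling sequence (sketch only; the exact driver-side flow varies):
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (ret)
 *		return ret;
 *	... hand the fence to user space or wait on it ...
 *	drm_fence_usage_deref_unlocked(&fence);
 */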
632
633 int drm_fence_buffer_objects(struct drm_device *dev,
634                              struct list_head *list,
635                              uint32_t fence_flags,
636                              struct drm_fence_object *fence,
637                              struct drm_fence_object **used_fence)
638 {
639         struct drm_buffer_manager *bm = &dev->bm;
640         struct drm_buffer_object *entry;
641         uint32_t fence_type = 0;
642         uint32_t fence_class = ~0;
643         int count = 0;
644         int ret = 0;
645         struct list_head *l;
646
647         mutex_lock(&dev->struct_mutex);
648
649         if (!list)
650                 list = &bm->unfenced;
651
652         if (fence)
653                 fence_class = fence->fence_class;
654
655         list_for_each_entry(entry, list, lru) {
656                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
657                 fence_type |= entry->new_fence_type;
658                 if (fence_class == ~0)
659                         fence_class = entry->new_fence_class;
660                 else if (entry->new_fence_class != fence_class) {
661                         DRM_ERROR("Mismatched fence classes on unfenced list: "
662                                   "%d and %d.\n",
663                                   fence_class,
664                                   entry->new_fence_class);
665                         ret = -EINVAL;
666                         goto out;
667                 }
668                 count++;
669         }
670
671         if (!count) {
672                 ret = -EINVAL;
673                 goto out;
674         }
675
676         if (fence) {
677                 if ((fence_type & fence->type) != fence_type ||
678                     (fence->fence_class != fence_class)) {
679                         DRM_ERROR("Given fence doesn't match buffers "
680                                   "on unfenced list.\n");
681                         ret = -EINVAL;
682                         goto out;
683                 }
684         } else {
685                 mutex_unlock(&dev->struct_mutex);
686                 ret = drm_fence_object_create(dev, fence_class, fence_type,
687                                               fence_flags | DRM_FENCE_FLAG_EMIT,
688                                               &fence);
689                 mutex_lock(&dev->struct_mutex);
690                 if (ret)
691                         goto out;
692         }
693
694         count = 0;
695         l = list->next;
696         while (l != list) {
697                 prefetch(l->next);
698                 entry = list_entry(l, struct drm_buffer_object, lru);
699                 atomic_inc(&entry->usage);
700                 mutex_unlock(&dev->struct_mutex);
701                 mutex_lock(&entry->mutex);
702                 mutex_lock(&dev->struct_mutex);
703                 list_del_init(l);
704                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
705                         count++;
706                         if (entry->fence)
707                                 drm_fence_usage_deref_locked(&entry->fence);
708                         entry->fence = drm_fence_reference_locked(fence);
709                         entry->fence_class = entry->new_fence_class;
710                         entry->fence_type = entry->new_fence_type;
711                         DRM_FLAG_MASKED(entry->priv_flags, 0,
712                                         _DRM_BO_FLAG_UNFENCED);
713                         wake_up_all(&entry->event_queue);
714                         drm_bo_add_to_lru(entry);
715                 }
716                 mutex_unlock(&entry->mutex);
717                 drm_bo_usage_deref_locked(&entry);
718                 l = list->next;
719         }
720         DRM_DEBUG("Fenced %d buffers\n", count);
721 out:
722         mutex_unlock(&dev->struct_mutex);
723         *used_fence = fence;
724         return ret;
725 }
726 EXPORT_SYMBOL(drm_fence_buffer_objects);
727
728 /*
729  * bo->mutex locked
730  */
731
732 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
733                         int no_wait)
734 {
735         int ret = 0;
736         struct drm_device *dev = bo->dev;
737         struct drm_bo_mem_reg evict_mem;
738
739         /*
740          * Someone might have modified the buffer before we took the
741          * buffer mutex.
742          */
743
744         do {
745                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
746
747                 if (unlikely(bo->mem.flags &
748                              (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
749                         goto out_unlock;
750                 if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
751                         goto out_unlock;
752                 if (unlikely(bo->mem.mem_type != mem_type))
753                         goto out_unlock;
754                 ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
755                 if (ret)
756                         goto out_unlock;
757
758         } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
759
760         evict_mem = bo->mem;
761         evict_mem.mm_node = NULL;
762
763
764         evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
765
766         mutex_lock(&dev->struct_mutex);
767         list_del_init(&bo->lru);
768         mutex_unlock(&dev->struct_mutex);
769
770         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
771
772         if (ret) {
773                 if (ret != -EAGAIN)
774                         DRM_ERROR("Failed to find memory space for "
775                                   "buffer 0x%p eviction.\n", bo);
776                 goto out;
777         }
778
779         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
780
781         if (ret) {
782                 if (ret != -EAGAIN)
783                         DRM_ERROR("Buffer eviction failed\n");
784                 goto out;
785         }
786
787         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
788                         _DRM_BO_FLAG_EVICTED);
789
790 out:
791         mutex_lock(&dev->struct_mutex);
792         if (evict_mem.mm_node) {
793                 if (evict_mem.mm_node != bo->pinned_node)
794                         drm_memrange_put_block(evict_mem.mm_node);
795                 evict_mem.mm_node = NULL;
796         }
797         drm_bo_add_to_lru(bo);
798         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
799 out_unlock:
800         mutex_unlock(&dev->struct_mutex);
801
802         return ret;
803 }
804
805 /**
806  * Repeatedly evict memory from the LRU for @mem_type until we create enough
807  * space, or we've evicted everything and there isn't enough space.
808  */
809 static int drm_bo_mem_force_space(struct drm_device *dev,
810                                   struct drm_bo_mem_reg *mem,
811                                   uint32_t mem_type, int no_wait)
812 {
813         struct drm_memrange_node *node;
814         struct drm_buffer_manager *bm = &dev->bm;
815         struct drm_buffer_object *entry;
816         struct drm_mem_type_manager *man = &bm->man[mem_type];
817         struct list_head *lru;
818         unsigned long num_pages = mem->num_pages;
819         int ret;
820
821         mutex_lock(&dev->struct_mutex);
822         do {
823                 node = drm_memrange_search_free(&man->manager, num_pages,
824                                           mem->page_alignment, 1);
825                 if (node)
826                         break;
827
828                 lru = &man->lru;
829                 if (lru->next == lru)
830                         break;
831
832                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
833                 atomic_inc(&entry->usage);
834                 mutex_unlock(&dev->struct_mutex);
835                 mutex_lock(&entry->mutex);
836                 ret = drm_bo_evict(entry, mem_type, no_wait);
837                 mutex_unlock(&entry->mutex);
838                 drm_bo_usage_deref_unlocked(&entry);
839                 if (ret)
840                         return ret;
841                 mutex_lock(&dev->struct_mutex);
842         } while (1);
843
844         if (!node) {
845                 mutex_unlock(&dev->struct_mutex);
846                 return -ENOMEM;
847         }
848
849         node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
850         if (unlikely(!node)) {
851                 mutex_unlock(&dev->struct_mutex);
852                 return -ENOMEM;
853         }
854
855         mutex_unlock(&dev->struct_mutex);
856         mem->mm_node = node;
857         mem->mem_type = mem_type;
858         return 0;
859 }
860
861 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
862                                 int disallow_fixed,
863                                 uint32_t mem_type,
864                                 uint64_t mask, uint32_t *res_mask)
865 {
866         uint64_t cur_flags = drm_bo_type_flags(mem_type);
867         uint64_t flag_diff;
868
869         if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
870                 return 0;
871         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
872                 cur_flags |= DRM_BO_FLAG_CACHED;
873         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
874                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
875         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
876                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
877
878         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
879                 return 0;
880
881         if (mem_type == DRM_BO_MEM_LOCAL) {
882                 *res_mask = cur_flags;
883                 return 1;
884         }
885
886         flag_diff = (mask ^ cur_flags);
887         if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
888                 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
889
890         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
891             (!(mask & DRM_BO_FLAG_CACHED) ||
892              (mask & DRM_BO_FLAG_FORCE_CACHING)))
893                 return 0;
894
895         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
896             ((mask & DRM_BO_FLAG_MAPPABLE) ||
897              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
898                 return 0;
899
900         *res_mask = cur_flags;
901         return 1;
902 }
903
904 /**
905  * Creates space for memory region @mem according to its type.
906  *
907  * This function first searches for free space in compatible memory types in
908  * the priority order defined by the driver.  If free space isn't found, then
909  * drm_bo_mem_force_space is attempted in priority order to evict and find
910  * space.
911  */
912 int drm_bo_mem_space(struct drm_buffer_object *bo,
913                      struct drm_bo_mem_reg *mem, int no_wait)
914 {
915         struct drm_device *dev = bo->dev;
916         struct drm_buffer_manager *bm = &dev->bm;
917         struct drm_mem_type_manager *man;
918
919         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
920         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
921         uint32_t i;
922         uint32_t mem_type = DRM_BO_MEM_LOCAL;
923         uint32_t cur_flags;
924         int type_found = 0;
925         int type_ok = 0;
926         int has_eagain = 0;
927         struct drm_memrange_node *node = NULL;
928         int ret;
929
930         mem->mm_node = NULL;
931         for (i = 0; i < num_prios; ++i) {
932                 mem_type = prios[i];
933                 man = &bm->man[mem_type];
934
935                 type_ok = drm_bo_mt_compatible(man,
936                                                bo->type == drm_bo_type_user,
937                                                mem_type, mem->proposed_flags,
938                                                &cur_flags);
939
940                 if (!type_ok)
941                         continue;
942
943                 if (mem_type == DRM_BO_MEM_LOCAL)
944                         break;
945
946                 if ((mem_type == bo->pinned_mem_type) &&
947                     (bo->pinned_node != NULL)) {
948                         node = bo->pinned_node;
949                         break;
950                 }
951
952                 mutex_lock(&dev->struct_mutex);
953                 if (man->has_type && man->use_type) {
954                         type_found = 1;
955                         node = drm_memrange_search_free(&man->manager, mem->num_pages,
956                                                   mem->page_alignment, 1);
957                         if (node)
958                                 node = drm_memrange_get_block(node, mem->num_pages,
959                                                         mem->page_alignment);
960                 }
961                 mutex_unlock(&dev->struct_mutex);
962                 if (node)
963                         break;
964         }
965
966         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
967                 mem->mm_node = node;
968                 mem->mem_type = mem_type;
969                 mem->flags = cur_flags;
970                 return 0;
971         }
972
973         if (!type_found)
974                 return -EINVAL;
975
976         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
977         prios = dev->driver->bo_driver->mem_busy_prio;
978
979         for (i = 0; i < num_prios; ++i) {
980                 mem_type = prios[i];
981                 man = &bm->man[mem_type];
982
983                 if (!man->has_type)
984                         continue;
985
986                 if (!drm_bo_mt_compatible(man,
987                                           bo->type == drm_bo_type_user,
988                                           mem_type,
989                                           mem->proposed_flags,
990                                           &cur_flags))
991                         continue;
992
993                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
994
995                 if (ret == 0 && mem->mm_node) {
996                         mem->flags = cur_flags;
997                         return 0;
998                 }
999
1000                 if (ret == -EAGAIN)
1001                         has_eagain = 1;
1002         }
1003
1004         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
1005         return ret;
1006 }
1007 EXPORT_SYMBOL(drm_bo_mem_space);
1008
1009 /*
1010  * drm_bo_modify_proposed_flags:
1011  *
1012  * @bo: the buffer object getting new flags
1013  *
1014  * @new_flags: the new set of proposed flag bits
1015  *
1016  * @new_mask: the mask of bits changed in new_flags
1017  *
1018  * Modify the proposed_flags bits in @bo
1019  */
1020 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
1021                                          uint64_t new_flags, uint64_t new_mask)
1022 {
1023         uint32_t new_access;
1024
1025         /* Copy unchanging bits from existing proposed_flags */
1026         DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
1027          
1028         if (bo->type == drm_bo_type_user &&
1029             ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1030              (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1031                 DRM_ERROR("User buffers require cache-coherent memory.\n");
1032                 return -EINVAL;
1033         }
1034
1035         if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1036                 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
1037                 return -EPERM;
1038         }
1039
1040         if (likely(new_mask & DRM_BO_MASK_MEM) &&
1041             (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1042             !DRM_SUSER(DRM_CURPROC)) {
1043                 if (likely(bo->mem.flags & new_flags & new_mask &
1044                            DRM_BO_MASK_MEM))
1045                         new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1046                                 (bo->mem.flags & DRM_BO_MASK_MEM);
1047                 else {
1048                         DRM_ERROR("Incompatible memory type specification "
1049                                   "for NO_EVICT buffer.\n");
1050                         return -EPERM;
1051                 }
1052         }
1053
1054         if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1055                 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1056                 return -EPERM;
1057         }
1058
1059         new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1060                                   DRM_BO_FLAG_READ);
1061
1062         if (new_access == 0) {
1063                 DRM_ERROR("Invalid buffer object rwx properties\n");
1064                 return -EINVAL;
1065         }
1066
1067         bo->mem.proposed_flags = new_flags;
1068         return 0;
1069 }
1070
1071 /*
1072  * Call dev->struct_mutex locked.
1073  */
1074
1075 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1076                                               uint32_t handle, int check_owner)
1077 {
1078         struct drm_user_object *uo;
1079         struct drm_buffer_object *bo;
1080
1081         uo = drm_lookup_user_object(file_priv, handle);
1082
1083         if (!uo || (uo->type != drm_buffer_type)) {
1084                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1085                 return NULL;
1086         }
1087
1088         if (check_owner && file_priv != uo->owner) {
1089                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1090                         return NULL;
1091         }
1092
1093         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1094         atomic_inc(&bo->usage);
1095         return bo;
1096 }
1097 EXPORT_SYMBOL(drm_lookup_buffer_object);
1098
1099 /*
1100  * Call bo->mutex locked.
1101  * Returns -EBUSY if the buffer is currently being rendered to or from, 0 otherwise.
1102  * Unlike drm_bo_busy(), this function does not do any fence flushing.
1103  */
1104
1105 static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
1106 {
1107         struct drm_fence_object *fence = bo->fence;
1108
1109         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1110                 return -EBUSY;
1111
1112         if (fence) {
1113                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1114                         drm_fence_usage_deref_unlocked(&bo->fence);
1115                         return 0;
1116                 }
1117                 return -EBUSY;
1118         }
1119         return 0;
1120 }
1121
1122 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1123 {
1124         int ret = 0;
1125
1126         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1127         if (bo->mem.mm_node)
1128                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1129         return ret;
1130 }
1131
1132 EXPORT_SYMBOL(drm_bo_evict_cached);
1133 /*
1134  * Wait until a buffer is unmapped.
1135  */
1136
1137 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1138 {
1139         int ret = 0;
1140
1141         if (likely(atomic_read(&bo->mapped) == 0))
1142                 return 0;
1143
1144         if (unlikely(no_wait))
1145                 return -EBUSY;
1146
1147         do {
1148                 mutex_unlock(&bo->mutex);
1149                 ret = wait_event_interruptible(bo->event_queue,
1150                                                atomic_read(&bo->mapped) == 0);
1151                 mutex_lock(&bo->mutex);
1152                 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
1153
1154                 if (ret == -ERESTARTSYS)
1155                         ret = -EAGAIN;
1156         } while((ret == 0) && atomic_read(&bo->mapped) > 0);
1157
1158         return ret;
1159 }
1160
1161 /*
1162  * Fill in the ioctl reply argument with buffer info.
1163  * Bo locked.
1164  */
1165
1166 void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1167                          struct drm_bo_info_rep *rep)
1168 {
1169         if (!rep)
1170                 return;
1171
1172         rep->handle = bo->base.hash.key;
1173         rep->flags = bo->mem.flags;
1174         rep->size = bo->num_pages * PAGE_SIZE;
1175         rep->offset = bo->offset;
1176
1177         /*
1178          * drm_bo_type_device buffers have user-visible
1179          * handles which can be used to share the buffer across
1180          * processes. Hand that back to the application.
1181          */
1182         if (bo->type == drm_bo_type_device)
1183                 rep->arg_handle = bo->map_list.user_token;
1184         else
1185                 rep->arg_handle = 0;
1186
1187         rep->proposed_flags = bo->mem.proposed_flags;
1188         rep->buffer_start = bo->buffer_start;
1189         rep->fence_flags = bo->fence_type;
1190         rep->rep_flags = 0;
1191         rep->page_alignment = bo->mem.page_alignment;
1192
1193         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
1194                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1195                                 DRM_BO_REP_BUSY);
1196         }
1197 }
1198 EXPORT_SYMBOL(drm_bo_fill_rep_arg);
1199
1200 /*
1201  * Wait for buffer idle and register that we've mapped the buffer.
1202  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1203  * so that if the client dies, the mapping is automatically
1204  * unregistered.
1205  */
1206
1207 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1208                                  uint32_t map_flags, unsigned hint,
1209                                  struct drm_bo_info_rep *rep)
1210 {
1211         struct drm_buffer_object *bo;
1212         struct drm_device *dev = file_priv->minor->dev;
1213         int ret = 0;
1214         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1215
1216         mutex_lock(&dev->struct_mutex);
1217         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1218         mutex_unlock(&dev->struct_mutex);
1219
1220         if (!bo)
1221                 return -EINVAL;
1222
1223         mutex_lock(&bo->mutex);
1224         do {
1225                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1226
1227                 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1228                 if (unlikely(ret))
1229                         goto out;
1230
1231                 if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1232                         drm_bo_evict_cached(bo);
1233
1234         } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1235
1236         atomic_inc(&bo->mapped);
1237         mutex_lock(&dev->struct_mutex);
1238         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1239         mutex_unlock(&dev->struct_mutex);
1240         if (ret) {
1241                 if (atomic_dec_and_test(&bo->mapped))
1242                         wake_up_all(&bo->event_queue);
1243
1244         } else
1245                 drm_bo_fill_rep_arg(bo, rep);
1246
1247  out:
1248         mutex_unlock(&bo->mutex);
1249         drm_bo_usage_deref_unlocked(&bo);
1250
1251         return ret;
1252 }
1253
1254 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1255 {
1256         struct drm_device *dev = file_priv->minor->dev;
1257         struct drm_buffer_object *bo;
1258         struct drm_ref_object *ro;
1259         int ret = 0;
1260
1261         mutex_lock(&dev->struct_mutex);
1262
1263         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1264         if (!bo) {
1265                 ret = -EINVAL;
1266                 goto out;
1267         }
1268
1269         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1270         if (!ro) {
1271                 ret = -EINVAL;
1272                 goto out;
1273         }
1274
1275         drm_remove_ref_object(file_priv, ro);
1276         drm_bo_usage_deref_locked(&bo);
1277 out:
1278         mutex_unlock(&dev->struct_mutex);
1279         return ret;
1280 }
1281
1282 /*
1283  * Call struct-sem locked.
1284  * Call dev->struct_mutex locked.
1285
1286 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1287                                          struct drm_user_object *uo,
1288                                          enum drm_ref_type action)
1289 {
1290         struct drm_buffer_object *bo =
1291             drm_user_object_entry(uo, struct drm_buffer_object, base);
1292
1293         /*
1294          * We DON'T want to take the bo->mutex here, because we want to
1295          * hold it while waiting for the buffer to become unmapped.
1296          */
1297
1298         BUG_ON(action != _DRM_REF_TYPE1);
1299
1300         if (atomic_dec_and_test(&bo->mapped))
1301                 wake_up_all(&bo->event_queue);
1302 }
1303
1304 /*
1305  * bo->mutex locked.
1306  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1307  */
1308
1309 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1310                        int no_wait, int move_unfenced)
1311 {
1312         struct drm_device *dev = bo->dev;
1313         struct drm_buffer_manager *bm = &dev->bm;
1314         int ret = 0;
1315         struct drm_bo_mem_reg mem;
1316
1317         BUG_ON(bo->fence != NULL);
1318
1319         mem.num_pages = bo->num_pages;
1320         mem.size = mem.num_pages << PAGE_SHIFT;
1321         mem.proposed_flags = new_mem_flags;
1322         mem.page_alignment = bo->mem.page_alignment;
1323
1324         mutex_lock(&bm->evict_mutex);
1325         mutex_lock(&dev->struct_mutex);
1326         list_del_init(&bo->lru);
1327         mutex_unlock(&dev->struct_mutex);
1328
1329         /*
1330          * Determine where to move the buffer.
1331          */
1332         ret = drm_bo_mem_space(bo, &mem, no_wait);
1333         if (ret)
1334                 goto out_unlock;
1335
1336         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1337
1338 out_unlock:
1339         mutex_lock(&dev->struct_mutex);
1340         if (ret || !move_unfenced) {
1341                 if (mem.mm_node) {
1342                         if (mem.mm_node != bo->pinned_node)
1343                                 drm_memrange_put_block(mem.mm_node);
1344                         mem.mm_node = NULL;
1345                 }
1346                 drm_bo_add_to_lru(bo);
1347                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1348                         wake_up_all(&bo->event_queue);
1349                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1350                                         _DRM_BO_FLAG_UNFENCED);
1351                 }
1352         } else {
1353                 list_add_tail(&bo->lru, &bm->unfenced);
1354                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1355                                 _DRM_BO_FLAG_UNFENCED);
1356         }
1357         mutex_unlock(&dev->struct_mutex);
1358         mutex_unlock(&bm->evict_mutex);
1359         return ret;
1360 }
1361
1362 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1363 {
1364         uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1365
1366         if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1367                 return 0;
1368         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1369             (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1370              (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1371                 return 0;
1372
1373         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1374             ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1375              (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1376                 return 0;
1377         return 1;
1378 }
1379
1380 /**
1381  * drm_buffer_object_validate:
1382  *
1383  * @bo: the buffer object to modify
1384  *
1385  * @fence_class: the new fence class covering this buffer
1386  *
1387  * @move_unfenced: a boolean indicating whether switching the
1388  * memory space of this buffer should cause the buffer to
1389  * be placed on the unfenced list.
1390  *
1391  * @no_wait: whether this function should return -EBUSY instead
1392  * of waiting.
1393  *
1394  * Change buffer access parameters. This can involve moving
1395  * the buffer to the correct memory type, pinning the buffer
1396  * or changing the class/type of fence covering this buffer
1397  *
1398  * Must be called with bo locked.
1399  */
1400
1401 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1402                                       uint32_t fence_class,
1403                                       int move_unfenced, int no_wait,
1404                                       int move_buffer)
1405 {
1406         struct drm_device *dev = bo->dev;
1407         struct drm_buffer_manager *bm = &dev->bm;
1408         int ret;
1409
1410         if (move_buffer) {
1411                 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1412                                          move_unfenced);
1413                 if (ret) {
1414                         if (ret != -EAGAIN)
1415                                 DRM_ERROR("Failed moving buffer.\n");
1416                         if (ret == -ENOMEM)
1417                                 DRM_ERROR("Out of aperture space or "
1418                                           "DRM memory quota.\n");
1419                         return ret;
1420                 }
1421         }
1422
1423         /*
1424          * Pinned buffers.
1425          */
1426
1427         if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1428                 bo->pinned_mem_type = bo->mem.mem_type;
1429                 mutex_lock(&dev->struct_mutex);
1430                 list_del_init(&bo->pinned_lru);
1431                 drm_bo_add_to_pinned_lru(bo);
1432
1433                 if (bo->pinned_node != bo->mem.mm_node) {
1434                         if (bo->pinned_node != NULL)
1435                                 drm_memrange_put_block(bo->pinned_node);
1436                         bo->pinned_node = bo->mem.mm_node;
1437                 }
1438
1439                 mutex_unlock(&dev->struct_mutex);
1440
1441         } else if (bo->pinned_node != NULL) {
1442
1443                 mutex_lock(&dev->struct_mutex);
1444
1445                 if (bo->pinned_node != bo->mem.mm_node)
1446                         drm_memrange_put_block(bo->pinned_node);
1447
1448                 list_del_init(&bo->pinned_lru);
1449                 bo->pinned_node = NULL;
1450                 mutex_unlock(&dev->struct_mutex);
1451
1452         }
1453
1454         /*
1455          * We might need to add a TTM.
1456          */
1457
1458         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1459                 ret = drm_bo_add_ttm(bo);
1460                 if (ret)
1461                         return ret;
1462         }
1463         /*
1464          * Validation has succeeded, move the access and other
1465          * non-mapping-related flag bits from the proposed flags to
1466          * the active flags
1467          */
1468
1469         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1470
1471         /*
1472          * Finally, adjust lru to be sure.
1473          */
1474
1475         mutex_lock(&dev->struct_mutex);
1476         list_del(&bo->lru);
1477         if (move_unfenced) {
1478                 list_add_tail(&bo->lru, &bm->unfenced);
1479                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1480                                 _DRM_BO_FLAG_UNFENCED);
1481         } else {
1482                 drm_bo_add_to_lru(bo);
1483                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1484                         wake_up_all(&bo->event_queue);
1485                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1486                                         _DRM_BO_FLAG_UNFENCED);
1487                 }
1488         }
1489         mutex_unlock(&dev->struct_mutex);
1490
1491         return 0;
1492 }
1493
1494 /*
1495  * This function is called with bo->mutex locked, but may release it
1496  * temporarily to wait for events.
1497  */
1498
1499 static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
1500                                        uint64_t flags,
1501                                        uint64_t mask,
1502                                        uint32_t hint,
1503                                        uint32_t fence_class,
1504                                        int no_wait,
1505                                        int *move_buffer)
1506 {
1507         struct drm_device *dev = bo->dev;
1508         struct drm_bo_driver *driver = dev->driver->bo_driver;
1509         uint32_t ftype;
1510
1511         int ret;
1512
1513         DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1514                   (unsigned long long) bo->mem.proposed_flags,
1515                   (unsigned long long) bo->mem.flags);
1516
1517         ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1518         if (ret)
1519                 return ret;
1520
1521         ret = drm_bo_wait_unmapped(bo, no_wait);
1522         if (ret)
1523                 return ret;
1524
1525         ret = driver->fence_type(bo, &fence_class, &ftype);
1526
1527         if (ret) {
1528                 DRM_ERROR("Driver did not support given buffer permissions.\n");
1529                 return ret;
1530         }
1531
1532         /*
1533          * We're switching the command submission mechanism,
1534          * or we cannot simply rely on the hardware serializing for us.
1535          * Insert a driver-dependent barrier or wait for buffer idle.
1536          */
1537
1538         if ((fence_class != bo->fence_class) ||
1539             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1540
1541                 ret = -EINVAL;
1542                 if (driver->command_stream_barrier) {
1543                         ret = driver->command_stream_barrier(bo,
1544                                                              fence_class,
1545                                                              ftype,
1546                                                              no_wait);
1547                 }
1548                 if (ret && ret != -EAGAIN)
1549                         ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1550
1551                 if (ret)
1552                         return ret;
1553         }
1554
1555         bo->new_fence_class = fence_class;
1556         bo->new_fence_type = ftype;
1557
1558         /*
1559          * Check whether we need to move buffer.
1560          */
1561
1562         *move_buffer = 0;
1563         if (!drm_bo_mem_compat(&bo->mem)) {
1564                 *move_buffer = 1;
1565                 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1566         }
1567
1568         return ret;
1569 }
1570
1571 /**
1572  * drm_bo_do_validate:
1573  *
1574  * @bo: the buffer object
1575  *
1576  * @flags: access rights, mapping parameters and cacheability. See
1577  * the DRM_BO_FLAG_* values in drm.h
1578  *
1579  * @mask: Which flag values to change; this allows callers to modify
1580  * things without knowing the current state of other flags.
1581  *
1582  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1583  * values in drm.h.
1584  *
1585  * @fence_class: a driver-specific way of doing fences. Presumably,
1586  * this would be used if the driver had more than one submission and
1587  * fencing mechanism. At this point, there isn't any use of this
1588  * from the user mode code.
1589  *
1590  * @rep: To be stuffed with the reply from validation
1591  * 
1592  * 'validate' a buffer object. This changes where the buffer is
1593  * located, along with changing access modes.
1594  */
1595
1596 int drm_bo_do_validate(struct drm_buffer_object *bo,
1597                        uint64_t flags, uint64_t mask, uint32_t hint,
1598                        uint32_t fence_class,
1599                        struct drm_bo_info_rep *rep)
1600 {
1601         int ret;
1602         int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1603         int move_buffer;
1604
1605         mutex_lock(&bo->mutex);
1606
1607         do {
1608                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1609
1610                 ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
1611                                                   fence_class, no_wait,
1612                                                   &move_buffer);
1613                 if (ret)
1614                         goto out;
1615
1616         } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1617
1618         ret = drm_buffer_object_validate(bo,
1619                                          fence_class,
1620                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1621                                          no_wait,
1622                                          move_buffer);
1623
1624         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
1625 out:
1626         if (rep)
1627                 drm_bo_fill_rep_arg(bo, rep);
1628
1629         mutex_unlock(&bo->mutex);
1630
1631         return ret;
1632 }
1633 EXPORT_SYMBOL(drm_bo_do_validate);
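
/*
 * Example (hedged usage sketch, not taken from an in-tree driver): a driver
 * that wants to pull a buffer into TT memory before command submission might
 * call drm_bo_do_validate() roughly as below, assuming the DRM_BO_FLAG_MEM_TT
 * and DRM_BO_MASK_MEM definitions from drm.h:
 *
 *     struct drm_bo_info_rep rep;
 *     int ret;
 *
 *     ret = drm_bo_do_validate(bo, DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM,
 *                              DRM_BO_HINT_DONT_FENCE, 0, &rep);
 *     if (ret)
 *             return ret;
 */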
1634
1635 /**
1636  * drm_bo_handle_validate
1637  *
1638  * @file_priv: the drm file private, used to get a handle to the user context
1639  *
1640  * @handle: the buffer object handle
1641  *
1642  * @flags: access rights, mapping parameters and cacheability. See
1643  * the DRM_BO_FLAG_* values in drm.h
1644  *
1645  * @mask: Which flag values to change; this allows callers to modify
1646  * things without knowing the current state of other flags.
1647  *
1648  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1649  * values in drm.h.
1650  *
1651  * @fence_class: a driver-specific way of doing fences. Presumably,
1652  * this would be used if the driver had more than one submission and
1653  * fencing mechanism. At this point, there isn't any use of this
1654  * from the user mode code.
1655  *
1656  * @rep: To be stuffed with the reply from validation
1657  *
1658  * @bo_rep: To be stuffed with the buffer object pointer
1659  *
1660  * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
1661  * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
1662  * This is a convenience wrapper only.
1663  */
1664
1665 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1666                            uint64_t flags, uint64_t mask,
1667                            uint32_t hint,
1668                            uint32_t fence_class,
1669                            struct drm_bo_info_rep *rep,
1670                            struct drm_buffer_object **bo_rep)
1671 {
1672         struct drm_device *dev = file_priv->minor->dev;
1673         struct drm_buffer_object *bo;
1674         int ret;
1675
1676         mutex_lock(&dev->struct_mutex);
1677         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1678         mutex_unlock(&dev->struct_mutex);
1679
1680         if (!bo)
1681                 return -EINVAL;
1682
1683         if (bo->base.owner != file_priv)
1684                 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1685
1686         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1687
1688         if (!ret && bo_rep)
1689                 *bo_rep = bo;
1690         else
1691                 drm_bo_usage_deref_unlocked(&bo);
1692
1693         return ret;
1694 }
1695 EXPORT_SYMBOL(drm_bo_handle_validate);
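
/*
 * Example (hedged sketch): a driver's command submission ioctl could validate
 * a user-supplied buffer handle in one call, using the same flags and mask as
 * in the drm_bo_do_validate() sketch above. The reference returned in bo must
 * eventually be dropped with drm_bo_usage_deref_unlocked():
 *
 *     struct drm_buffer_object *bo;
 *     struct drm_bo_info_rep rep;
 *     int ret;
 *
 *     ret = drm_bo_handle_validate(file_priv, handle,
 *                                  DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM,
 *                                  0, 0, &rep, &bo);
 *     if (ret)
 *             return ret;
 *     ... submit commands that reference bo ...
 *     drm_bo_usage_deref_unlocked(&bo);
 */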
1696
1697
1698 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1699                               struct drm_bo_info_rep *rep)
1700 {
1701         struct drm_device *dev = file_priv->minor->dev;
1702         struct drm_buffer_object *bo;
1703
1704         mutex_lock(&dev->struct_mutex);
1705         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1706         mutex_unlock(&dev->struct_mutex);
1707
1708         if (!bo)
1709                 return -EINVAL;
1710
1711         mutex_lock(&bo->mutex);
1712
1713         /*
1714          * FIXME: Quick busy here?
1715          */
1716
1717         drm_bo_busy(bo, 1);
1718         drm_bo_fill_rep_arg(bo, rep);
1719         mutex_unlock(&bo->mutex);
1720         drm_bo_usage_deref_unlocked(&bo);
1721         return 0;
1722 }
1723
1724 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1725                               uint32_t hint,
1726                               struct drm_bo_info_rep *rep)
1727 {
1728         struct drm_device *dev = file_priv->minor->dev;
1729         struct drm_buffer_object *bo;
1730         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1731         int ret;
1732
1733         mutex_lock(&dev->struct_mutex);
1734         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1735         mutex_unlock(&dev->struct_mutex);
1736
1737         if (!bo)
1738                 return -EINVAL;
1739
1740         mutex_lock(&bo->mutex);
1741         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
1742         if (ret)
1743                 goto out;
1744
1745         drm_bo_fill_rep_arg(bo, rep);
1746 out:
1747         mutex_unlock(&bo->mutex);
1748         drm_bo_usage_deref_unlocked(&bo);
1749         return ret;
1750 }
1751
1752 int drm_buffer_object_create(struct drm_device *dev,
1753                              unsigned long size,
1754                              enum drm_bo_type type,
1755                              uint64_t flags,
1756                              uint32_t hint,
1757                              uint32_t page_alignment,
1758                              unsigned long buffer_start,
1759                              struct drm_buffer_object **buf_obj)
1760 {
1761         struct drm_buffer_manager *bm = &dev->bm;
1762         struct drm_buffer_object *bo;
1763         int ret = 0;
1764         unsigned long num_pages;
1765
1766         size += buffer_start & ~PAGE_MASK;
1767         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1768         if (num_pages == 0) {
1769                 DRM_ERROR("Illegal buffer object size.\n");
1770                 return -EINVAL;
1771         }
1772
1773         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1774
1775         if (!bo)
1776                 return -ENOMEM;
1777
1778         mutex_init(&bo->mutex);
1779         mutex_lock(&bo->mutex);
1780
1781         atomic_set(&bo->usage, 1);
1782         atomic_set(&bo->mapped, 0);
1783         DRM_INIT_WAITQUEUE(&bo->event_queue);
1784         INIT_LIST_HEAD(&bo->lru);
1785         INIT_LIST_HEAD(&bo->pinned_lru);
1786         INIT_LIST_HEAD(&bo->ddestroy);
1787 #ifdef DRM_ODD_MM_COMPAT
1788         INIT_LIST_HEAD(&bo->p_mm_list);
1789         INIT_LIST_HEAD(&bo->vma_list);
1790 #endif
1791         bo->dev = dev;
1792         bo->type = type;
1793         bo->num_pages = num_pages;
1794         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1795         bo->mem.num_pages = bo->num_pages;
1796         bo->mem.mm_node = NULL;
1797         bo->mem.page_alignment = page_alignment;
1798         bo->buffer_start = buffer_start & PAGE_MASK;
1799         bo->priv_flags = 0;
1800         bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1801                          DRM_BO_FLAG_MAPPABLE);
1802         bo->mem.proposed_flags = 0;
1803         atomic_inc(&bm->count);
1804         /*
1805          * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1806          */
1807         ret = drm_bo_modify_proposed_flags(bo, flags, flags);
1808         if (ret)
1809                 goto out_err;
1810
1811         /*
1812          * For drm_bo_type_device buffers, allocate
1813          * address space from the device so that applications
1814          * can mmap the buffer from there
1815          */
1816         if (bo->type == drm_bo_type_device) {
1817                 mutex_lock(&dev->struct_mutex);
1818                 ret = drm_bo_setup_vm_locked(bo);
1819                 mutex_unlock(&dev->struct_mutex);
1820                 if (ret)
1821                         goto out_err;
1822         }
1823
1824         mutex_unlock(&bo->mutex);
1825         ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
1826                                  0, NULL);
1827         if (ret)
1828                 goto out_err_unlocked;
1829
1830         *buf_obj = bo;
1831         return 0;
1832
1833 out_err:
1834         mutex_unlock(&bo->mutex);
1835 out_err_unlocked:
1836         drm_bo_usage_deref_unlocked(&bo);
1837         return ret;
1838 }
1839 EXPORT_SYMBOL(drm_buffer_object_create);
1840
1841
1842 static int drm_bo_add_user_object(struct drm_file *file_priv,
1843                                   struct drm_buffer_object *bo, int shareable)
1844 {
1845         struct drm_device *dev = file_priv->minor->dev;
1846         int ret;
1847
1848         mutex_lock(&dev->struct_mutex);
1849         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1850         if (ret)
1851                 goto out;
1852
1853         bo->base.remove = drm_bo_base_deref_locked;
1854         bo->base.type = drm_buffer_type;
1855         bo->base.ref_struct_locked = NULL;
1856         bo->base.unref = drm_buffer_user_object_unmap;
1857
1858 out:
1859         mutex_unlock(&dev->struct_mutex);
1860         return ret;
1861 }
1862
1863 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1864 {
1865         struct drm_bo_create_arg *arg = data;
1866         struct drm_bo_create_req *req = &arg->d.req;
1867         struct drm_bo_info_rep *rep = &arg->d.rep;
1868         struct drm_buffer_object *entry;
1869         enum drm_bo_type bo_type;
1870         int ret = 0;
1871
1872         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1873             (int)(req->size / 1024), req->page_alignment * 4);
1874
1875         if (!dev->bm.initialized) {
1876                 DRM_ERROR("Buffer object manager is not initialized.\n");
1877                 return -EINVAL;
1878         }
1879
1880         /*
1881          * If the buffer creation request comes in with a starting address, it
1882          * points at the user pages to map (drm_bo_type_user). Otherwise, create
1883          * a drm_bo_type_device buffer, which uses pages allocated by the kernel.
1884          */
1885         bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1886
1887         /*
1888          * User buffers cannot be shared
1889          */
1890         if (bo_type == drm_bo_type_user)
1891                 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1892
1893         ret = drm_buffer_object_create(file_priv->minor->dev,
1894                                        req->size, bo_type, req->flags,
1895                                        req->hint, req->page_alignment,
1896                                        req->buffer_start, &entry);
1897         if (ret)
1898                 goto out;
1899
1900         ret = drm_bo_add_user_object(file_priv, entry,
1901                                      req->flags & DRM_BO_FLAG_SHAREABLE);
1902         if (ret) {
1903                 drm_bo_usage_deref_unlocked(&entry);
1904                 goto out;
1905         }
1906
1907         mutex_lock(&entry->mutex);
1908         drm_bo_fill_rep_arg(entry, rep);
1909         mutex_unlock(&entry->mutex);
1910
1911 out:
1912         return ret;
1913 }
1914
1915 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1916                            void *data, struct drm_file *file_priv)
1917 {
1918         struct drm_bo_map_wait_idle_arg *arg = data;
1919         struct drm_bo_info_req *req = &arg->d.req;
1920         struct drm_bo_info_rep *rep = &arg->d.rep;
1921         struct drm_buffer_object *bo;
1922         int ret;
1923
1924         if (!dev->bm.initialized) {
1925                 DRM_ERROR("Buffer object manager is not initialized.\n");
1926                 return -EINVAL;
1927         }
1928
1929         ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
1930         if (ret)
1931                 return ret;
1932
1933         mutex_lock(&dev->struct_mutex);
1934         bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
1935         mutex_unlock(&dev->struct_mutex);
1936
1937         if (!bo)
1938                 return -EINVAL;
1939
1940         if (bo->base.owner != file_priv)
1941                 req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1942
1943         ret = drm_bo_do_validate(bo, req->flags, req->mask,
1944                                  req->hint | DRM_BO_HINT_DONT_FENCE,
1945                                  bo->fence_class, rep);
1946
1947         drm_bo_usage_deref_unlocked(&bo);
1948
1949         (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1950
1951         return ret;
1952 }
1953
1954 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1955 {
1956         struct drm_bo_map_wait_idle_arg *arg = data;
1957         struct drm_bo_info_req *req = &arg->d.req;
1958         struct drm_bo_info_rep *rep = &arg->d.rep;
1959         int ret;
1960         if (!dev->bm.initialized) {
1961                 DRM_ERROR("Buffer object manager is not initialized.\n");
1962                 return -EINVAL;
1963         }
1964
1965         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1966                                     req->hint, rep);
1967         if (ret)
1968                 return ret;
1969
1970         return 0;
1971 }
1972
1973 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1974 {
1975         struct drm_bo_handle_arg *arg = data;
1976         int ret;
1977         if (!dev->bm.initialized) {
1978                 DRM_ERROR("Buffer object manager is not initialized.\n");
1979                 return -EINVAL;
1980         }
1981
1982         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1983         return ret;
1984 }
1985
1986
1987 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1988 {
1989         struct drm_bo_reference_info_arg *arg = data;
1990         struct drm_bo_handle_arg *req = &arg->d.req;
1991         struct drm_bo_info_rep *rep = &arg->d.rep;
1992         struct drm_user_object *uo;
1993         int ret;
1994
1995         if (!dev->bm.initialized) {
1996                 DRM_ERROR("Buffer object manager is not initialized.\n");
1997                 return -EINVAL;
1998         }
1999
2000         ret = drm_user_object_ref(file_priv, req->handle,
2001                                   drm_buffer_type, &uo);
2002         if (ret)
2003                 return ret;
2004
2005         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2006         if (ret)
2007                 return ret;
2008
2009         return 0;
2010 }
2011
2012 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2013 {
2014         struct drm_bo_handle_arg *arg = data;
2015         int ret = 0;
2016
2017         if (!dev->bm.initialized) {
2018                 DRM_ERROR("Buffer object manager is not initialized.\n");
2019                 return -EINVAL;
2020         }
2021
2022         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2023         return ret;
2024 }
2025
2026 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2027 {
2028         struct drm_bo_reference_info_arg *arg = data;
2029         struct drm_bo_handle_arg *req = &arg->d.req;
2030         struct drm_bo_info_rep *rep = &arg->d.rep;
2031         int ret;
2032
2033         if (!dev->bm.initialized) {
2034                 DRM_ERROR("Buffer object manager is not initialized.\n");
2035                 return -EINVAL;
2036         }
2037
2038         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2039         if (ret)
2040                 return ret;
2041
2042         return 0;
2043 }
2044
2045 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2046 {
2047         struct drm_bo_map_wait_idle_arg *arg = data;
2048         struct drm_bo_info_req *req = &arg->d.req;
2049         struct drm_bo_info_rep *rep = &arg->d.rep;
2050         int ret;
2051         if (!dev->bm.initialized) {
2052                 DRM_ERROR("Buffer object manager is not initialized.\n");
2053                 return -EINVAL;
2054         }
2055
2056         ret = drm_bo_handle_wait(file_priv, req->handle,
2057                                  req->hint, rep);
2058         if (ret)
2059                 return ret;
2060
2061         return 0;
2062 }
2063
2064 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2065                              uint32_t mem_type,
2066                              int free_pinned,
2067                              int allow_errors)
2068 {
2069         struct drm_device *dev = bo->dev;
2070         int ret = 0;
2071
2072         mutex_lock(&bo->mutex);
2073
2074         ret = drm_bo_expire_fence(bo, allow_errors);
2075         if (ret)
2076                 goto out;
2077
2078         if (free_pinned) {
2079                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2080                 mutex_lock(&dev->struct_mutex);
2081                 list_del_init(&bo->pinned_lru);
2082                 if (bo->pinned_node == bo->mem.mm_node)
2083                         bo->pinned_node = NULL;
2084                 if (bo->pinned_node != NULL) {
2085                         drm_memrange_put_block(bo->pinned_node);
2086                         bo->pinned_node = NULL;
2087                 }
2088                 mutex_unlock(&dev->struct_mutex);
2089         }
2090
2091         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2092                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
2093                           "cleanup. Removing flag and evicting.\n");
2094                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2095                 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2096         }
2097
2098         if (bo->mem.mem_type == mem_type)
2099                 ret = drm_bo_evict(bo, mem_type, 0);
2100
2101         if (ret) {
2102                 if (allow_errors) {
2103                         goto out;
2104                 } else {
2105                         ret = 0;
2106                         DRM_ERROR("Cleanup eviction failed\n");
2107                 }
2108         }
2109
2110 out:
2111         mutex_unlock(&bo->mutex);
2112         return ret;
2113 }
2114
2115
2116 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2117                                          int pinned_list)
2118 {
2119         if (pinned_list)
2120                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2121         else
2122                 return list_entry(list, struct drm_buffer_object, lru);
2123 }
2124
2125 /*
2126  * dev->struct_mutex locked.
2127  */
2128
2129 static int drm_bo_force_list_clean(struct drm_device *dev,
2130                                    struct list_head *head,
2131                                    unsigned mem_type,
2132                                    int free_pinned,
2133                                    int allow_errors,
2134                                    int pinned_list)
2135 {
2136         struct list_head *list, *next, *prev;
2137         struct drm_buffer_object *entry, *nentry;
2138         int ret;
2139         int do_restart;
2140
2141         /*
2142          * The list traversal is a bit odd here, because an item may
2143          * disappear from the list when we release the struct_mutex or
2144          * when we decrease the usage count. Also we're not guaranteed
2145          * to drain pinned lists, so we can't always restart.
2146          */
2147
2148 restart:
2149         nentry = NULL;
2150         list_for_each_safe(list, next, head) {
2151                 prev = list->prev;
2152
2153                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2154                 atomic_inc(&entry->usage);
2155                 if (nentry) {
2156                         atomic_dec(&nentry->usage);
2157                         nentry = NULL;
2158                 }
2159
2160                 /*
2161                  * Protect the next item from destruction, so we can check
2162                  * its list pointers later on.
2163                  */
2164
2165                 if (next != head) {
2166                         nentry = drm_bo_entry(next, pinned_list);
2167                         atomic_inc(&nentry->usage);
2168                 }
2169                 mutex_unlock(&dev->struct_mutex);
2170
2171                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2172                                         allow_errors);
2173                 mutex_lock(&dev->struct_mutex);
2174
2175                 drm_bo_usage_deref_locked(&entry);
2176                 if (ret)
2177                         return ret;
2178
2179                 /*
2180                  * Has the next item disappeared from the list?
2181                  */
2182
2183                 do_restart = ((next->prev != list) && (next->prev != prev));
2184
2185                 if (nentry != NULL && do_restart)
2186                         drm_bo_usage_deref_locked(&nentry);
2187
2188                 if (do_restart)
2189                         goto restart;
2190         }
2191         return 0;
2192 }
2193
2194 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
2195 {
2196         struct drm_buffer_manager *bm = &dev->bm;
2197         struct drm_mem_type_manager *man = &bm->man[mem_type];
2198         int ret = -EINVAL;
2199
2200         if (mem_type >= DRM_BO_MEM_TYPES) {
2201                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2202                 return ret;
2203         }
2204
2205         if (!man->has_type) {
2206                 DRM_ERROR("Trying to take down uninitialized "
2207                           "memory manager type %u\n", mem_type);
2208                 return ret;
2209         }
2210
2211         if ((man->kern_init_type) && (kern_clean == 0)) {
2212                 DRM_ERROR("Trying to take down kernel initialized "
2213                           "memory manager type %u\n", mem_type);
2214                 return -EPERM;
2215         }
2216
2217         man->use_type = 0;
2218         man->has_type = 0;
2219
2220         ret = 0;
2221         if (mem_type > 0) {
2222                 BUG_ON(!list_empty(&bm->unfenced));
2223                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2224                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2225
2226                 if (drm_memrange_clean(&man->manager)) {
2227                         drm_memrange_takedown(&man->manager);
2228                 } else {
2229                         ret = -EBUSY;
2230                 }
2231         }
2232
2233         return ret;
2234 }
2235 EXPORT_SYMBOL(drm_bo_clean_mm);
2236
2237 /**
2238  * Evict all buffers of a particular mem_type, but leave memory manager
2239  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2240  * point since we have the hardware lock.
2241  */
2242
2243 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2244 {
2245         int ret;
2246         struct drm_buffer_manager *bm = &dev->bm;
2247         struct drm_mem_type_manager *man = &bm->man[mem_type];
2248
2249         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2250                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2251                 return -EINVAL;
2252         }
2253
2254         if (!man->has_type) {
2255                 DRM_ERROR("Memory type %u has not been initialized.\n",
2256                           mem_type);
2257                 return 0;
2258         }
2259
2260         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2261         if (ret)
2262                 return ret;
2263         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2264
2265         return ret;
2266 }
2267
2268 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
2269                    unsigned long p_offset, unsigned long p_size,
2270                    int kern_init)
2271 {
2272         struct drm_buffer_manager *bm = &dev->bm;
2273         int ret = -EINVAL;
2274         struct drm_mem_type_manager *man;
2275
2276         if (type >= DRM_BO_MEM_TYPES) {
2277                 DRM_ERROR("Illegal memory type %d\n", type);
2278                 return ret;
2279         }
2280
2281         man = &bm->man[type];
2282         if (man->has_type) {
2283                 DRM_ERROR("Memory manager already initialized for type %d\n",
2284                           type);
2285                 return ret;
2286         }
2287
2288         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2289         if (ret)
2290                 return ret;
2291
2292         ret = 0;
2293         if (type != DRM_BO_MEM_LOCAL) {
2294                 if (!p_size) {
2295                         DRM_ERROR("Zero size memory manager type %d\n", type);
2296                         return -EINVAL;
2297                 }
2298                 ret = drm_memrange_init(&man->manager, p_offset, p_size);
2299                 if (ret)
2300                         return ret;
2301         }
2302         man->has_type = 1;
2303         man->use_type = 1;
2304         man->kern_init_type = kern_init;
2305         man->size = p_size;
2306
2307         INIT_LIST_HEAD(&man->lru);
2308         INIT_LIST_HEAD(&man->pinned);
2309
2310         return 0;
2311 }
2312 EXPORT_SYMBOL(drm_bo_init_mm);
2313
2314 /*
2315  * This function is intended to be called on drm driver unload.
2316  * If you decide to call it from lastclose, you must protect the call
2317  * from a potentially racing drm_bo_driver_init in firstopen.
2318  * (This may happen on X server restart).
2319  */
2320
2321 int drm_bo_driver_finish(struct drm_device *dev)
2322 {
2323         struct drm_buffer_manager *bm = &dev->bm;
2324         int ret = 0;
2325         unsigned i = DRM_BO_MEM_TYPES;
2326         struct drm_mem_type_manager *man;
2327
2328         mutex_lock(&dev->struct_mutex);
2329
2330         if (!bm->initialized)
2331                 goto out;
2332         bm->initialized = 0;
2333
2334         while (i--) {
2335                 man = &bm->man[i];
2336                 if (man->has_type) {
2337                         man->use_type = 0;
2338                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
2339                                 ret = -EBUSY;
2340                                 DRM_ERROR("DRM memory manager type %d "
2341                                           "is not clean.\n", i);
2342                         }
2343                         man->has_type = 0;
2344                 }
2345         }
2346         mutex_unlock(&dev->struct_mutex);
2347
2348         if (!cancel_delayed_work(&bm->wq))
2349                 flush_scheduled_work();
2350
2351         mutex_lock(&dev->struct_mutex);
2352         drm_bo_delayed_delete(dev, 1);
2353         if (list_empty(&bm->ddestroy))
2354                 DRM_DEBUG("Delayed destroy list was clean\n");
2355
2356         if (list_empty(&bm->man[0].lru))
2357                 DRM_DEBUG("Swap list was clean\n");
2358
2359         if (list_empty(&bm->man[0].pinned))
2360                 DRM_DEBUG("NO_MOVE list was clean\n");
2361
2362         if (list_empty(&bm->unfenced))
2363                 DRM_DEBUG("Unfenced list was clean\n");
2364
2365 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2366         ClearPageReserved(bm->dummy_read_page);
2367 #endif
2368         __free_page(bm->dummy_read_page);
2369
2370 out:
2371         mutex_unlock(&dev->struct_mutex);
2372         return ret;
2373 }
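
/*
 * Example (hedged sketch): drivers using the buffer manager would typically
 * call drm_bo_driver_finish() from their unload hook; it evicts remaining
 * buffers and takes down every memory type that is still initialized.
 * foo_unload() below is a hypothetical driver hook, not part of this file:
 *
 *     static int foo_unload(struct drm_device *dev)
 *     {
 *             drm_bo_driver_finish(dev);
 *             return 0;
 *     }
 */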
2374
2375 /*
2376  * This function is intended to be called on drm driver load.
2377  * If you decide to call it from firstopen, you must protect the call
2378  * from a potentially racing drm_bo_driver_finish in lastclose.
2379  * (This may happen on X server restart).
2380  */
2381
2382 int drm_bo_driver_init(struct drm_device *dev)
2383 {
2384         struct drm_bo_driver *driver = dev->driver->bo_driver;
2385         struct drm_buffer_manager *bm = &dev->bm;
2386         int ret = -EINVAL;
2387
2388         bm->dummy_read_page = NULL;
2389         drm_bo_init_lock(&bm->bm_lock);
2390         mutex_lock(&dev->struct_mutex);
2391         if (!driver)
2392                 goto out_unlock;
2393
2394         bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2395         if (!bm->dummy_read_page) {
2396                 ret = -ENOMEM;
2397                 goto out_unlock;
2398         }
2399
2400 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2401         SetPageReserved(bm->dummy_read_page);
2402 #endif
2403
2404         /*
2405          * Initialize the system memory buffer type.
2406          * Other types need to be driver / IOCTL initialized.
2407          */
2408         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
2409         if (ret)
2410                 goto out_unlock;
2411
2412 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2413         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2414 #else
2415         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2416 #endif
2417         bm->initialized = 1;
2418         bm->nice_mode = 1;
2419         atomic_set(&bm->count, 0);
2420         bm->cur_pages = 0;
2421         INIT_LIST_HEAD(&bm->unfenced);
2422         INIT_LIST_HEAD(&bm->ddestroy);
2423 out_unlock:
2424         mutex_unlock(&dev->struct_mutex);
2425         return ret;
2426 }
2427 EXPORT_SYMBOL(drm_bo_driver_init);
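
/*
 * Example (hedged sketch): the counterpart on driver load. A driver might
 * bring up the buffer manager and then register an additional,
 * kernel-initialized memory type. The offset and size below are placeholders,
 * and foo_load() is a hypothetical driver hook:
 *
 *     static int foo_load(struct drm_device *dev)
 *     {
 *             int ret;
 *
 *             ret = drm_bo_driver_init(dev);
 *             if (ret)
 *                     return ret;
 *
 *             mutex_lock(&dev->struct_mutex);
 *             ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, 8192, 1);
 *             mutex_unlock(&dev->struct_mutex);
 *             return ret;
 *     }
 */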
2428
2429 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2430 {
2431         struct drm_mm_init_arg *arg = data;
2432         struct drm_buffer_manager *bm = &dev->bm;
2433         struct drm_bo_driver *driver = dev->driver->bo_driver;
2434         int ret;
2435
2436         if (!driver) {
2437                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2438                 return -EINVAL;
2439         }
2440
2441         ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2442         if (ret)
2443                 return ret;
2444
2445         ret = -EINVAL;
2446         if (arg->magic != DRM_BO_INIT_MAGIC) {
2447                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2448                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2449                 return -EINVAL;
2450         }
2451         if (arg->major != DRM_BO_INIT_MAJOR) {
2452                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2453                           "\tversion don't match. Got %d, expected %d.\n",
2454                           arg->major, DRM_BO_INIT_MAJOR);
2455                 return -EINVAL;
2456         }
2457
2458         mutex_lock(&dev->struct_mutex);
2459         if (!bm->initialized) {
2460                 DRM_ERROR("DRM memory manager was not initialized.\n");
2461                 goto out;
2462         }
2463         if (arg->mem_type == 0) {
2464                 DRM_ERROR("System memory buffers already initialized.\n");
2465                 goto out;
2466         }
2467         ret = drm_bo_init_mm(dev, arg->mem_type,
2468                              arg->p_offset, arg->p_size, 0);
2469
2470 out:
2471         mutex_unlock(&dev->struct_mutex);
2472         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2473
2474         if (ret)
2475                 return ret;
2476
2477         return 0;
2478 }
2479
2480 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2481 {
2482         struct drm_mm_type_arg *arg = data;
2483         struct drm_buffer_manager *bm = &dev->bm;
2484         struct drm_bo_driver *driver = dev->driver->bo_driver;
2485         int ret;
2486
2487         if (!driver) {
2488                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2489                 return -EINVAL;
2490         }
2491
2492         ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
2493         if (ret)
2494                 return ret;
2495
2496         mutex_lock(&dev->struct_mutex);
2497         ret = -EINVAL;
2498         if (!bm->initialized) {
2499                 DRM_ERROR("DRM memory manager was not initialized\n");
2500                 goto out;
2501         }
2502         if (arg->mem_type == 0) {
2503                 DRM_ERROR("No takedown for System memory buffers.\n");
2504                 goto out;
2505         }
2506         ret = 0;
2507         if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
2508                 if (ret == -EBUSY)
2509                         DRM_ERROR("Memory manager type %d not clean. "
2510                                   "Delaying takedown\n", arg->mem_type);
2511                 ret = 0;
2512         }
2513 out:
2514         mutex_unlock(&dev->struct_mutex);
2515         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2516
2517         if (ret)
2518                 return ret;
2519
2520         return 0;
2521 }
2522
2523 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2524 {
2525         struct drm_mm_type_arg *arg = data;
2526         struct drm_bo_driver *driver = dev->driver->bo_driver;
2527         int ret;
2528
2529         if (!driver) {
2530                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2531                 return -EINVAL;
2532         }
2533
2534         if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2535                 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2536                 return -EINVAL;
2537         }
2538
2539         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2540                 ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
2541                 if (ret)
2542                         return ret;
2543         }
2544
2545         mutex_lock(&dev->struct_mutex);
2546         ret = drm_bo_lock_mm(dev, arg->mem_type);
2547         mutex_unlock(&dev->struct_mutex);
2548         if (ret) {
2549                 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2550                 return ret;
2551         }
2552
2553         return 0;
2554 }
2555
2556 int drm_mm_unlock_ioctl(struct drm_device *dev,
2557                         void *data,
2558                         struct drm_file *file_priv)
2559 {
2560         struct drm_mm_type_arg *arg = data;
2561         struct drm_bo_driver *driver = dev->driver->bo_driver;
2562         int ret;
2563
2564         if (!driver) {
2565                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2566                 return -EINVAL;
2567         }
2568
2569         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2570                 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2571                 if (ret)
2572                         return ret;
2573         }
2574
2575         return 0;
2576 }
2577
2578 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2579 {
2580         struct drm_mm_info_arg *arg = data;
2581         struct drm_buffer_manager *bm = &dev->bm;
2582         struct drm_bo_driver *driver = dev->driver->bo_driver;
2583         struct drm_mem_type_manager *man;
2584         int ret = 0;
2585         int mem_type = arg->mem_type;
2586
2587         if (!driver) {
2588                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2589                 return -EINVAL;
2590         }
2591
2592         if (mem_type >= DRM_BO_MEM_TYPES) {
2593                 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2594                 return -EINVAL;
2595         }
2596
2597         mutex_lock(&dev->struct_mutex);
2598         if (!bm->initialized) {
2599                 DRM_ERROR("DRM memory manager was not initialized\n");
2600                 ret = -EINVAL;
2601                 goto out;
2602         }
2603
2604
2605         man = &bm->man[arg->mem_type];
2606
2607         arg->p_size = man->size;
2608
2609 out:
2610         mutex_unlock(&dev->struct_mutex);
2611      
2612         return ret;
2613 }
2614 /*
2615  * buffer object vm functions.
2616  */
2617
2618 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2619 {
2620         struct drm_buffer_manager *bm = &dev->bm;
2621         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2622
2623         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2624                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2625                         return 0;
2626
2627                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2628                         return 0;
2629
2630                 if (mem->flags & DRM_BO_FLAG_CACHED)
2631                         return 0;
2632         }
2633         return 1;
2634 }
2635 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2636
2637 /**
2638  * Get the PCI offset for the buffer object memory.
2639  *
2640  * \param mem The buffer object memory region.
2641  * \param bus_base On return the base of the PCI region
2642  * \param bus_offset On return the byte offset into the PCI region
2643  * \param bus_size On return the byte size of the buffer object or zero if
2644  *     the buffer object memory is not accessible through a PCI region.
2645  * \return Failure indication.
2646  *
2647  * Returns -EINVAL if the buffer object is currently not mappable.
2648  * Otherwise returns zero.
2649  */
2650
2651 int drm_bo_pci_offset(struct drm_device *dev,
2652                       struct drm_bo_mem_reg *mem,
2653                       unsigned long *bus_base,
2654                       unsigned long *bus_offset, unsigned long *bus_size)
2655 {
2656         struct drm_buffer_manager *bm = &dev->bm;
2657         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2658
2659         *bus_size = 0;
2660         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2661                 return -EINVAL;
2662
2663         if (drm_mem_reg_is_pci(dev, mem)) {
2664                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2665                 *bus_size = mem->num_pages << PAGE_SHIFT;
2666                 *bus_base = man->io_offset;
2667         }
2668
2669         return 0;
2670 }
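
/*
 * Example (hedged sketch): a caller needing a kernel mapping of a
 * PCI-accessible buffer could combine this with ioremap(); a bus_size of
 * zero means the memory is not in a PCI region and has to be reached
 * through its ttm pages instead:
 *
 *     unsigned long bus_base, bus_offset, bus_size;
 *     void __iomem *virt = NULL;
 *
 *     if (!drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
 *                            &bus_size) && bus_size != 0)
 *             virt = ioremap(bus_base + bus_offset, bus_size);
 */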
2671
2672 /**
2673  * Kill all user-space virtual mappings of this buffer object.
2674  *
2675  * \param bo The buffer object.
2676  *
2677  * Call bo->mutex locked.
2678  */
2679
2680 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2681 {
2682         struct drm_device *dev = bo->dev;
2683         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2684         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2685
2686         if (!dev->dev_mapping)
2687                 return;
2688
2689         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2690 }
2691
2692 /**
2693  * drm_bo_takedown_vm_locked:
2694  *
2695  * @bo: the buffer object whose drm device mapping is to be removed
2696  *
2697  * Remove any associated vm mapping on the drm device node that
2698  * would have been created for a drm_bo_type_device buffer.
2699  */
2700 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2701 {
2702         struct drm_map_list *list;
2703         drm_local_map_t *map;
2704         struct drm_device *dev = bo->dev;
2705
2706         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2707         if (bo->type != drm_bo_type_device)
2708                 return;
2709
2710         list = &bo->map_list;
2711         if (list->user_token) {
2712                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2713                 list->user_token = 0;
2714         }
2715         if (list->file_offset_node) {
2716                 drm_memrange_put_block(list->file_offset_node);
2717                 list->file_offset_node = NULL;
2718         }
2719
2720         map = list->map;
2721         if (!map)
2722                 return;
2723
2724         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2725         list->map = NULL;
2726         list->user_token = 0ULL;
2727         drm_bo_usage_deref_locked(&bo);
2728 }
2729
2730 /**
2731  * drm_bo_setup_vm_locked:
2732  *
2733  * @bo: the buffer to allocate address space for
2734  *
2735  * Allocate address space in the drm device so that applications
2736  * can mmap the buffer and access the contents. This only
2737  * applies to drm_bo_type_device objects as others are not
2738  * placed in the drm device address space.
2739  */
2740 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2741 {
2742         struct drm_map_list *list = &bo->map_list;
2743         drm_local_map_t *map;
2744         struct drm_device *dev = bo->dev;
2745
2746         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2747         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2748         if (!list->map)
2749                 return -ENOMEM;
2750
2751         map = list->map;
2752         map->offset = 0;
2753         map->type = _DRM_TTM;
2754         map->flags = _DRM_REMOVABLE;
2755         map->size = bo->mem.num_pages * PAGE_SIZE;
2756         atomic_inc(&bo->usage);
2757         map->handle = (void *)bo;
2758
2759         list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
2760                                                     bo->mem.num_pages, 0, 0);
2761
2762         if (unlikely(!list->file_offset_node)) {
2763                 drm_bo_takedown_vm_locked(bo);
2764                 return -ENOMEM;
2765         }
2766
2767         list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
2768                                                   bo->mem.num_pages, 0);
2769
2770         if (unlikely(!list->file_offset_node)) {
2771                 drm_bo_takedown_vm_locked(bo);
2772                 return -ENOMEM;
2773         }
2774                 
2775         list->hash.key = list->file_offset_node->start;
2776         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2777                 drm_bo_takedown_vm_locked(bo);
2778                 return -ENOMEM;
2779         }
2780
2781         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2782
2783         return 0;
2784 }
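
/*
 * Example (hedged user-space sketch): the user_token set up above is the
 * offset into the drm device node at which a drm_bo_type_device buffer can
 * be mmapped. Assuming the token has been returned to user space (e.g. via
 * the buffer info reply), a mapping could look roughly like:
 *
 *     void *virt = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, drm_fd, (off_t) user_token);
 */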
2785
2786 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2787                          struct drm_file *file_priv)
2788 {
2789         struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2790
2791         arg->major = DRM_BO_INIT_MAJOR;
2792         arg->minor = DRM_BO_INIT_MINOR;
2793         arg->patchlevel = DRM_BO_INIT_PATCH;
2794
2795         return 0;
2796 }