Some cleanup. A buffer object should only have one active memory type.
[platform/upstream/libdrm.git] / linux-core / drm_bo.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Buffer object locking policy:
 * Lock dev->struct_mutex;
 * Increase usage
 * Unlock dev->struct_mutex;
 * Lock buffer->mutex;
 * Do whatever you want;
 * Unlock buffer->mutex;
 * Decrease usage. Call destruction if zero.
 *
 * User object visibility ups usage just once, since it has its own
 * refcounting.
 *
 * Destruction:
 * lock dev->struct_mutex;
 * Verify that usage is zero. Otherwise unlock and continue.
 * Destroy object.
 * unlock dev->struct_mutex;
 *
 * Mutex and spinlock locking orders:
 * 1.) Buffer mutex
 * 2.) Refer to ttm locking orders.
 */
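
/*
 * A minimal sketch of the reference pattern described above, using the
 * helpers defined later in this file. Illustrative only and therefore
 * compiled out; drm_bo_example_access() is a hypothetical caller, and the
 * actual work done under buffer->mutex is caller-specific.
 */
#if 0
static void drm_bo_example_access(drm_device_t * dev, drm_buffer_object_t * bo)
{
        mutex_lock(&dev->struct_mutex);
        atomic_inc(&bo->usage);
        mutex_unlock(&dev->struct_mutex);

        mutex_lock(&bo->mutex);
        /* ... do whatever you want with the buffer ... */
        mutex_unlock(&bo->mutex);

        /* Decrease usage; destruction runs if the count hits zero. */
        drm_bo_usage_deref_unlocked(dev, bo);
}
#endif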

#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
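
/*
 * Example: DRM_FLAG_MASKED(old, new, mask) copies exactly the bits selected
 * by mask from new into old and leaves the rest of old untouched. With
 * old = 0x0A, new = 0x06, mask = 0x04, (old ^ new) & mask isolates the
 * differing bit 2, and the final XOR flips it, giving old = 0x0E.
 */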

static inline uint32_t drm_bo_type_flags(unsigned type)
{
        return (1 << (24 + type));
}
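
/*
 * Maps a DRM_BO_MEM_* type index to its corresponding DRM_BO_FLAG_MEM_*
 * bit, assuming the memory-type flag bits start at bit 24 in type-index
 * order: e.g. DRM_BO_MEM_TT would map to 1 << 25.
 */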

/*
 * bo locked. dev->struct_mutex locked.
 */

static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
                              drm_buffer_manager_t * bm)
{
        struct list_head *list;
        buf->mem_type = 0;

        switch (buf->flags & DRM_BO_MASK_MEM) {
        case DRM_BO_FLAG_MEM_TT:
                buf->mem_type = DRM_BO_MEM_TT;
                break;
        case DRM_BO_FLAG_MEM_VRAM:
                buf->mem_type = DRM_BO_MEM_VRAM;
                break;
        case DRM_BO_FLAG_MEM_LOCAL:
                buf->mem_type = DRM_BO_MEM_LOCAL;
                break;
        default:
                BUG_ON(1);
        }
        list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
                &bm->pinned[buf->mem_type] : &bm->lru[buf->mem_type];
        list_add_tail(&buf->lru, list);
        return;
}

/*
 * bo locked.
 */

static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
                                int force_no_move)
{
        drm_device_t *dev = buf->dev;
        int ret;

        if (buf->mm_node) {
                mutex_lock(&dev->struct_mutex);
                if (evict)
                        ret = drm_evict_ttm(buf->ttm);
                else
                        ret = drm_unbind_ttm(buf->ttm);

                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        if (ret == -EAGAIN)
                                schedule();
                        return ret;
                }

                if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
                        drm_mm_put_block(buf->mm_node);
                        buf->mm_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        buf->flags &= ~DRM_BO_FLAG_MEM_TT;
        buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;

        return 0;
}

/*
 * Lock dev->struct_mutex
 */

static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
{

        drm_buffer_manager_t *bm = &dev->bm;

        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

        /*
         * Someone might try to access us through the still active BM lists.
         */

        if (atomic_read(&bo->usage) != 0)
                return;
        if (!list_empty(&bo->ddestroy))
                return;

        if (bo->fence) {
                if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) {

                        drm_fence_object_flush(dev, bo->fence, bo->fence_type);
                        list_add_tail(&bo->ddestroy, &bm->ddestroy);
                        schedule_delayed_work(&bm->wq,
                                              ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
                        return;
                } else {
                        drm_fence_usage_deref_locked(dev, bo->fence);
                        bo->fence = NULL;
                }
        }
        /*
         * Take away from lru lists.
         */

        list_del_init(&bo->lru);

        if (bo->ttm) {
                unsigned long _end = jiffies + DRM_HZ;
                int ret;

                /*
                 * This temporarily unlocks struct_mutex.
                 */

                do {
                        ret = drm_unbind_ttm(bo->ttm);
                        if (ret == -EAGAIN) {
                                mutex_unlock(&dev->struct_mutex);
                                schedule();
                                mutex_lock(&dev->struct_mutex);
                        }
                } while (ret == -EAGAIN && !time_after_eq(jiffies, _end));

                if (ret) {
                        DRM_ERROR("Couldn't unbind buffer. "
                                  "Bad. Continuing anyway\n");
                }
        }

        if (bo->mm_node) {
                drm_mm_put_block(bo->mm_node);
                bo->mm_node = NULL;
        }
        if (bo->ttm_object) {
                drm_ttm_object_deref_locked(dev, bo->ttm_object);
        }
        atomic_dec(&bm->count);
        drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
}

/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
                       int no_wait)
{

        drm_fence_object_t *fence = bo->fence;
        int ret;

        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                if (no_wait) {
                        return -EBUSY;
                }
                ret = drm_fence_object_wait(dev, fence, lazy, ignore_signals,
                                            bo->fence_type);
                if (ret)
                        return ret;

                drm_fence_usage_deref_unlocked(dev, fence);
                bo->fence = NULL;

        }
        return 0;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
        drm_buffer_manager_t *bm = &dev->bm;

        drm_buffer_object_t *entry, *nentry;
        struct list_head *list, *next;
        drm_fence_object_t *fence;

        list_for_each_safe(list, next, &bm->ddestroy) {
                entry = list_entry(list, drm_buffer_object_t, ddestroy);
                atomic_inc(&entry->usage);
                if (atomic_read(&entry->usage) != 1) {
                        atomic_dec(&entry->usage);
                        continue;
                }

                nentry = NULL;
                if (next != &bm->ddestroy) {
                        nentry = list_entry(next, drm_buffer_object_t,
                                            ddestroy);
                        atomic_inc(&nentry->usage);
                }

                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                fence = entry->fence;
                if (fence && drm_fence_object_signaled(fence,
                                                       entry->fence_type)) {
                        drm_fence_usage_deref_locked(dev, fence);
                        entry->fence = NULL;
                }

                if (entry->fence && remove_all) {
                        if (bm->nice_mode) {
                                unsigned long _end = jiffies + 3 * DRM_HZ;
                                int ret;
                                do {
                                        ret = drm_bo_wait(entry, 0, 1, 0);
                                } while (ret && !time_after_eq(jiffies, _end));

                                if (entry->fence) {
                                        bm->nice_mode = 0;
                                        DRM_ERROR("Detected GPU lockup or "
                                                  "fence driver was taken down. "
                                                  "Evicting waiting buffers.\n");
                                }
                        }
                        if (entry->fence) {
                                drm_fence_usage_deref_unlocked(dev,
                                                               entry->fence);
                                entry->fence = NULL;
                        }
                }
                mutex_lock(&dev->struct_mutex);
                mutex_unlock(&entry->mutex);
                if (atomic_dec_and_test(&entry->usage) && (!entry->fence)) {
                        list_del_init(&entry->ddestroy);
                        drm_bo_destroy_locked(dev, entry);
                }
                if (nentry) {
                        atomic_dec(&nentry->usage);
                }
        }

}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        drm_device_t *dev = (drm_device_t *) data;
        drm_buffer_manager_t *bm = &dev->bm;
#else
        drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
        drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif


        DRM_DEBUG("Delayed delete Worker\n");

        mutex_lock(&dev->struct_mutex);
        if (!bm->initialized) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        drm_bo_delayed_delete(dev, 0);
        if (bm->initialized && !list_empty(&bm->ddestroy)) {
                schedule_delayed_work(&bm->wq,
                                      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
        }
        mutex_unlock(&dev->struct_mutex);
}

void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo)
{
        if (atomic_dec_and_test(&bo->usage)) {
                drm_bo_destroy_locked(dev, bo);
        }
}

static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
        drm_bo_usage_deref_locked(priv->head->dev,
                                  drm_user_object_entry(uo, drm_buffer_object_t,
                                                        base));
}

void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
{
        if (atomic_dec_and_test(&bo->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&bo->usage) == 0)
                        drm_bo_destroy_locked(dev, bo);
                mutex_unlock(&dev->struct_mutex);
        }
}

/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
                             struct list_head *list,
                             uint32_t fence_flags,
                             drm_fence_object_t * fence,
                             drm_fence_object_t ** used_fence)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        drm_buffer_object_t *entry;
        uint32_t fence_type = 0;
        int count = 0;
        int ret = 0;
        struct list_head f_list, *l;

        mutex_lock(&dev->struct_mutex);

        if (!list)
                list = &bm->unfenced;

        list_for_each_entry(entry, list, lru) {
                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
                fence_type |= entry->fence_type;
                if (entry->fence_class != 0) {
                        DRM_ERROR("Fence class %d is not implemented yet.\n",
                                  entry->fence_class);
                        ret = -EINVAL;
                        goto out;
                }
                count++;
        }

        if (!count) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * Transfer to a local list before we release the dev->struct_mutex;
         * this way we don't get any new unfenced objects while fencing
         * the ones we already have.
         */

        list_add_tail(&f_list, list);
        list_del_init(list);
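
        /*
         * List splice idiom: adding the local head f_list just before the
         * old head and then re-initializing the old head moves the entire
         * chain onto f_list in O(1), without touching the entries.
         */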

        if (fence) {
                if ((fence_type & fence->type) != fence_type) {
                        DRM_ERROR("Given fence doesn't match buffers "
                                  "on unfenced list.\n");
                        ret = -EINVAL;
                        goto out;
                }
        } else {
                mutex_unlock(&dev->struct_mutex);
                ret = drm_fence_object_create(dev, fence_type,
                                              fence_flags | DRM_FENCE_FLAG_EMIT,
                                              &fence);
                mutex_lock(&dev->struct_mutex);
                if (ret)
                        goto out;
        }

        count = 0;
        l = f_list.next;
        while (l != &f_list) {
                entry = list_entry(l, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                mutex_lock(&dev->struct_mutex);
                list_del_init(l);
                if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        count++;
                        if (entry->fence)
                                drm_fence_usage_deref_locked(dev, entry->fence);
                        entry->fence = fence;
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                        DRM_WAKEUP(&entry->event_queue);
                        drm_bo_add_to_lru(entry, bm);
                }
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(dev, entry);
                l = f_list.next;
        }
        atomic_add(count, &fence->usage);
        DRM_DEBUG("Fenced %d buffers\n", count);
      out:
        mutex_unlock(&dev->struct_mutex);
        *used_fence = fence;
        return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);

/*
 * bo->mutex locked
 */

static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
                        int no_wait, int force_no_move)
{
        int ret = 0;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        /*
         * Someone might have modified the buffer before we took the buffer mutex.
         */

        if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
                goto out;
        if (!(bo->flags & drm_bo_type_flags(mem_type)))
                goto out;

        ret = drm_bo_wait(bo, 0, 0, no_wait);

        if (ret) {
                if (ret != -EAGAIN)
                        DRM_ERROR("Failed to expire fence before "
                                  "buffer eviction.\n");
                goto out;
        }

        if (mem_type == DRM_BO_MEM_TT) {
                ret = drm_move_tt_to_local(bo, 1, force_no_move);
                if (ret)
                        goto out;
                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->lru);
                drm_bo_add_to_lru(bo, bm);
                mutex_unlock(&dev->struct_mutex);
        }

        if (ret)
                goto out;

        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
                        _DRM_BO_FLAG_EVICTED);
      out:
        return ret;
}

/*
 * buf->mutex locked.
 */

int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
                       int no_wait)
{
        drm_device_t *dev = buf->dev;
        drm_mm_node_t *node;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_buffer_object_t *bo;
        drm_mm_t *mm = &bm->manager[mem_type];
        struct list_head *lru;
        unsigned long size = buf->num_pages;
        int ret;

        mutex_lock(&dev->struct_mutex);
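        /*
         * Eviction loop: look for a free block of the right size; if there
         * is none, evict the buffer at the head of this memory type's LRU
         * list and try again, until the allocation succeeds or the LRU
         * list is exhausted.
         */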
        do {
                node = drm_mm_search_free(mm, size, buf->page_alignment, 1);
                if (node)
                        break;

                lru = &bm->lru[mem_type];
                if (lru->next == lru)
                        break;

                bo = list_entry(lru->next, drm_buffer_object_t, lru);

                atomic_inc(&bo->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&bo->mutex);
                BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
                ret = drm_bo_evict(bo, mem_type, no_wait, 0);
                mutex_unlock(&bo->mutex);
                drm_bo_usage_deref_unlocked(dev, bo);
                if (ret)
                        return ret;
                mutex_lock(&dev->struct_mutex);
        } while (1);

        if (!node) {
                DRM_ERROR("Out of videoram / aperture space\n");
                mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
        }

        node = drm_mm_get_block(node, size, buf->page_alignment);
        mutex_unlock(&dev->struct_mutex);
        BUG_ON(!node);
        node->private = (void *)buf;

        buf->mm_node = node;
        buf->offset = node->start * PAGE_SIZE;
        return 0;
}

static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
{
        drm_device_t *dev = bo->dev;
        drm_ttm_backend_t *be;
        int ret;

        if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
                BUG_ON(bo->mm_node);
                ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
                if (ret)
                        return ret;
        }

        DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);

        mutex_lock(&dev->struct_mutex);
        ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
                           bo->mm_node->start);
        if (ret) {
                drm_mm_put_block(bo->mm_node);
                bo->mm_node = NULL;
        }
        mutex_unlock(&dev->struct_mutex);

        if (ret) {
                return ret;
        }

        be = bo->ttm->be;
        if (be->needs_ub_cache_adjust(be))
                bo->flags &= ~DRM_BO_FLAG_CACHED;
        bo->flags &= ~DRM_BO_MASK_MEM;
        bo->flags |= DRM_BO_FLAG_MEM_TT;

        if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
                ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
                if (ret)
                        DRM_ERROR("Could not flush read caches\n");
        }
        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);

        return 0;
}

static int drm_bo_new_flags(drm_device_t * dev,
                            uint32_t flags, uint32_t new_mask, uint32_t hint,
                            int init, uint32_t * n_flags, uint32_t * n_mask)
{
        uint32_t new_flags = 0;
        uint32_t new_props;
        drm_bo_driver_t *driver = dev->driver->bo_driver;
        drm_buffer_manager_t *bm = &dev->bm;
        unsigned i;

        /*
         * First adjust the mask to take away nonexistent memory types.
         */

        for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
                if (!bm->use_type[i])
                        new_mask &= ~drm_bo_type_flags(i);
        }

        if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
                DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to "
                          "privileged processes\n");
                return -EPERM;
        }
        if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
                if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
                     !driver->cached[DRM_BO_MEM_TT]) &&
                    ((new_mask & DRM_BO_FLAG_MEM_VRAM)
                     && !driver->cached[DRM_BO_MEM_VRAM])) {
                        new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
                } else {
                        /*
                         * Drop memory types that cannot be bound cached
                         * from the mask.
                         */
                        if (!driver->cached[DRM_BO_MEM_TT])
                                new_mask &= ~DRM_BO_FLAG_MEM_TT;
                        if (!driver->cached[DRM_BO_MEM_VRAM])
                                new_mask &= ~DRM_BO_FLAG_MEM_VRAM;
                }
        }

        if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
            !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
                if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
                    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
                        DRM_ERROR("Cannot read cached from a pinned "
                                  "VRAM / TT buffer\n");
                        return -EINVAL;
                }
        }

        /*
         * Determine new memory location:
         */

        if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {

                new_flags = new_mask & DRM_BO_MASK_MEM;

                if (!new_flags) {
                        DRM_ERROR("Invalid buffer object memory flags\n");
                        return -EINVAL;
                }

                if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
                        if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
                            new_flags & (DRM_BO_FLAG_MEM_VRAM |
                                         DRM_BO_FLAG_MEM_TT)) {
                                new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
                        } else {
                                new_flags = DRM_BO_FLAG_MEM_LOCAL;
                        }
                }
                if (new_flags & DRM_BO_FLAG_MEM_TT) {
                        if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
                            new_flags & DRM_BO_FLAG_MEM_VRAM) {
                                new_flags = DRM_BO_FLAG_MEM_VRAM;
                        } else {
                                new_flags = DRM_BO_FLAG_MEM_TT;
                        }
                }
        } else {
                new_flags = flags & DRM_BO_MASK_MEM;
        }

        new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
                                DRM_BO_FLAG_READ);

        if (!new_props) {
                DRM_ERROR("Invalid buffer object rwx properties\n");
                return -EINVAL;
        }

        new_flags |= new_mask & ~DRM_BO_MASK_MEM;

        if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
            (new_flags & DRM_BO_FLAG_NO_EVICT) &&
            (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
                if (!(flags & DRM_BO_FLAG_CACHED)) {
                        DRM_ERROR("Cannot change caching policy of "
                                  "pinned buffer\n");
                        return -EINVAL;
                } else {
                        new_flags &= ~DRM_BO_FLAG_CACHED;
                }
        }

        *n_flags = new_flags;
        *n_mask = new_mask;
        return 0;
}

/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
                                              uint32_t handle, int check_owner)
{
        drm_user_object_t *uo;
        drm_buffer_object_t *bo;

        uo = drm_lookup_user_object(priv, handle);

        if (!uo || (uo->type != drm_buffer_type)) {
                DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
                return NULL;
        }

        if (check_owner && priv != uo->owner) {
                if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
                        return NULL;
        }

        bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
        atomic_inc(&bo->usage);
        return bo;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
 * Unlike drm_bo_busy(), this function doesn't flush fences.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                return 1;
        }
        return 0;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                return 1;
        }
        return 0;
}

static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
        int ret = 0;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (bo->mm_node)
                ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
        return ret;
}

/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->mapped) >= 0) && no_wait)
                return -EBUSY;

        DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                    atomic_read(&bo->mapped) == -1);

        if (ret == -EINTR)
                ret = -EAGAIN;

        return ret;
}

static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
        int ret;

        mutex_lock(&bo->mutex);
        ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        mutex_unlock(&bo->mutex);
        return ret;
}

/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA: the three steps
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be one atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */
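
/*
 * Illustrative timeline of the window this guards (descriptive labels,
 * not real function names):
 *
 *   1) validate        -> buffer marked _DRM_BO_FLAG_UNFENCED, placed on
 *                         the unfenced list
 *   2) submit commands -> the GPU now references the buffer
 *   3) fence           -> drm_fence_buffer_objects() attaches the fence,
 *                         clears the flag and moves the buffer back to
 *                         its LRU list
 *
 * Everyone else waits here until step 3 has run.
 */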

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
                                int eagain_if_wait)
{
        int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        unsigned long _end = jiffies + 3 * DRM_HZ;

        if (ret && no_wait)
                return -EBUSY;
        else if (!ret)
                return 0;

        do {
                mutex_unlock(&bo->mutex);
                DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                            !drm_bo_check_unfenced(bo));
                mutex_lock(&bo->mutex);
                if (ret == -EINTR)
                        return -EAGAIN;
                if (ret) {
                        DRM_ERROR("Error waiting for buffer to become fenced\n");
                        return ret;
                }
                ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        } while (ret && !time_after_eq(jiffies, _end));
        if (ret) {
                DRM_ERROR("Timeout waiting for buffer to become fenced\n");
                return ret;
        }
        if (eagain_if_wait)
                return -EAGAIN;

        return 0;
}

/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
                                drm_bo_arg_reply_t * rep)
{
        rep->handle = bo->base.hash.key;
        rep->flags = bo->flags;
        rep->size = bo->num_pages * PAGE_SIZE;
        rep->offset = bo->offset;

        if (bo->ttm_object) {
                rep->arg_handle = bo->ttm_object->map_list.user_token;
        } else {
                rep->arg_handle = 0;
        }

        rep->mask = bo->mask;
        rep->buffer_start = bo->buffer_start;
        rep->fence_flags = bo->fence_type;
        rep->rep_flags = 0;
        rep->page_alignment = bo->page_alignment;

        if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
                DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
                                DRM_BO_REP_BUSY);
        }
}

/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */
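
/*
 * Note that bo->mapped is biased: it is initialized to -1, so -1 means
 * "unmapped" and atomic_inc_and_test() below is true exactly for the
 * first mapper. The unmap paths use atomic_add_negative(-1, ...) to
 * detect when the last mapping disappears and the event queue must be
 * woken.
 */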

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                                 uint32_t map_flags, unsigned hint,
                                 drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        drm_device_t *dev = priv->head->dev;
        int ret = 0;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
                ret = drm_bo_wait_unfenced(bo, no_wait, 0);
                if (ret)
                        goto out;
        }

        /*
         * If this returns true, we are currently unmapped.
         * We need to do this test, because unmapping can
         * be done without the bo->mutex held.
         */

        while (1) {
                if (atomic_inc_and_test(&bo->mapped)) {
                        if (no_wait && drm_bo_busy(bo)) {
                                atomic_dec(&bo->mapped);
                                ret = -EBUSY;
                                goto out;
                        }
                        ret = drm_bo_wait(bo, 0, 0, no_wait);
                        if (ret) {
                                atomic_dec(&bo->mapped);
                                goto out;
                        }

                        if ((map_flags & DRM_BO_FLAG_READ) &&
                            (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
                            (!(bo->flags & DRM_BO_FLAG_CACHED))) {
                                drm_bo_read_cached(bo);
                        }
                        break;
                } else if ((map_flags & DRM_BO_FLAG_READ) &&
                           (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
                           (!(bo->flags & DRM_BO_FLAG_CACHED))) {

                        /*
                         * We are already mapped with different flags;
                         * we need to wait for unmap.
                         */

                        ret = drm_bo_wait_unmapped(bo, no_wait);
                        if (ret)
                                goto out;

                        continue;
                }
                break;
        }

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                if (atomic_add_negative(-1, &bo->mapped))
                        DRM_WAKEUP(&bo->event_queue);

        } else
                drm_bo_fill_rep_arg(bo, rep);
      out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(dev, bo);
        return ret;
}

static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_object_t *bo;
        drm_ref_object_t *ro;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                ret = -EINVAL;
                goto out;
        }

        ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
        if (!ro) {
                ret = -EINVAL;
                goto out;
        }

        drm_remove_ref_object(priv, ro);
        drm_bo_usage_deref_locked(dev, bo);
      out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
                                         drm_user_object_t * uo,
                                         drm_ref_t action)
{
        drm_buffer_object_t *bo =
            drm_user_object_entry(uo, drm_buffer_object_t, base);

        /*
         * We DON'T want to take the bo->lock here, because we want to
         * hold it when we wait for unmapped buffer.
         */

        BUG_ON(action != _DRM_REF_TYPE1);

        if (atomic_add_negative(-1, &bo->mapped))
                DRM_WAKEUP(&bo->event_queue);
}

/*
 * bo->mutex locked.
 */

static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
                              int no_wait, int force_no_move)
{
        int ret = 0;

        /*
         * Flush outstanding fences.
         */
        drm_bo_busy(bo);

        /*
         * Make sure we're not mapped.
         */

        ret = drm_bo_wait_unmapped(bo, no_wait);
        if (ret)
                return ret;

        /*
         * Wait for outstanding fences.
         */

        ret = drm_bo_wait(bo, 0, 0, no_wait);

        if (ret == -EINTR)
                return -EAGAIN;
        if (ret)
                return ret;

        if (new_flags & DRM_BO_FLAG_MEM_TT) {
                ret = drm_move_local_to_tt(bo, no_wait);
                if (ret)
                        return ret;
        } else {
                drm_move_tt_to_local(bo, 0, force_no_move);
        }

        return 0;
}

/*
 * bo locked.
 */

static int drm_buffer_object_validate(drm_buffer_object_t * bo,
                                      uint32_t new_flags,
                                      int move_unfenced, int no_wait)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        uint32_t flag_diff = (new_flags ^ bo->flags);
        drm_bo_driver_t *driver = dev->driver->bo_driver;

        int ret;

        if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
                DRM_ERROR("Vram support not implemented yet\n");
                return -EINVAL;
        }

        DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
        ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
        if (ret) {
                DRM_ERROR("Driver did not support given buffer permissions\n");
                return ret;
        }

        /*
         * Move out if we need to change caching policy.
         */

        if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
            !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
                if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
                        DRM_ERROR("Cannot change caching policy of "
                                  "pinned buffer.\n");
                        return -EINVAL;
                }
                ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }
        DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
        flag_diff = (new_flags ^ bo->flags);

        /*
         * Check whether we dropped no_move policy, and in that case,
         * release reserved manager regions.
         */

        if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
            !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
                mutex_lock(&dev->struct_mutex);
                if (bo->mm_node) {
                        drm_mm_put_block(bo->mm_node);
                        bo->mm_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        /*
         * Check whether we need to move buffer.
         */

        if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
                ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }

        if (move_unfenced) {

                /*
                 * Place on unfenced list.
                 */

                DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
                                _DRM_BO_FLAG_UNFENCED);
                mutex_lock(&dev->struct_mutex);
                list_del(&bo->lru);
                list_add_tail(&bo->lru, &bm->unfenced);
                mutex_unlock(&dev->struct_mutex);
        } else {

                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->lru);
                drm_bo_add_to_lru(bo, bm);
                mutex_unlock(&dev->struct_mutex);
        }

        bo->flags = new_flags;
        return 0;
}

static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
                                  uint32_t flags, uint32_t mask, uint32_t hint,
                                  drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        drm_device_t *dev = priv->head->dev;
        int ret;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        uint32_t new_flags;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                return -EINVAL;
        }

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);

        if (ret)
                goto out;

        ret = drm_bo_new_flags(dev, bo->flags,
                               (flags & mask) | (bo->mask & ~mask), hint,
                               0, &new_flags, &bo->mask);

        if (ret)
                goto out;

        ret = drm_buffer_object_validate(bo, new_flags,
                                         !(hint & DRM_BO_HINT_DONT_FENCE),
                                         no_wait);
        drm_bo_fill_rep_arg(bo, rep);

      out:

        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(dev, bo);
        return ret;
}

static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
                              drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                return -EINVAL;
        }
        mutex_lock(&bo->mutex);
        if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
                (void)drm_bo_busy(bo);
        drm_bo_fill_rep_arg(bo, rep);
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo->dev, bo);
        return 0;
}

static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
                              uint32_t hint, drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        int ret;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                return -EINVAL;
        }

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
        if (ret)
                goto out;
        ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
        if (ret)
                goto out;

        drm_bo_fill_rep_arg(bo, rep);

      out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo->dev, bo);
        return ret;
}

/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
{
        drm_device_t *dev = bo->dev;
        drm_ttm_object_t *to = NULL;
        int ret = 0;
        uint32_t ttm_flags = 0;

        bo->ttm_object = NULL;
        bo->ttm = NULL;

        switch (bo->type) {
        case drm_bo_type_dc:
                mutex_lock(&dev->struct_mutex);
                ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
                                            ttm_flags, &to);
                mutex_unlock(&dev->struct_mutex);
                break;
        case drm_bo_type_user:
        case drm_bo_type_fake:
                break;
        default:
                DRM_ERROR("Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        if (ret) {
                return ret;
        }

        if (to) {
                bo->ttm_object = to;
                bo->ttm = drm_ttm_from_object(to);
        }
        return ret;
}

int drm_buffer_object_create(drm_file_t * priv,
                             unsigned long size,
                             drm_bo_type_t type,
                             uint32_t mask,
                             uint32_t hint,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
                             drm_buffer_object_t ** buf_obj)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_buffer_object_t *bo;
        int ret = 0;
        uint32_t new_flags;
        unsigned long num_pages;

        if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
                DRM_ERROR("Invalid buffer object start.\n");
                return -EINVAL;
        }
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                DRM_ERROR("Illegal buffer object size.\n");
                return -EINVAL;
        }

        bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);

        if (!bo)
                return -ENOMEM;

        mutex_init(&bo->mutex);
        mutex_lock(&bo->mutex);

        atomic_set(&bo->usage, 1);
        atomic_set(&bo->mapped, -1);
        DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        bo->dev = dev;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mm_node = NULL;
        bo->page_alignment = page_alignment;
        if (bo->type == drm_bo_type_fake) {
                bo->offset = buffer_start;
                bo->buffer_start = 0;
        } else {
                bo->buffer_start = buffer_start;
        }
        bo->priv_flags = 0;
        bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
        atomic_inc(&bm->count);
        ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
                               1, &new_flags, &bo->mask);
        if (ret)
                goto out_err;
        ret = drm_bo_add_ttm(priv, bo);
        if (ret)
                goto out_err;

        ret = drm_buffer_object_validate(bo, new_flags, 0,
                                         hint & DRM_BO_HINT_DONT_BLOCK);
        if (ret)
                goto out_err;

        mutex_unlock(&bo->mutex);
        *buf_obj = bo;
        return 0;

      out_err:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(dev, bo);
        return ret;
}

static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
                                  int shareable)
{
        drm_device_t *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_user_object(priv, &bo->base, shareable);
        if (ret)
                goto out;

        bo->base.remove = drm_bo_base_deref_locked;
        bo->base.type = drm_buffer_type;
        bo->base.ref_struct_locked = NULL;
        bo->base.unref = drm_buffer_user_object_unmap;

      out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
        LOCK_TEST_WITH_RETURN(dev, filp);
        return 0;
}

int drm_bo_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_bo_arg_t arg;
        drm_bo_arg_request_t *req = &arg.d.req;
        drm_bo_arg_reply_t rep;
        unsigned long next;
        drm_user_object_t *uo;
        drm_buffer_object_t *entry;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

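        /*
         * The argument is a user-space linked list of drm_bo_arg_t blocks;
         * each iteration handles one request, fills in its reply, marks it
         * handled and follows arg.next to the next block.
         */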
1449         do {
1450                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1451
1452                 if (arg.handled) {
1453                         data = arg.next;
1454                         continue;
1455                 }
1456
1457                 rep.ret = 0;
1458                 switch (req->op) {
1459                 case drm_bo_create:
1460                         rep.ret =
1461                             drm_buffer_object_create(priv, req->size,
1462                                                      req->type,
1463                                                      req->mask,
1464                                                      req->hint,
1465                                                      req->page_alignment,
1466                                                      req->buffer_start, &entry);
1467                         if (rep.ret)
1468                                 break;
1469
1470                         rep.ret =
1471                             drm_bo_add_user_object(priv, entry,
1472                                                    req->
1473                                                    mask &
1474                                                    DRM_BO_FLAG_SHAREABLE);
1475                         if (rep.ret)
1476                                 drm_bo_usage_deref_unlocked(dev, entry);
1477
1478                         if (rep.ret)
1479                                 break;
1480
1481                         mutex_lock(&entry->mutex);
1482                         drm_bo_fill_rep_arg(entry, &rep);
1483                         mutex_unlock(&entry->mutex);
1484                         break;
1485                 case drm_bo_unmap:
1486                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1487                         break;
1488                 case drm_bo_map:
1489                         rep.ret = drm_buffer_object_map(priv, req->handle,
1490                                                         req->mask,
1491                                                         req->hint, &rep);
1492                         break;
1493                 case drm_bo_destroy:
1494                         mutex_lock(&dev->struct_mutex);
1495                         uo = drm_lookup_user_object(priv, req->handle);
1496                         if (!uo || (uo->type != drm_buffer_type)
1497                             || uo->owner != priv) {
1498                                 mutex_unlock(&dev->struct_mutex);
1499                                 rep.ret = -EINVAL;
1500                                 break;
1501                         }
1502                         rep.ret = drm_remove_user_object(priv, uo);
1503                         mutex_unlock(&dev->struct_mutex);
1504                         break;
1505                 case drm_bo_reference:
1506                         rep.ret = drm_user_object_ref(priv, req->handle,
1507                                                       drm_buffer_type, &uo);
1508                         if (rep.ret)
1509                                 break;
1510                         mutex_lock(&dev->struct_mutex);
1511                         uo = drm_lookup_user_object(priv, req->handle);
1512                         entry =
1513                             drm_user_object_entry(uo, drm_buffer_object_t,
1514                                                   base);
1515                         atomic_dec(&entry->usage);
1516                         mutex_unlock(&dev->struct_mutex);
1517                         mutex_lock(&entry->mutex);
1518                         drm_bo_fill_rep_arg(entry, &rep);
1519                         mutex_unlock(&entry->mutex);
1520                         break;
1521                 case drm_bo_unreference:
1522                         rep.ret = drm_user_object_unref(priv, req->handle,
1523                                                         drm_buffer_type);
1524                         break;
1525                 case drm_bo_validate:
1526                         rep.ret = drm_bo_lock_test(dev, filp);
1527
1528                         if (rep.ret)
1529                                 break;
1530                         rep.ret =
1531                             drm_bo_handle_validate(priv, req->handle, req->mask,
1532                                                    req->arg_handle, req->hint,
1533                                                    &rep);
1534                         break;
1535                 case drm_bo_fence:
1536                         rep.ret = drm_bo_lock_test(dev, filp);
1537                         if (rep.ret)
1538                                 break;
1539                          /**/ break;
1540                 case drm_bo_info:
1541                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1542                         break;
1543                 case drm_bo_wait_idle:
1544                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1545                                                      req->hint, &rep);
1546                         break;
		case drm_bo_ref_fence:
			rep.ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		default:
			rep.ret = -EINVAL;
		}
		next = arg.next;

		/*
		 * A signal interrupted us. Make sure the ioctl is restartable.
		 */
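		/*
		 * Requests processed so far were marked handled and
		 * copied back to user space, so a restarted ioctl can
		 * presumably pick up the chain where it left off.
		 */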
		if (rep.ret == -EAGAIN)
			return -EAGAIN;

		arg.handled = 1;
		arg.d.rep = rep;
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
		data = next;
	} while (data);
	return 0;
}

/*
 * dev->struct_mutex locked.
 */
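/*
 * Evict everything on the given list. The buffer mutex must be taken
 * before struct_mutex can be re-acquired, so struct_mutex is dropped
 * for each entry; if the neighbours of the entry changed while we
 * slept (compared against the saved prev / next pointers), the list
 * was modified and the scan restarts from the head.
 */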

static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int force_no_move, int allow_errors)
{
	drm_buffer_manager_t *bm = &dev->bm;
	struct list_head *list, *next, *prev;
	drm_buffer_object_t *entry;
	int ret;
	int clean;

retry:
	clean = 1;
	list_for_each_safe(list, next, head) {
		prev = list->prev;
		entry = list_entry(list, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);

		if (prev != list->prev || next != list->next) {
			mutex_unlock(&entry->mutex);
			drm_bo_usage_deref_locked(dev, entry);
			goto retry;
		}
		if (entry->mm_node) {
			clean = 0;

			/*
			 * Expire the fence.
			 */
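			/*
			 * Give the fence up to three seconds to signal;
			 * if it still hasn't by then, assume the GPU is
			 * hung (or the fence manager was taken down) and
			 * stop waiting nicely from here on.
			 */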

			mutex_unlock(&dev->struct_mutex);
			if (entry->fence && bm->nice_mode) {
				unsigned long _end = jiffies + 3 * DRM_HZ;
				do {
					ret = drm_bo_wait(entry, 0, 1, 0);
					if (ret && allow_errors) {
						if (ret == -EINTR)
							ret = -EAGAIN;
						goto out_err;
					}
				} while (ret && !time_after_eq(jiffies, _end));

				if (entry->fence) {
					bm->nice_mode = 0;
					DRM_ERROR("Detected GPU hang or "
						  "fence manager was taken down. "
						  "Evicting waiting buffers\n");
				}
			}
			if (entry->fence) {
				drm_fence_usage_deref_unlocked(dev,
							       entry->fence);
				entry->fence = NULL;
			}

			DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
				     0);

			if (force_no_move) {
				DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
					     0);
			}
			if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
				DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
					  "cleanup. Removing flag and evicting.\n");
				entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
				entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
			}

			ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
			if (ret) {
				if (allow_errors) {
					goto out_err;
				} else {
					DRM_ERROR("Aargh. Eviction failed.\n");
				}
			}
			mutex_lock(&dev->struct_mutex);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(dev, entry);
		if (prev != list->prev || next != list->next) {
			goto retry;
		}
	}
	if (!clean)
		goto retry;
	return 0;
out_err:
	mutex_unlock(&entry->mutex);
	drm_bo_usage_deref_unlocked(dev, entry);
	mutex_lock(&dev->struct_mutex);
	return ret;
}

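/*
 * Take down the memory manager for a memory type: evict everything in
 * it and, if the underlying drm_mm is then clean, tear it down.
 * Returns -EBUSY if memory is still allocated after the eviction pass.
 * Called with dev->struct_mutex held.
 */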
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %u\n", mem_type);
		return ret;
	}

	if (!bm->has_type[mem_type]) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
		return ret;
	}
	bm->use_type[mem_type] = 0;
	bm->has_type[mem_type] = 0;

	ret = 0;
	if (mem_type > 0) {

		/*
		 * Throw out unfenced buffers.
		 */

		drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);

		/*
		 * Throw out evicted no-move buffers.
		 */

		drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
					mem_type, 1, 0);
		drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
					0);
		drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
					0);

		if (drm_mm_clean(&bm->manager[mem_type])) {
			drm_mm_takedown(&bm->manager[mem_type]);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}

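/*
 * Evict everything from a memory type without taking it down,
 * presumably to idle the memory manager (the mm_lock ioctl), e.g.
 * around VT switches. Unlike the takedown path, errors here are
 * propagated back to the caller (allow_errors = 1).
 */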
static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
	int ret;
	drm_buffer_manager_t *bm = &dev->bm;

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
	if (ret)
		return ret;
	ret =
	    drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
	return ret;
}

static int drm_bo_init_mm(drm_device_t * dev,
			  unsigned type,
			  unsigned long p_offset, unsigned long p_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %u\n", type);
		return ret;
	}
	if (bm->has_type[type]) {
		DRM_ERROR("Memory manager already initialized for type %u\n",
			  type);
		return ret;
	}

	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %u\n", type);
			return ret;
		}
		ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
		if (ret)
			return ret;
	}
	bm->has_type[type] = 1;
	bm->use_type[type] = 1;

	INIT_LIST_HEAD(&bm->lru[type]);
	INIT_LIST_HEAD(&bm->pinned[type]);

	return 0;
}
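
/*
 * Example (hypothetical driver code, not part of this file): a driver
 * would typically bring up its aperture-backed types from its own
 * initialization hook or via the mm_init ioctl, along the lines of
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
 *	if (ret)
 *		return ret;
 *
 * where tt_pages is the aperture size in pages. DRM_BO_MEM_LOCAL takes
 * no offset / size since it is not backed by a drm_mm.
 */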

/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	unsigned i = DRM_BO_MEM_TYPES;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;

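	/*
	 * Take the memory types down in reverse order. DRM_BO_MEM_LOCAL
	 * has no drm_mm behind it, so it is skipped by drm_bo_clean_mm
	 * and only flagged unused here.
	 */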
	while (i--) {
		if (bm->has_type[i]) {
			bm->use_type[i] = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %u "
					  "is not clean.\n", i);
			}
			bm->has_type[i] = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);
	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	}
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	}
	if (list_empty(&bm->lru[0])) {
		DRM_DEBUG("Swap list was clean\n");
	}
	if (list_empty(&bm->pinned[0])) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	}
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	}
out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}

int drm_bo_driver_init(drm_device_t * dev)
{
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */

	ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
	if (ret)
		goto out_unlock;

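	/*
	 * 2.6.20 split delayed work into its own struct and changed
	 * work functions to take the work item itself instead of a
	 * data pointer, hence the two initialization variants.
	 */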
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);
	bm->cur_pages = 0;
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}

EXPORT_SYMBOL(drm_bo_driver_init);

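/*
 * The mm_init ioctl: lets user space (typically the X server) bring
 * up, take down, lock and unlock the non-local memory types.
 * mm_takedown, mm_lock and mm_unlock require the caller to hold the
 * heavyweight DRM lock.
 */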
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	int ret = 0;
	drm_mm_init_arg_t arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	switch (arg.req.op) {
	case mm_init:
		ret = -EINVAL;
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized.\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR("System memory buffers already initialized.\n");
			break;
		}
		ret = drm_bo_init_mm(dev, arg.req.mem_type,
				     arg.req.p_offset, arg.req.p_size);
		break;
	case mm_takedown:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = -EINVAL;
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR("No takedown for system memory buffers.\n");
			break;
		}
		ret = 0;
		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg.req.mem_type);
		}
		break;
	case mm_lock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
		break;
	case mm_unlock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = 0;
		break;
	default:
		DRM_ERROR("Function not implemented yet\n");
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}