drm: use anon-inode instead of relying on cdevs
drivers/gpu/drm/radeon/radeon_object.c (platform/adaptation/renesas_rcar/renesas_kernel.git)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

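/**
 * radeon_bo_clear_va - remove a BO from all VM address spaces
 * @bo: radeon buffer object being destroyed
 *
 * Walks the BO's list of per-VM mappings and removes each one.
 */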
static void radeon_bo_clear_va(struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va, *tmp;

        list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
                /* remove from all VM address spaces */
                radeon_vm_bo_rmv(bo->rdev, bo_va);
        }
}

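/**
 * radeon_ttm_bo_destroy - TTM destroy callback for radeon BOs
 * @tbo: TTM buffer object being destroyed
 *
 * Unlinks the BO from the device's GEM list, drops its surface
 * register and VM mappings, releases the GEM object and frees it.
 */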
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        radeon_bo_clear_va(bo);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

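/**
 * radeon_ttm_bo_is_radeon_bo - check whether a TTM BO is a radeon BO
 * @bo: TTM buffer object to check
 *
 * Identifies radeon BOs by their destroy callback.
 */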
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

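/**
 * radeon_ttm_placement_from_domain - build a TTM placement from a domain mask
 * @rbo: radeon buffer object to fill in
 * @domain: mask of RADEON_GEM_DOMAIN_* flags
 *
 * Translates the VRAM/GTT/CPU domain bits into TTM placement flags,
 * falling back to cached system memory when no domain bit is set.
 */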
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0;

        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
                }
        }
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
}

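/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 * @rdev: radeon device
 * @size: size in bytes, rounded up to a multiple of PAGE_SIZE
 * @byte_align: byte alignment of the buffer
 * @kernel: true for a kernel-internal BO, false for a userspace-visible one
 * @domain: initial RADEON_GEM_DOMAIN_* placement
 * @sg: optional scatter/gather table for imported buffers
 * @bo_ptr: returned buffer object
 *
 * Returns 0 on success or a negative error code.
 */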
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        rdev->mman.bdev.dev_mapping = rdev->ddev->anon_inode->i_mapping;
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}

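/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 * @bo: buffer object to map
 * @ptr: returned kernel virtual address, may be NULL
 *
 * Reuses an existing mapping if one is present.
 * Returns 0 on success or a negative error code.
 */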
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

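/**
 * radeon_bo_kunmap - unmap a previously kmapped buffer object
 * @bo: buffer object to unmap
 */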
void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

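/**
 * radeon_bo_unref - drop a reference to a buffer object
 * @bo: pointer to the BO pointer; cleared when the last reference is dropped
 */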
void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        down_read(&rdev->pm.mclk_lock);
        ttm_bo_unref(&tbo);
        up_read(&rdev->pm.mclk_lock);
        if (tbo == NULL)
                *bo = NULL;
}

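/**
 * radeon_bo_pin_restricted - pin a BO, optionally below a maximum offset
 * @bo: buffer object to pin
 * @domain: RADEON_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable GPU offset, or 0 for no limit
 * @gpu_addr: optional returned GPU address
 *
 * Increments the pin count if the BO is already pinned, otherwise
 * validates it into the requested domain with eviction disabled.
 * Returns 0 on success or a negative error code.
 */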
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                /* force to pin into visible video ram */
                bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        }
        if (max_offset) {
                u64 lpfn = max_offset >> PAGE_SHIFT;

                if (!bo->placement.lpfn)
                        bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

                if (lpfn < bo->placement.lpfn)
                        bo->placement.lpfn = lpfn;
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

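/**
 * radeon_bo_unpin - drop one pin reference from a buffer object
 * @bo: buffer object to unpin
 *
 * Makes the BO evictable again once the pin count reaches zero.
 * Returns 0 on success or a negative error code.
 */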
int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

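/**
 * radeon_bo_evict_vram - evict all buffer objects from VRAM
 * @rdev: radeon device
 *
 * Typically used around suspend, so VRAM contents end up somewhere
 * recoverable.
 */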
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

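/**
 * radeon_bo_force_delete - force-free all remaining GEM objects
 * @rdev: radeon device
 *
 * Last-resort cleanup at teardown; any object still on the GEM list
 * at this point indicates a leak, which is reported via dev_err.
 */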
void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
                                                      rdev->mc.aper_size);
        }
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head)
{
        if (lobj->written) {
                list_add(&lobj->tv.head, head);
        } else {
                list_add_tail(&lobj->tv.head, head);
        }
}

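/**
 * radeon_bo_list_validate - reserve and validate a list of buffer objects
 * @ticket: ww_acquire_ctx used for the reservations
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring index the buffers will be used on
 *
 * Validates each unpinned BO into its preferred domain, retrying with
 * the alternate domain on failure. Backs off all reservations on error.
 * Returns 0 on success or a negative error code.
 */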
int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
        u32 domain;
        int r;

        r = ttm_eu_reserve_buffers(ticket, head);
        if (unlikely(r != 0)) {
                return r;
        }
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
                        domain = lobj->domain;

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false);
                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
                                        domain = lobj->alt_domain;
                                        goto retry;
                                }
                                ttm_eu_backoff_reservation(ticket, head);
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                             struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

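/**
 * radeon_bo_get_surface_reg - bind a tiled BO to a hardware surface register
 * @bo: buffer object needing a surface register
 *
 * Picks a free surface register, stealing one from an unpinned BO if
 * none is free, then programs it with the BO's tiling parameters.
 * Returns 0 on success or -ENOMEM if no register can be reclaimed.
 */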
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

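/**
 * radeon_bo_set_tiling_flags - set tiling parameters for a buffer object
 * @bo: buffer object
 * @tiling_flags: RADEON_TILING_* flags
 * @pitch: surface pitch
 *
 * On evergreen and newer (CHIP_CEDAR+) the encoded bank width/height,
 * macro tile aspect and tile split fields are range-checked first.
 * Returns 0 on success or a negative error code.
 */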
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

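/**
 * radeon_bo_check_tiling - keep a BO's surface register in sync with its state
 * @bo: buffer object to check
 * @has_moved: true if the BO has just been moved
 * @force_drop: true to unconditionally release the surface register
 *
 * Returns 0 on success or a negative error code.
 */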
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        if (!force_drop)
                lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

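/**
 * radeon_bo_fault_reserve_notify - TTM fault callback for radeon BOs
 * @bo: TTM buffer object that is about to be CPU-mapped
 *
 * Makes sure a faulting VRAM BO lies within the CPU-visible part of
 * VRAM, moving it into the visible window or to GTT if necessary.
 * Returns 0 on success or a negative error code.
 */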
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size;
        int r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;

        /* hurrah, the memory is not visible! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &rbo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > rdev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

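/**
 * radeon_bo_wait - wait for pending GPU work on a buffer object
 * @bo: buffer object to wait for
 * @mem_type: optional returned memory type of the BO
 * @no_wait: if true, fail instead of blocking
 *
 * Returns 0 on success or a negative error code.
 */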
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0))
                return r;
        spin_lock(&bo->tbo.bdev->fence_lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        spin_unlock(&bo->tbo.bdev->fence_lock);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}