radeon: first pass at bios scratch regs
[platform/upstream/libdrm.git] / linux-core / radeon_gem.c
/*
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Dave Airlie
 */
#include "drmP.h"
#include "drm.h"

#include "radeon_drm.h"
#include "radeon_drv.h"

static int radeon_gem_ib_init(struct drm_device *dev);
static int radeon_gem_ib_destroy(struct drm_device *dev);
static int radeon_gem_dma_bufs_init(struct drm_device *dev);
static void radeon_gem_dma_bufs_destroy(struct drm_device *dev);

int radeon_gem_init_object(struct drm_gem_object *obj)
{
        struct drm_radeon_gem_object *obj_priv;

        obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
        if (!obj_priv)
                return -ENOMEM;

        obj->driver_private = obj_priv;
        obj_priv->obj = obj;

        return 0;
}

void radeon_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_radeon_gem_object *obj_priv = obj->driver_private;

        /* tear down the buffer object - gem holds struct mutex */
        drm_bo_takedown_vm_locked(obj_priv->bo);
        drm_bo_usage_deref_locked(&obj_priv->bo);
        /* free with the size we allocated, not 1, so accounting stays right */
        drm_free(obj->driver_private, sizeof(*obj_priv), DRM_MEM_DRIVER);
}

int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct drm_radeon_gem_info *args = data;

        args->vram_start = dev_priv->mm.vram_offset;
        args->vram_size = dev_priv->mm.vram_size;
        args->vram_visible = dev_priv->mm.vram_visible;

        args->gart_start = dev_priv->mm.gart_start;
        args->gart_size = dev_priv->mm.gart_size;

        return 0;
}

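/*
 * Illustrative only: a minimal userspace sketch of querying the limits the
 * info ioctl above reports. The DRM_RADEON_GEM_INFO command number is an
 * assumption (it is not defined in this file); the args layout is the
 * struct drm_radeon_gem_info filled in by radeon_gem_info_ioctl().
 */
#if 0
static int radeon_example_query_info(int fd, struct drm_radeon_gem_info *info)
{
        memset(info, 0, sizeof(*info));
        /* on success, info->vram_* and info->gart_* hold the extents above */
        return drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
                                   info, sizeof(*info));
}
#endif
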
struct drm_gem_object *radeon_gem_object_alloc(struct drm_device *dev, int size, int alignment,
                                               int initial_domain)
{
        struct drm_gem_object *obj;
        struct drm_radeon_gem_object *obj_priv;
        int ret;
        uint32_t flags;

        DRM_DEBUG("size 0x%x, alignment %d, initial_domain %d\n", size, alignment, initial_domain);
        obj = drm_gem_object_alloc(dev, size);
        if (!obj)
                return NULL;

        obj_priv = obj->driver_private;
        if (initial_domain == RADEON_GEM_DOMAIN_VRAM)
                flags = DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE;
        else
                flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE;

        flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE;
        /* create a TTM BO */
        ret = drm_buffer_object_create(dev,
                                       size, drm_bo_type_device,
                                       flags, 0, alignment,
                                       0, &obj_priv->bo);
        if (ret)
                goto fail;

        return obj;
fail:
        /* don't leak the GEM object if the BO creation failed */
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return NULL;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_radeon_gem_create *args = data;
        struct drm_radeon_gem_object *obj_priv;
        struct drm_gem_object *obj;
        int ret = 0;
        int handle;

        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);

        obj = radeon_gem_object_alloc(dev, args->size, args->alignment, args->initial_domain);
        if (!obj)
                return -EINVAL;

        obj_priv = obj->driver_private;
        DRM_DEBUG("obj is %p bo is %p, %d\n", obj, obj_priv->bo, obj_priv->bo->num_pages);
        ret = drm_gem_handle_create(file_priv, obj, &handle);
        /* the handle holds a reference now (on success) - drop our
         * initial allocation reference either way */
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}

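/*
 * Illustrative only: a rough userspace sketch of allocating a BO through the
 * create ioctl above. DRM_RADEON_GEM_CREATE is an assumed command number;
 * the size/alignment/initial_domain/handle fields are the ones consumed and
 * filled by radeon_gem_create_ioctl().
 */
#if 0
static int radeon_example_create_bo(int fd, uint64_t size, uint32_t *handle)
{
        struct drm_radeon_gem_create create = {0};

        create.size = size;        /* rounded up to PAGE_SIZE by the kernel */
        create.alignment = 0;
        create.initial_domain = RADEON_GEM_DOMAIN_VRAM;

        if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
                                &create, sizeof(create)))
                return -1;

        *handle = create.handle;
        return 0;
}
#endif
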
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        /* transition the BO to a domain - just validate the BO into a certain domain */
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        struct drm_radeon_gem_object *obj_priv;
        int ret;

        /* for now, whatever domain is requested, just make sure the buffer
         * is finished with - i.e. do a BO wait */
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EINVAL;

        obj_priv = obj->driver_private;

        mutex_lock(&obj_priv->bo->mutex);
        ret = drm_bo_wait(obj_priv->bo, 0, 1, 0, 0);
        mutex_unlock(&obj_priv->bo->mutex);

        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        return -ENOSYS;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_radeon_gem_mmap *args = data;
        struct drm_gem_object *obj;
        struct drm_radeon_gem_object *obj_priv;
        loff_t offset;
        unsigned long addr;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EINVAL;

        offset = args->offset;

        obj_priv = obj->driver_private;

        /* check the BO exists before dereferencing it for the debug print */
        if (!obj_priv->bo) {
                mutex_lock(&dev->struct_mutex);
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        DRM_DEBUG("mmap obj %p bo %p size %lld pages %ld\n", obj, obj_priv->bo,
                  args->size, obj_priv->bo->num_pages);

        down_write(&current->mm->mmap_sem);
        addr = do_mmap_pgoff(file_priv->filp, 0, args->size,
                             PROT_READ | PROT_WRITE, MAP_SHARED,
                             obj_priv->bo->map_list.hash.key);
        up_write(&current->mm->mmap_sem);

        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))
                return PTR_ERR((void *)addr);

        args->addr_ptr = (uint64_t) addr;

        return 0;
}

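/*
 * Illustrative only: note that userspace does not mmap() a magic offset on
 * the drm fd itself here - the ioctl performs the mapping in the kernel and
 * hands the user address back in addr_ptr. DRM_RADEON_GEM_MMAP is an assumed
 * command number; the args layout matches radeon_gem_mmap_ioctl() above.
 */
#if 0
static void *radeon_example_map_bo(int fd, uint32_t handle, uint64_t size)
{
        struct drm_radeon_gem_mmap args = {0};

        args.handle = handle;
        args.offset = 0;
        args.size = size;

        if (drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP,
                                &args, sizeof(args)))
                return NULL;

        return (void *)(uintptr_t)args.addr_ptr;
}
#endif
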
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_radeon_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_radeon_gem_object *obj_priv;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EINVAL;

        obj_priv = obj->driver_private;

        DRM_DEBUG("pin obj %p bo %p usage %d\n", obj, obj_priv->bo,
                  atomic_read(&obj_priv->bo->usage));

        /* validate into a pin with no fence - only kernel BOs or the
         * superuser get to actually pin */
        if (obj_priv->bo->type == drm_bo_type_kernel || DRM_SUSER(DRM_CURPROC))
                ret = drm_bo_do_validate(obj_priv->bo, DRM_BO_FLAG_NO_EVICT,
                                         DRM_BO_FLAG_NO_EVICT,
                                         DRM_BO_HINT_DONT_FENCE, 0);
        else
                ret = 0;

        args->offset = obj_priv->bo->offset;

        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_radeon_gem_unpin *args = data;
        struct drm_gem_object *obj;
        struct drm_radeon_gem_object *obj_priv;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EINVAL;

        obj_priv = obj->driver_private;

        /* validate with NO_EVICT cleared so the BO is evictable again */
        ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
                                 DRM_BO_HINT_DONT_FENCE, 0);

        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

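/*
 * A note on the flags/mask pairs in the two ioctls above, assuming
 * drm_bo_do_validate() applies "proposed = (current & ~mask) | (flags & mask)":
 * the mask selects which placement bits the call may change and the flags
 * give their new values. Pin therefore passes (NO_EVICT, NO_EVICT) to set
 * the no-evict bit, while unpin passes (0, NO_EVICT) to clear it.
 */
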
int radeon_gem_busy(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        /* stub: always report idle for now */
        return 0;
}

int radeon_gem_execbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        return -ENOSYS;
}

int radeon_gem_indirect_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_radeon_gem_indirect *args = data;
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_radeon_gem_object *obj_priv;
        uint32_t start, end;
        int ret;
        RING_LOCALS;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EINVAL;

        obj_priv = obj->driver_private;

        DRM_DEBUG("indirect obj %p used %d\n", obj, args->used);
        //RING_SPACE_TEST_WITH_RETURN(dev_priv);
        //VB_AGE_TEST_WITH_RETURN(dev_priv);

        ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
                                 0, 0);
        if (ret)
                goto out;

        /* Wait for the 3D stream to idle before the indirect buffer
         * containing 2D acceleration commands is processed.
         */
        BEGIN_RING(2);

        RADEON_WAIT_UNTIL_3D_IDLE();

        ADVANCE_RING();

        start = 0;
        end = args->used;

        if (start != end) {
                int offset = (dev_priv->gart_vm_start +
                              obj_priv->bo->offset + start);
                int dwords = (end - start + 3) / sizeof(u32);

#if 0
                /* Indirect buffer data must be an even number of
                 * dwords, so if we've been given an odd number we must
                 * pad the data with a Type-2 CP packet.
                 */
                if (dwords & 1) {
                        u32 *data = (u32 *)
                            ((char *)dev->agp_buffer_map->handle
                             + buf->offset + start);
                        data[dwords++] = RADEON_CP_PACKET2;
                }
#endif
                /* Fire off the indirect buffer */
                BEGIN_RING(3);

                OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
                OUT_RING(offset);
                OUT_RING(dwords);

                ADVANCE_RING();
        }

        COMMIT_RING();

        /* we need to fence the buffer */
        ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &obj_priv->fence);
        if (ret) {
                drm_putback_buffer_objects(dev);
                ret = 0;
                goto out;
        }

        /* dereference the fence object */
        drm_fence_usage_deref_unlocked(&obj_priv->fence);

out:
        /* always drop the lookup reference, even on the error paths */
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

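/*
 * For reference, what the IB fire above expands to on the ring, assuming the
 * usual radeon macro CP_PACKET0(reg, n) = ((n) << 16) | ((reg) >> 2): a
 * Type-0 packet header whose count field of 1 covers the two consecutive
 * registers written next (IB base, then IB size), matching the two
 * OUT_RING()s. Values below are invented for illustration.
 */
#if 0
uint32_t ring[3];

ring[0] = CP_PACKET0(RADEON_CP_IB_BASE, 1); /* header: write 2 regs from IB_BASE */
ring[1] = 0x10000000;                       /* GART address of the IB */
ring[2] = 64;                               /* IB length in dwords */
#endif
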
/*
 * Depending on card generation, chipset bugs, etc... the amount of vram
 * accessible to the CPU can vary. This function is our best shot at figuring
 * it out. Returns a value in KB.
 */
static uint32_t radeon_get_accessible_vram(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        uint32_t aper_size;
        u8 byte;

        if (dev_priv->chip_family >= CHIP_R600)
                aper_size = RADEON_READ(R600_CONFIG_APER_SIZE) / 1024;
        else
                aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE) / 1024;

        /* Set HDP_APER_CNTL only on cards that are known not to be broken,
         * that is, those with the 2nd generation multifunction PCI interface
         */
        if (dev_priv->chip_family == CHIP_RV280 ||
            dev_priv->chip_family == CHIP_RV350 ||
            dev_priv->chip_family == CHIP_RV380 ||
            dev_priv->chip_family == CHIP_R420 ||
            dev_priv->chip_family == CHIP_RV410 ||
            dev_priv->chip_family >= CHIP_RS600) {
                uint32_t temp = RADEON_READ(RADEON_HOST_PATH_CNTL);
                temp |= RADEON_HDP_APER_CNTL;
                RADEON_WRITE(RADEON_HOST_PATH_CNTL, temp);
                return aper_size * 2;
        }

        /* Older cards have all sorts of funny issues to deal with. First
         * check if it's a multifunction card by reading the PCI config
         * header type... Limit those to one aperture size
         */
        pci_read_config_byte(dev->pdev, 0xe, &byte);
        if (byte & 0x80)
                return aper_size;

        /* Single function older card. We read HDP_APER_CNTL to see how the
         * BIOS has set it up. We don't write it, as that's broken on some
         * ASICs, and expect the BIOS to have done the right thing (which
         * might be too optimistic...)
         */
        if (RADEON_READ(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
                return aper_size * 2;

        return aper_size;
}

/* code from the DDX - do memory sizing */
void radeon_vram_setup(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        uint32_t vram;
        uint32_t accessible, bar_size;

        if ((dev_priv->chip_family <= CHIP_RV515) && (dev_priv->flags & RADEON_IS_IGP)) {
                uint32_t tom = RADEON_READ(RADEON_NB_TOM);

                /* top-of-memory minus bottom, in 64K units, converted to KB */
                vram = (((tom >> 16) - (tom & 0xffff) + 1) << 6);
                RADEON_WRITE(RADEON_CONFIG_MEMSIZE, vram * 1024);
        } else {
                if (dev_priv->chip_family >= CHIP_R600)
                        vram = RADEON_READ(R600_CONFIG_MEMSIZE) / 1024;
                else {
                        vram = RADEON_READ(RADEON_CONFIG_MEMSIZE) / 1024;

                        /* Some production boards of m6 will return 0 if it's 8 MB */
                        if (vram == 0) {
                                vram = 8192;
                                RADEON_WRITE(RADEON_CONFIG_MEMSIZE, 0x800000);
                        }
                }
        }

        accessible = radeon_get_accessible_vram(dev);

        bar_size = drm_get_resource_len(dev, 0) / 1024;
        if (bar_size == 0)
                bar_size = 0x20000;
        if (accessible > bar_size)
                accessible = bar_size;

        DRM_INFO("Detected VRAM=%uK, accessible=%uK, BAR=%uK\n",
                 vram, accessible, bar_size);

        dev_priv->mm.vram_offset = dev_priv->fb_aper_offset;
        dev_priv->mm.vram_size = vram * 1024;
        dev_priv->mm.vram_visible = accessible * 1024;
}

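/*
 * Worked example for the NB_TOM decode above (values invented for
 * illustration): the IGP north bridge reports the framebuffer carve-out as
 * start/end in 64K pages, end in the high word and start in the low word.
 * With tom = 0x03ff0300 the carve-out spans pages 0x0300..0x03ff, i.e.
 * (0x03ff - 0x0300 + 1) = 256 pages of 64K = 16384K (16MB), which is what
 * ((tom >> 16) - (tom & 0xffff) + 1) << 6 computes in KB.
 */
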
static int radeon_gart_init(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        int ret;
        u32 base = 0;

        /* setup a 32MB GART */
        dev_priv->gart_size = dev_priv->mm.gart_size;
        dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;

#if __OS_HAS_AGP
        /* setup VRAM vs GART here */
        if (dev_priv->flags & RADEON_IS_AGP) {
                base = dev->agp->base;
                if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
                    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
                        DRM_INFO("Can't use agp base @0x%08lx, won't fit\n",
                                 dev->agp->base);
                        base = 0;
                }
        }
#endif

        if (base == 0) {
                base = dev_priv->fb_location + dev_priv->fb_size;
                if (base < dev_priv->fb_location ||
                    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
                        base = dev_priv->fb_location
                                - dev_priv->gart_size;
        }
        /* start on the card */
        dev_priv->gart_vm_start = base & 0xffc00000u;
        if (dev_priv->gart_vm_start != base)
                DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
                         base, dev_priv->gart_vm_start);

        /* if on PCIE we need to allocate an fb object for the PCIE GART table */
        if (dev_priv->flags & RADEON_IS_PCIE) {
                ret = drm_buffer_object_create(dev, RADEON_PCIGART_TABLE_SIZE,
                                               drm_bo_type_kernel,
                                               DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
                                               0, 1, 0, &dev_priv->mm.pcie_table.bo);
                if (ret)
                        return -EINVAL;

                DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table.bo, dev_priv->mm.pcie_table.bo->offset);
                ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
                                  &dev_priv->mm.pcie_table.kmap);
                if (ret)
                        return -EINVAL;

                dev_priv->pcigart_offset_set = 2;
                dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table.bo->offset;
                dev_priv->gart_info.addr = dev_priv->mm.pcie_table.kmap.virtual;
                dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
                dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB;
                memset(dev_priv->gart_info.addr, 0, RADEON_PCIGART_TABLE_SIZE);
        } else if (!(dev_priv->flags & RADEON_IS_AGP)) {
                /* allocate PCI GART table */
                dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
                ret = drm_ati_alloc_pcigart_table(dev, &dev_priv->gart_info);
                if (ret) {
                        DRM_ERROR("cannot allocate PCI GART page!\n");
                        return -EINVAL;
                }

                dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
                if (dev_priv->flags & RADEON_IS_IGPGART)
                        dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
                else
                        dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
                dev_priv->gart_info.addr = dev_priv->gart_info.table_handle->vaddr;
                dev_priv->gart_info.bus_addr = dev_priv->gart_info.table_handle->busaddr;
        }

        /* gart values setup - enable the on-card PCI GART unless we're on AGP */
        if (dev_priv->flags & RADEON_IS_AGP)
                radeon_set_pcigart(dev_priv, 0);
        else
                radeon_set_pcigart(dev_priv, 1);

        return 0;
}

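/*
 * Worked example for the 0xffc00000 mask above (numbers invented): the mask
 * aligns the GART base down to a 4MB boundary. With fb_location = 0 and
 * fb_size = 0x07f80000, base starts as 0x07f80000 and
 * 0x07f80000 & 0xffc00000 = 0x07c00000, so the DRM_INFO above reports the
 * aperture being moved down by 3.5MB.
 */
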
int radeon_alloc_gart_objects(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = drm_buffer_object_create(dev, RADEON_DEFAULT_RING_SIZE,
                                       drm_bo_type_kernel,
                                       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
                                       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
                                       0, 1, 0, &dev_priv->mm.ring.bo);
        if (ret) {
                DRM_ERROR("failed to allocate ring\n");
                return -EINVAL;
        }

        ret = drm_bo_kmap(dev_priv->mm.ring.bo, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT,
                          &dev_priv->mm.ring.kmap);
        if (ret) {
                DRM_ERROR("failed to map ring\n");
                return -EINVAL;
        }

        ret = drm_buffer_object_create(dev, PAGE_SIZE,
                                       drm_bo_type_kernel,
                                       DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
                                       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
                                       0, 1, 0, &dev_priv->mm.ring_read.bo);
        if (ret) {
                DRM_ERROR("failed to allocate ring read\n");
                return -EINVAL;
        }

        ret = drm_bo_kmap(dev_priv->mm.ring_read.bo, 0,
                          PAGE_SIZE >> PAGE_SHIFT,
                          &dev_priv->mm.ring_read.kmap);
        if (ret) {
                DRM_ERROR("failed to map ring read\n");
                return -EINVAL;
        }

        DRM_DEBUG("Ring ptr %p mapped at %d %p, read ptr %p mapped at %d %p\n",
                  dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual,
                  dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual);

        /* init the indirect buffers */
        radeon_gem_ib_init(dev);
        radeon_gem_dma_bufs_init(dev);
        return 0;
}

static void radeon_init_memory_map(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        u32 mem_size, aper_size;

        dev_priv->mc_fb_location = radeon_read_fb_location(dev_priv);
        radeon_read_agp_location(dev_priv, &dev_priv->mc_agp_loc_lo, &dev_priv->mc_agp_loc_hi);

        if (dev_priv->chip_family >= CHIP_R600) {
                mem_size = RADEON_READ(R600_CONFIG_MEMSIZE);
                aper_size = RADEON_READ(R600_CONFIG_APER_SIZE);
        } else {
                mem_size = RADEON_READ(RADEON_CONFIG_MEMSIZE);
                aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE);
        }

        /* M6s report illegal memory size */
        if (mem_size == 0)
                mem_size = 8 * 1024 * 1024;

        /* for RN50/M6/M7 - Novell bug 204882 */
        if (aper_size > mem_size)
                mem_size = aper_size;

        if ((dev_priv->chip_family != CHIP_RS600) &&
            (dev_priv->chip_family != CHIP_RS690) &&
            (dev_priv->chip_family != CHIP_RS740)) {
                if (dev_priv->flags & RADEON_IS_IGP)
                        dev_priv->mc_fb_location = RADEON_READ(RADEON_NB_TOM);
                else {
                        uint32_t aper0_base;

                        if (dev_priv->chip_family >= CHIP_R600)
                                aper0_base = RADEON_READ(R600_CONFIG_F0_BASE);
                        else
                                aper0_base = RADEON_READ(RADEON_CONFIG_APER_0_BASE);

                        /* Some chips have an "issue" with the memory controller, the
                         * location must be aligned to the size. We just align it down,
                         * too bad if we walk over the top of system memory, we don't
                         * use DMA without a remapper anyway.
                         * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
                         */
                        if (dev_priv->chip_family == CHIP_RV280 ||
                            dev_priv->chip_family == CHIP_R300 ||
                            dev_priv->chip_family == CHIP_R350 ||
                            dev_priv->chip_family == CHIP_RV350 ||
                            dev_priv->chip_family == CHIP_RV380 ||
                            dev_priv->chip_family == CHIP_R420 ||
                            dev_priv->chip_family == CHIP_RV410)
                                aper0_base &= ~(mem_size - 1);

                        if (dev_priv->chip_family >= CHIP_R600) {
                                dev_priv->mc_fb_location = (aper0_base >> 24) |
                                        (((aper0_base + mem_size - 1) & 0xff000000U) >> 8);
                        } else {
                                dev_priv->mc_fb_location = (aper0_base >> 16) |
                                        ((aper0_base + mem_size - 1) & 0xffff0000U);
                        }
                }
        }

        if (dev_priv->chip_family >= CHIP_R600)
                dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 24;
        else
                dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 16;

        if (radeon_is_avivo(dev_priv)) {
                if (dev_priv->chip_family >= CHIP_R600)
                        RADEON_WRITE(R600_HDP_NONSURFACE_BASE, (dev_priv->mc_fb_location << 16) & 0xff0000);
                else
                        RADEON_WRITE(AVIVO_HDP_FB_LOCATION, dev_priv->mc_fb_location);
        }

        radeon_write_fb_location(dev_priv, dev_priv->mc_fb_location);

        dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
        dev_priv->fb_size =
                ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
                - dev_priv->fb_location;
}

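/*
 * Worked example for the pre-R600 MC_FB_LOCATION encoding above (values
 * invented): the framebuffer start and end are packed into one register,
 * start in the low 16 bits and end in the high 16 bits, both in 64K units.
 * With aper0_base = 0xe0000000 and mem_size = 256MB:
 *   (0xe0000000 >> 16)                          = 0x0000e000
 *   (0xe0000000 + 0x10000000 - 1) & 0xffff0000  = 0xefff0000
 * so mc_fb_location = 0xefffe000. Decoding it back as done above gives
 * fb_location = 0xe000 << 16 = 0xe0000000 and
 * fb_size = (0xefff0000 + 0x10000) - 0xe0000000 = 0x10000000.
 */
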
/* init memory manager - start with all of VRAM and a 32MB GART aperture for now */
int radeon_gem_mm_init(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        int ret;
        u32 pg_offset;

        /* size the mappable VRAM memory for now */
        radeon_vram_setup(dev);

        radeon_init_memory_map(dev);

#define VRAM_RESERVE_TEXT (64*1024)
        dev_priv->mm.vram_visible -= VRAM_RESERVE_TEXT;
        pg_offset = VRAM_RESERVE_TEXT >> PAGE_SHIFT;
        drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, pg_offset, /*dev_priv->mm.vram_offset >> PAGE_SHIFT,*/
                       ((dev_priv->mm.vram_visible) >> PAGE_SHIFT) - 16,
                       0);

        dev_priv->mm.gart_size = (32 * 1024 * 1024);
        dev_priv->mm.gart_start = 0;
        ret = radeon_gart_init(dev);
        if (ret)
                return -EINVAL;

        drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
                       dev_priv->mm.gart_size >> PAGE_SHIFT,
                       0);

        /* need to allocate some objects in the GART */
        /* ring + ring read ptr */
        ret = radeon_alloc_gart_objects(dev);
        if (ret)
                return -EINVAL;

        dev_priv->mm_enabled = true;
        return 0;
}

void radeon_gem_mm_fini(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;

        radeon_gem_dma_bufs_destroy(dev);
        radeon_gem_ib_destroy(dev);

        mutex_lock(&dev->struct_mutex);

        if (dev_priv->mm.ring_read.bo) {
                drm_bo_kunmap(&dev_priv->mm.ring_read.kmap);
                drm_bo_usage_deref_locked(&dev_priv->mm.ring_read.bo);
        }

        if (dev_priv->mm.ring.bo) {
                drm_bo_kunmap(&dev_priv->mm.ring.kmap);
                drm_bo_usage_deref_locked(&dev_priv->mm.ring.bo);
        }

        if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
                DRM_DEBUG("delaying takedown of TT memory\n");
        }

        if (dev_priv->flags & RADEON_IS_PCIE) {
                if (dev_priv->mm.pcie_table.bo) {
                        drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap);
                        drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo);
                }
        }

        if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
                DRM_DEBUG("delaying takedown of VRAM memory\n");
        }

        mutex_unlock(&dev->struct_mutex);

        drm_bo_driver_finish(dev);
        dev_priv->mm_enabled = false;
}

int radeon_gem_object_pin(struct drm_gem_object *obj,
                          uint32_t alignment)
{
        struct drm_radeon_gem_object *obj_priv;
        int ret;

        obj_priv = obj->driver_private;

        /* set NO_EVICT so the object stays put */
        ret = drm_bo_do_validate(obj_priv->bo, DRM_BO_FLAG_NO_EVICT,
                                 DRM_BO_FLAG_NO_EVICT,
                                 DRM_BO_HINT_DONT_FENCE, 0);

        return ret;
}

#define RADEON_IB_MEMORY (1*1024*1024)
#define RADEON_IB_SIZE (65536)

#define RADEON_NUM_IB (RADEON_IB_MEMORY / RADEON_IB_SIZE)

int radeon_gem_ib_get(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset)
{
        int i, index = -1;
        int ret;
        drm_radeon_private_t *dev_priv = dev->dev_private;

        for (i = 0; i < RADEON_NUM_IB; i++) {
                if (!(dev_priv->ib_alloc_bitmap & (1 << i))) {
                        index = i;
                        break;
                }
        }

        /* if all in use we need to wait */
        if (index == -1) {
                for (i = 0; i < RADEON_NUM_IB; i++) {
                        if (dev_priv->ib_alloc_bitmap & (1 << i)) {
                                mutex_lock(&dev_priv->ib_objs[i]->bo->mutex);
                                ret = drm_bo_wait(dev_priv->ib_objs[i]->bo, 0, 1, 0, 0);
                                mutex_unlock(&dev_priv->ib_objs[i]->bo->mutex);
                                if (ret)
                                        continue;
                                dev_priv->ib_alloc_bitmap &= ~(1 << i);
                                index = i;
                                break;
                        }
                }
        }

        if (index == -1) {
                DRM_ERROR("failed to allocate an IB from the free list %x\n", dev_priv->ib_alloc_bitmap);
                return -EINVAL;
        }

        if (dwords > RADEON_IB_SIZE / sizeof(uint32_t))
                return -EINVAL;

        ret = drm_bo_do_validate(dev_priv->ib_objs[index]->bo, 0,
                                 DRM_BO_FLAG_NO_EVICT,
                                 0, 0);
        if (ret) {
                DRM_ERROR("Failed to validate IB %d\n", index);
                return -EINVAL;
        }

        *card_offset = dev_priv->gart_vm_start + dev_priv->ib_objs[index]->bo->offset;
        *ib = dev_priv->ib_objs[index]->kmap.virtual;
        dev_priv->ib_alloc_bitmap |= (1 << index);
        return 0;
}

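/*
 * Illustrative only: how the command submission path is expected to drive
 * the two hooks installed by radeon_gem_ib_init() below. The wrapper
 * function is hypothetical; the hook signatures are the ones defined in
 * this file.
 */
#if 0
static int radeon_example_submit(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        uint32_t *ib;
        uint32_t card_offset;
        uint32_t ndw = 16;

        /* grab a free (or newly idled) 64k IB and its GART address */
        if (dev_priv->cs.ib_get(dev, (void **)&ib, ndw, &card_offset))
                return -EBUSY;

        /* ... write ndw dwords of CP packets into ib, then fire an
         * indirect buffer at card_offset as in radeon_gem_indirect_ioctl() ... */

        /* emit a fence and hand the IB back to the pool */
        dev_priv->cs.ib_free(dev, ib, ndw);
        return 0;
}
#endif
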
static void radeon_gem_ib_free(struct drm_device *dev, void *ib, uint32_t dwords)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        struct drm_fence_object *fence = NULL;
        int ret;
        int i;

        for (i = 0; i < RADEON_NUM_IB; i++) {
                if (dev_priv->ib_objs[i]->kmap.virtual == ib) {
                        /* emit a fence object */
                        ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
                        if (ret)
                                drm_putback_buffer_objects(dev);
                        /* dereference the fence object */
                        if (fence)
                                drm_fence_usage_deref_unlocked(&fence);
                }
        }
}

static int radeon_gem_ib_destroy(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        int i;

        if (dev_priv->ib_objs) {
                for (i = 0; i < RADEON_NUM_IB; i++) {
                        if (dev_priv->ib_objs[i]) {
                                drm_bo_kunmap(&dev_priv->ib_objs[i]->kmap);
                                drm_bo_usage_deref_unlocked(&dev_priv->ib_objs[i]->bo);
                        }
                        drm_free(dev_priv->ib_objs[i], sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
                }
                drm_free(dev_priv->ib_objs, RADEON_NUM_IB*sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
        }
        dev_priv->ib_objs = NULL;
        return 0;
}

static int radeon_gem_relocate(struct drm_device *dev, struct drm_file *file_priv,
                                uint32_t *reloc, uint32_t *offset)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        /* relocate the handle */
        int domains = reloc[2];
        struct drm_gem_object *obj;
        int flags = 0;
        int ret;
        struct drm_radeon_gem_object *obj_priv;

        obj = drm_gem_object_lookup(dev, file_priv, reloc[1]);
        if (!obj)
                return -EINVAL;

        obj_priv = obj->driver_private;
        if (domains == RADEON_GEM_DOMAIN_VRAM) {
                flags = DRM_BO_FLAG_MEM_VRAM;
        } else {
                flags = DRM_BO_FLAG_MEM_TT;
        }

        ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM, 0, 0);
        if (ret)
                return ret;

        if (flags == DRM_BO_FLAG_MEM_VRAM)
                *offset = obj_priv->bo->offset + dev_priv->fb_location;
        else
                *offset = obj_priv->bo->offset + dev_priv->gart_vm_start;

        /* BAD BAD BAD - LINKED LIST THE OBJS and UNREF ONCE IB is SUBMITTED */
        drm_gem_object_unreference(obj);
        return 0;
}

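/*
 * For reference, the relocation record consumed above, as this function
 * indexes it: reloc[1] is the GEM handle and reloc[2] the requested domain
 * (reloc[0] is untouched here). A hypothetical packet writer would fill one
 * in roughly like this:
 */
#if 0
uint32_t reloc[3];

reloc[0] = 0;                        /* unused by radeon_gem_relocate() */
reloc[1] = bo_handle;                /* GEM handle to look up */
reloc[2] = RADEON_GEM_DOMAIN_VRAM;   /* domain the BO must be validated into */
#endif
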
/* allocate 1MB of 64k IBs that the kernel can keep mapped */
static int radeon_gem_ib_init(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        int i;
        int ret;

        dev_priv->ib_objs = drm_calloc(RADEON_NUM_IB, sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
        if (!dev_priv->ib_objs)
                goto free_all;

        for (i = 0; i < RADEON_NUM_IB; i++) {
                dev_priv->ib_objs[i] = drm_calloc(1, sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
                if (!dev_priv->ib_objs[i])
                        goto free_all;

                ret = drm_buffer_object_create(dev, RADEON_IB_SIZE,
                                               drm_bo_type_kernel,
                                               DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
                                               DRM_BO_FLAG_MAPPABLE, 0,
                                               0, 0, &dev_priv->ib_objs[i]->bo);
                if (ret)
                        goto free_all;

                ret = drm_bo_kmap(dev_priv->ib_objs[i]->bo, 0, RADEON_IB_SIZE >> PAGE_SHIFT,
                                  &dev_priv->ib_objs[i]->kmap);
                if (ret)
                        goto free_all;
        }

        dev_priv->ib_alloc_bitmap = 0;

        dev_priv->cs.ib_get = radeon_gem_ib_get;
        dev_priv->cs.ib_free = radeon_gem_ib_free;

        radeon_cs_init(dev);
        dev_priv->cs.relocate = radeon_gem_relocate;
        return 0;

free_all:
        radeon_gem_ib_destroy(dev);
        return -ENOMEM;
}

#define RADEON_DMA_BUFFER_SIZE (64 * 1024)
#define RADEON_DMA_BUFFER_COUNT (16)

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
                                  struct drm_buf_entry *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i])
                                drm_pci_free(dev, entry->seglist[i]);
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist), DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist), DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

static int radeon_gem_addbufs(struct drm_device *dev)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_entry *entry;
        struct drm_buf *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        struct drm_buf **temp_buflist;

        if (!dma)
                return -EINVAL;

        count = RADEON_DMA_BUFFER_COUNT;
        order = drm_order(RADEON_DMA_BUFFER_SIZE);
        size = 1 << order;

        alignment = PAGE_ALIGN(size);
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev_priv->mm.dma_bufs.bo->offset;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = dev_priv->gart_vm_start + agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        dma->flags = _DRM_DMA_USE_SG;
        atomic_dec(&dev->buf_alloc);
        return 0;
}

static int radeon_gem_dma_bufs_init(struct drm_device *dev)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;
        int size = RADEON_DMA_BUFFER_SIZE * RADEON_DMA_BUFFER_COUNT;
        int ret;

        ret = drm_dma_setup(dev);
        if (ret < 0)
                return ret;

        ret = drm_buffer_object_create(dev, size, drm_bo_type_device,
                                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_NO_EVICT |
                                       DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE, 0,
                                       0, 0, &dev_priv->mm.dma_bufs.bo);
        if (ret) {
                DRM_ERROR("Failed to create DMA bufs\n");
                return -ENOMEM;
        }

        ret = drm_bo_kmap(dev_priv->mm.dma_bufs.bo, 0, size >> PAGE_SHIFT,
                          &dev_priv->mm.dma_bufs.kmap);
        if (ret) {
                DRM_ERROR("Failed to kmap DMA buffers\n");
                return -ENOMEM;
        }
        DRM_DEBUG("\n");
        radeon_gem_addbufs(dev);

        DRM_DEBUG("%x %d\n", dev_priv->mm.dma_bufs.bo->map_list.hash.key, size);
        dev->agp_buffer_token = dev_priv->mm.dma_bufs.bo->map_list.hash.key << PAGE_SHIFT;
        dev_priv->mm.fake_agp_map.handle = dev_priv->mm.dma_bufs.kmap.virtual;
        dev_priv->mm.fake_agp_map.size = size;

        dev->agp_buffer_map = &dev_priv->mm.fake_agp_map;
        dev_priv->gart_buffers_offset = dev_priv->mm.dma_bufs.bo->offset + dev_priv->gart_vm_start;
        return 0;
}

static void radeon_gem_dma_bufs_destroy(struct drm_device *dev)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;

        drm_dma_takedown(dev);

        drm_bo_kunmap(&dev_priv->mm.dma_bufs.kmap);
        drm_bo_usage_deref_unlocked(&dev_priv->mm.dma_bufs.bo);
}

static struct drm_gem_object *gem_object_get(struct drm_device *dev, uint32_t name)
{
        struct drm_gem_object *obj;

        spin_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, name);
        if (obj)
                drm_gem_object_reference(obj);
        spin_unlock(&dev->object_name_lock);
        return obj;
}

void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        struct drm_radeon_master_private *master_priv = master->driver_priv;
        drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
        struct drm_gem_object *obj;
        struct drm_radeon_gem_object *obj_priv;

        /* update front_pitch_offset and back_pitch_offset */
        obj = gem_object_get(dev, sarea_priv->front_handle);
        if (obj) {
                obj_priv = obj->driver_private;

                dev_priv->front_offset = obj_priv->bo->offset;
                dev_priv->front_pitch_offset = (((sarea_priv->front_pitch / 64) << 22) |
                                                ((obj_priv->bo->offset
                                                  + dev_priv->fb_location) >> 10));
                drm_gem_object_unreference(obj);
        }

        obj = gem_object_get(dev, sarea_priv->back_handle);
        if (obj) {
                obj_priv = obj->driver_private;
                dev_priv->back_offset = obj_priv->bo->offset;
                dev_priv->back_pitch_offset = (((sarea_priv->back_pitch / 64) << 22) |
                                                ((obj_priv->bo->offset
                                                  + dev_priv->fb_location) >> 10));
                drm_gem_object_unreference(obj);
        }
        dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
}
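
/*
 * Worked example for the pitch/offset packing above (numbers invented): as
 * used here, the CRTC pitch/offset value carries the pitch in 64-byte units
 * in bits 31:22 and the MC address in 1KB units in the low bits. For a
 * 1280x1024 ARGB8888 front buffer, pitch = 1280 * 4 = 5120 bytes, so
 * pitch/64 = 80; with the BO at MC address 0x10000 the packed value is
 * (80 << 22) | (0x10000 >> 10) = 0x14000040.
 */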