/*
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
static int radeon_gem_ib_init(struct drm_device *dev);
static int radeon_gem_ib_destroy(struct drm_device *dev);
static int radeon_gem_dma_bufs_init(struct drm_device *dev);
static void radeon_gem_dma_bufs_destroy(struct drm_device *dev);
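
/*
 * GEM object hooks.
 *
 * This file layers the GEM object API over the legacy drm_bo (TTM)
 * memory manager: each GEM object's driver_private points at a
 * struct drm_radeon_gem_object owning the backing drm_buffer_object,
 * so GEM handles, names and mmaps all resolve to TTM buffer objects
 * underneath.
 */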
int radeon_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_radeon_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (!obj_priv)
		return -ENOMEM;

	obj->driver_private = obj_priv;
	return 0;
}
void radeon_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_radeon_gem_object *obj_priv = obj->driver_private;

	/* tear down the buffer object - GEM holds the struct mutex */
	drm_bo_takedown_vm_locked(obj_priv->bo);
	drm_bo_usage_deref_locked(&obj_priv->bo);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;
	struct drm_radeon_gem_info *args = data;

	args->vram_start = dev_priv->mm.vram_offset;
	args->vram_size = dev_priv->mm.vram_size;
	args->vram_visible = dev_priv->mm.vram_visible;

	args->gart_start = dev_priv->mm.gart_start;
	args->gart_size = dev_priv->mm.gart_size;

	return 0;
}
struct drm_gem_object *radeon_gem_object_alloc(struct drm_device *dev, int size, int alignment,
					       int initial_domain)
{
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	uint32_t flags;
	int ret;

	obj = drm_gem_object_alloc(dev, size);
	if (!obj)
		return NULL;

	obj_priv = obj->driver_private;
	flags = DRM_BO_FLAG_MAPPABLE;
	if (initial_domain == RADEON_GEM_DOMAIN_VRAM)
		flags |= DRM_BO_FLAG_MEM_VRAM;
	else if (initial_domain == RADEON_GEM_DOMAIN_GTT)
		flags |= DRM_BO_FLAG_MEM_TT;
	else
		flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;

	flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE;

	ret = drm_buffer_object_create(dev,
				       size, drm_bo_type_device,
				       flags, 0, alignment, 0,
				       &obj_priv->bo);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}

	DRM_DEBUG("%p : size 0x%x, alignment %d, initial_domain %d\n", obj_priv->bo, size, alignment, initial_domain);
	return obj;
}
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_radeon_gem_create *args = data;
	struct drm_radeon_gem_object *obj_priv;
	struct drm_gem_object *obj;
	uint32_t handle;
	int ret = 0;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);

	obj = radeon_gem_object_alloc(dev, args->size, args->alignment, args->initial_domain);
	if (!obj)
		return -EINVAL;

	obj_priv = obj->driver_private;
	DRM_DEBUG("obj is %p bo is %p, %d\n", obj, obj_priv->bo, obj_priv->bo->num_pages);
	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	args->handle = handle;

	return 0;
fail:
	drm_gem_object_unreference(obj);

	return ret;
}
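
/*
 * Map GEM read/write domains onto TTM placements. A write domain always
 * dictates the placement; for read-only access we prefer whichever
 * placement the buffer already occupies, to avoid a needless migration.
 */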
int radeon_gem_set_domain(struct drm_gem_object *obj, uint32_t read_domains, uint32_t write_domain, uint32_t *flags_p, bool unfenced)
{
	struct drm_device *dev = obj->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_gem_object *obj_priv;
	uint32_t flags = 0;
	int ret;

	obj_priv = obj->driver_private;

	/* work out where to validate the buffer to */
	if (write_domain) { /* write domains always win */
		if (write_domain == RADEON_GEM_DOMAIN_VRAM)
			flags = DRM_BO_FLAG_MEM_VRAM;
		else if (write_domain == RADEON_GEM_DOMAIN_GTT)
			flags = DRM_BO_FLAG_MEM_TT; /* need a "can write to GART" check */
		else
			return -EINVAL; /* we can't write to system RAM */
	} else {
		/* okay for a read domain - prefer wherever the object is now or close enough */
		if ((read_domains == 0) || (read_domains == RADEON_GEM_DOMAIN_CPU))
			return -EINVAL;

		/* simple case, no choice in domains */
		if (read_domains == RADEON_GEM_DOMAIN_VRAM)
			flags = DRM_BO_FLAG_MEM_VRAM;
		else if (read_domains == RADEON_GEM_DOMAIN_GTT)
			flags = DRM_BO_FLAG_MEM_TT;
		else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_VRAM) && (read_domains & RADEON_GEM_DOMAIN_VRAM))
			flags = DRM_BO_FLAG_MEM_VRAM;
		else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_TT) && (read_domains & RADEON_GEM_DOMAIN_GTT))
			flags = DRM_BO_FLAG_MEM_TT;
		else if (read_domains & RADEON_GEM_DOMAIN_VRAM)
			flags = DRM_BO_FLAG_MEM_VRAM;
		else if (read_domains & RADEON_GEM_DOMAIN_GTT)
			flags = DRM_BO_FLAG_MEM_TT;
	}

	ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM | DRM_BO_FLAG_CACHED,
				 unfenced ? DRM_BO_HINT_DONT_FENCE : 0, 0);
	if (ret)
		return ret;

	if (flags_p)
		*flags_p = flags;
	return 0;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	/* transition the BO to a domain - just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	int ret;

	/* for now if someone requests domain CPU - just make sure the buffer is finished with */

	/* just do a BO wait for now */
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	ret = radeon_gem_set_domain(obj, args->read_domains, args->write_domain, NULL, true);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
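
/*
 * pread is not implemented yet, and pwrite below only handles buffers
 * already resident in VRAM, pushing the bytes through the host data blit
 * path; callers are expected to fall back to a userspace copy otherwise.
 */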
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return -ENOSYS;
}
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_radeon_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	/* blit parameters for the host data blit copy */
	uint32_t *buf;
	uint8_t *src;
	int cpp, w, h, x, y, hpass;
	uint32_t dst_pitch_off, buf_pitch, src_pitch;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	/* check where the buffer is first - if not in VRAM
	   fall back to userspace copying for now */
	mutex_lock(&obj_priv->bo->mutex);
	if (obj_priv->bo->mem.mem_type != DRM_BO_MEM_VRAM) {
		ret = -EINVAL;
		goto out_unlock;
	}

	DRM_ERROR("pwriting data->size %lld %llx\n", args->size, args->offset);

	/* so we need to grab an IB, copy the data into it in a loop
	   and send them to VRAM using the host data blit */
	while ((buf = radeon_host_data_blit(dev, cpp, w, dst_pitch_off, &buf_pitch,
					    x, &y, (unsigned int *)&h, &hpass)) != 0) {
		radeon_host_data_blit_copy_pass(dev, cpp, buf, (uint8_t *)src,
						hpass, buf_pitch, src_pitch);
		src += hpass * src_pitch;
	}

out_unlock:
	mutex_unlock(&obj_priv->bo->mutex);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
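
/*
 * mmap works by feeding the BO's map-list hash key (its fake mmap
 * offset) to do_mmap_pgoff() on the DRM file, so the buffer object's
 * fault path handles the actual page mapping.
 */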
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_radeon_gem_mmap *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	unsigned long addr;
	uint64_t offset;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	offset = args->offset;

	DRM_DEBUG("got here %p\n", obj);
	obj_priv = obj->driver_private;

	DRM_DEBUG("got here %p %p %lld %ld\n", obj, obj_priv->bo, args->size, obj_priv->bo->num_pages);
	if (!obj_priv->bo) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file_priv->filp, 0, args->size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     obj_priv->bo->map_list.hash.key);
	up_write(&current->mm->mmap_sem);

	DRM_DEBUG("got here %p %d\n", obj, obj_priv->bo->mem.mem_type);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
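
/*
 * Pinning is implemented as a validate with DRM_BO_FLAG_NO_EVICT set
 * (and optionally a fixed memory domain); unpinning validates again
 * with NO_EVICT cleared from the same mask.
 */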
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_radeon_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	int ret;
	int flags = DRM_BO_FLAG_NO_EVICT;
	int mask = DRM_BO_FLAG_NO_EVICT;

	/* check for valid args */
	if (args->pin_domain) {
		mask |= DRM_BO_MASK_MEM;
		if (args->pin_domain == RADEON_GEM_DOMAIN_GTT)
			flags |= DRM_BO_FLAG_MEM_TT;
		else if (args->pin_domain == RADEON_GEM_DOMAIN_VRAM)
			flags |= DRM_BO_FLAG_MEM_VRAM;
		else
			return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	/* validate into a pin with no fence */
	DRM_DEBUG("got here %p %p %d\n", obj, obj_priv->bo, atomic_read(&obj_priv->bo->usage));
	if (!(obj_priv->bo->type != drm_bo_type_kernel && !DRM_SUSER(DRM_CURPROC))) {
		ret = drm_bo_do_validate(obj_priv->bo, flags, mask,
					 DRM_BO_HINT_DONT_FENCE, 0);
	} else
		ret = -EINVAL;

	args->offset = obj_priv->bo->offset;
	DRM_DEBUG("got here %p %p %x\n", obj, obj_priv->bo, obj_priv->bo->offset);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_radeon_gem_unpin *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	/* validate back out of the pin with no fence */
	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
				 DRM_BO_HINT_DONT_FENCE, 0);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int radeon_gem_busy(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	return 0;
}

int radeon_gem_execbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return -ENOSYS;
}
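
/*
 * Dispatch a user-supplied indirect buffer: pin it, wait for the 3D
 * engine to idle, then point the CP at it via the RADEON_CP_IB_BASE
 * register pair, and fence the buffer so it is not reused while the
 * CP is still reading it.
 */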
int radeon_gem_indirect_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_radeon_gem_indirect *args = data;
	struct drm_radeon_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	uint32_t start, end;
	int ret;
	RING_LOCALS;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	DRM_DEBUG("got here %p %d\n", obj, args->used);
	//RING_SPACE_TEST_WITH_RETURN(dev_priv);
	//VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
				 0, 0);
	if (ret)
		goto fail;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();

	start = 0;
	end = args->used;

	if (start != end) {
		int offset = (dev_priv->gart_vm_start +
			      obj_priv->bo->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Fire off the indirect buffer */
		BEGIN_RING(3);
		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
		OUT_RING(offset);
		OUT_RING(dwords);
		ADVANCE_RING();
	}

	COMMIT_RING();

	/* we need to fence the buffer */
	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &obj_priv->fence);
	if (ret) {
		drm_putback_buffer_objects(dev);
		goto fail;
	}

	/* dereference the fence object */
	drm_fence_usage_deref_unlocked(&obj_priv->fence);

fail:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*
 * Depending on card generation, chipset bugs, etc... the amount of vram
 * accessible to the CPU can vary. This function is our best shot at figuring
 * it out. Returns a value in KB.
 */
static uint32_t radeon_get_accessible_vram(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t aper_size;
	u8 byte;

	if (dev_priv->chip_family >= CHIP_R600)
		aper_size = RADEON_READ(R600_CONFIG_APER_SIZE) / 1024;
	else
		aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE) / 1024;

	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is, has the 2nd generation multifunction PCI interface
	 */
	if (dev_priv->chip_family == CHIP_RV280 ||
	    dev_priv->chip_family == CHIP_RV350 ||
	    dev_priv->chip_family == CHIP_RV380 ||
	    dev_priv->chip_family == CHIP_R420 ||
	    dev_priv->chip_family == CHIP_RV410 ||
	    radeon_is_avivo(dev_priv)) {
		uint32_t temp = RADEON_READ(RADEON_HOST_PATH_CNTL);
		temp |= RADEON_HDP_APER_CNTL;
		RADEON_WRITE(RADEON_HOST_PATH_CNTL, temp);
		return aper_size * 2;
	}

	/* Older cards have all sorts of funny issues to deal with. First
	 * check if it's a multifunction card by reading the PCI config
	 * header type... Limit those to one aperture size
	 */
	pci_read_config_byte(dev->pdev, 0xe, &byte);
	if (byte & 0x80)
		return aper_size;

	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
	 * has set it up. We don't write this as it's broken on some ASICs but
	 * we expect the BIOS to have done the right thing (might be too optimistic...)
	 */
	if (RADEON_READ(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
		return aper_size * 2;

	return aper_size;
}
/* code from the DDX - do memory sizing */
void radeon_vram_setup(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t vram;
	uint32_t accessible, bar_size;

	if (!radeon_is_avivo(dev_priv) && (dev_priv->flags & RADEON_IS_IGP)) {
		uint32_t tom = RADEON_READ(RADEON_NB_TOM);

		vram = (((tom >> 16) - (tom & 0xffff) + 1) << 6);
		RADEON_WRITE(RADEON_CONFIG_MEMSIZE, vram * 1024);
	} else {
		if (dev_priv->chip_family >= CHIP_R600)
			vram = RADEON_READ(R600_CONFIG_MEMSIZE) / 1024;
		else {
			vram = RADEON_READ(RADEON_CONFIG_MEMSIZE) / 1024;

			/* Some production boards of m6 will return 0 if it's 8 MB */
			if (vram == 0) {
				vram = 8192;
				RADEON_WRITE(RADEON_CONFIG_MEMSIZE, 0x800000);
			}
		}
	}

	accessible = radeon_get_accessible_vram(dev);

	bar_size = drm_get_resource_len(dev, 0) / 1024;
	if (bar_size == 0)
		bar_size = 0x20000;
	if (accessible > bar_size)
		accessible = bar_size;

	DRM_INFO("Detected VRAM RAM=%dK, accessible=%uK, BAR=%uK\n",
		 vram, accessible, bar_size);

	dev_priv->mm.vram_offset = dev_priv->fb_aper_offset;
	dev_priv->mm.vram_size = vram * 1024;
	dev_priv->mm.vram_visible = accessible * 1024;
}
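
/*
 * Set up the GART aperture in the card's address space. The aperture is
 * placed directly after (or, if that would wrap, before) the VRAM
 * aperture, aligned down to a 4MB boundary; the page table itself lives
 * in VRAM on PCIE, in system RAM on PCI/IGP, and is not used on AGP.
 */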
static int radeon_gart_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int ret;
	u32 base = 0;

	/* setup a 32MB GART */
	dev_priv->gart_size = dev_priv->mm.gart_size;

	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;

	/* setup VRAM vs GART here */
	if (dev_priv->flags & RADEON_IS_AGP) {
		base = dev->agp->base;
		if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
		    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
			DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
				 dev->agp->base);
			base = 0;
		}
	}

	if (base == 0) {
		base = dev_priv->fb_location + dev_priv->fb_size;
		if (base < dev_priv->fb_location ||
		    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
			base = dev_priv->fb_location
				- dev_priv->gart_size;
	}
	/* start on the card */
	dev_priv->gart_vm_start = base & 0xffc00000u;
	if (dev_priv->gart_vm_start != base)
		DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
			 base, dev_priv->gart_vm_start);

	/* if on PCIE we need to allocate an fb object for the PCIE GART table */
	if (dev_priv->flags & RADEON_IS_PCIE) {
		ret = drm_buffer_object_create(dev, RADEON_PCIGART_TABLE_SIZE,
					       drm_bo_type_kernel,
					       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
					       0, 1, 0, &dev_priv->mm.pcie_table.bo);
		if (ret)
			return -EINVAL;

		dev_priv->mm.pcie_table_backup = kzalloc(RADEON_PCIGART_TABLE_SIZE, GFP_KERNEL);
		if (!dev_priv->mm.pcie_table_backup)
			return -EINVAL;

		ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
				  &dev_priv->mm.pcie_table.kmap);
		if (ret)
			return -EINVAL;

		dev_priv->pcigart_offset_set = 2;
		dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table.bo->offset;
		dev_priv->gart_info.addr = dev_priv->mm.pcie_table.kmap.virtual;
		dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB;
		memset(dev_priv->gart_info.addr, 0, RADEON_PCIGART_TABLE_SIZE);
	} else if (!(dev_priv->flags & RADEON_IS_AGP)) {
		/* allocate PCI GART table */
		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
		if (dev_priv->flags & RADEON_IS_IGPGART)
			dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
		else
			dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;

		ret = drm_ati_alloc_pcigart_table(dev, &dev_priv->gart_info);
		if (ret) {
			DRM_ERROR("cannot allocate PCI GART page!\n");
			return ret;
		}

		dev_priv->gart_info.addr = dev_priv->gart_info.table_handle->vaddr;
		dev_priv->gart_info.bus_addr = dev_priv->gart_info.table_handle->busaddr;
	}

	/* gart values setup - start the GART */
	if (dev_priv->flags & RADEON_IS_AGP) {
		radeon_set_pcigart(dev_priv, 0);
	} else {
		radeon_set_pcigart(dev_priv, 1);
	}

	return 0;
}
int radeon_alloc_gart_objects(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = drm_buffer_object_create(dev, RADEON_DEFAULT_RING_SIZE,
				       drm_bo_type_kernel,
				       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
				       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
				       0, 1, 0, &dev_priv->mm.ring.bo);
	if (ret) {
		if (dev_priv->flags & RADEON_IS_AGP)
			DRM_ERROR("failed to allocate ring - most likely an AGP driver bug\n");
		else
			DRM_ERROR("failed to allocate ring\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->mm.ring.bo, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT,
			  &dev_priv->mm.ring.kmap);
	if (ret) {
		DRM_ERROR("failed to map ring\n");
		return -EINVAL;
	}

	ret = drm_buffer_object_create(dev, PAGE_SIZE,
				       drm_bo_type_kernel,
				       DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
				       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
				       0, 1, 0, &dev_priv->mm.ring_read.bo);
	if (ret) {
		DRM_ERROR("failed to allocate ring read\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->mm.ring_read.bo, 0,
			  PAGE_SIZE >> PAGE_SHIFT,
			  &dev_priv->mm.ring_read.kmap);
	if (ret) {
		DRM_ERROR("failed to map ring read\n");
		return -EINVAL;
	}

	DRM_DEBUG("Ring ptr %p mapped at %d %p, read ptr %p mapped at %d %p\n",
		  dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual,
		  dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual);

	/* init the indirect buffers */
	radeon_gem_ib_init(dev);
	radeon_gem_dma_bufs_init(dev);
	return 0;
}
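
/*
 * Memory-controller idle polling. The MC_STATUS register and its idle
 * bit moved around between AVIVO generations, hence the per-family
 * cases below; its R600 location is unknown here, so R600 is simply
 * reported as idle.
 */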
static bool avivo_get_mc_idle(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (dev_priv->chip_family >= CHIP_R600)
		return true; /* no idea where this is on r600 yet */
	else if (dev_priv->chip_family == CHIP_RV515)
		return !!(radeon_read_mc_reg(dev_priv, RV515_MC_STATUS) & RV515_MC_STATUS_IDLE);
	else if (dev_priv->chip_family == CHIP_RS600)
		return !!(radeon_read_mc_reg(dev_priv, RS600_MC_STATUS) & RS600_MC_STATUS_IDLE);
	else if ((dev_priv->chip_family == CHIP_RS690) ||
		 (dev_priv->chip_family == CHIP_RS740))
		return !!(radeon_read_mc_reg(dev_priv, RS690_MC_STATUS) & RS690_MC_STATUS_IDLE);
	else
		return !!(radeon_read_mc_reg(dev_priv, R520_MC_STATUS) & R520_MC_STATUS_IDLE);
}
static void avivo_disable_mc_clients(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t tmp;
	int timeout;

	radeon_do_wait_for_idle(dev_priv);

	RADEON_WRITE(AVIVO_D1VGA_CONTROL, RADEON_READ(AVIVO_D1VGA_CONTROL) & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
	RADEON_WRITE(AVIVO_D2VGA_CONTROL, RADEON_READ(AVIVO_D2VGA_CONTROL) & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);

	tmp = RADEON_READ(AVIVO_D1CRTC_CONTROL);
	RADEON_WRITE(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

	tmp = RADEON_READ(AVIVO_D2CRTC_CONTROL);
	RADEON_WRITE(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

	tmp = RADEON_READ(AVIVO_D2CRTC_CONTROL);

	timeout = 0;
	while (!(avivo_get_mc_idle(dev))) {
		if (++timeout > 100000) {
			DRM_ERROR("Timeout waiting for memory controller to update settings\n");
			DRM_ERROR("Bad things may or may not happen\n");
		}
		udelay(10);
	}
}
static inline u32 radeon_busy_wait(struct drm_device *dev, uint32_t reg, uint32_t bits,
				   unsigned int timeout)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 status;

	do {
		status = RADEON_READ(reg);
		timeout--;
	} while (status != 0xffffffff && (status & bits) && (timeout > 0));

	if (timeout == 0)
		status = 0xffffffff;

	return status;
}
/* Wait for vertical sync on primary CRTC */
static void radeon_wait_for_vsync(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t crtc_gen_cntl;

	crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	RADEON_WRITE(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);

	radeon_busy_wait(dev, RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE, 2000);
}
/* Wait for vertical sync on secondary CRTC */
static void radeon_wait_for_vsync2(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t crtc2_gen_cntl;

	crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC2_VBLANK_SAVE bit */
	RADEON_WRITE(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);

	radeon_busy_wait(dev, RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE, 2000);
}
static void legacy_disable_mc_clients(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t old_mc_status, status_idle;
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
	uint32_t status;

	radeon_do_wait_for_idle(dev_priv);

	if (dev_priv->flags & RADEON_IS_IGP)
		return;

	old_mc_status = RADEON_READ(RADEON_MC_STATUS);

	/* stop display and memory access */
	ov0_scale_cntl = RADEON_READ(RADEON_OV0_SCALE_CNTL);
	RADEON_WRITE(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL);
	RADEON_WRITE(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL);

	radeon_wait_for_vsync(dev);

	RADEON_WRITE(RADEON_CRTC_GEN_CNTL,
		     (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
		     RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	if (!(dev_priv->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL);

		radeon_wait_for_vsync2(dev);
		RADEON_WRITE(RADEON_CRTC2_GEN_CNTL,
			     (crtc2_gen_cntl &
			      ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
			     RADEON_CRTC2_DISP_REQ_EN_B);
	}

	if (radeon_is_r300(dev_priv))
		status_idle = R300_MC_IDLE;
	else
		status_idle = RADEON_MC_IDLE;

	status = radeon_busy_wait(dev, RADEON_MC_STATUS, status_idle, 200000);
	if (status == 0xffffffff) {
		DRM_ERROR("Timeout waiting for memory controller to update settings\n");
		DRM_ERROR("Bad things may or may not happen\n");
	}
}
void radeon_init_memory_map(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 mem_size, aper_size;
	u32 aper0_base;

	dev_priv->mc_fb_location = radeon_read_fb_location(dev_priv);
	radeon_read_agp_location(dev_priv, &dev_priv->mc_agp_loc_lo, &dev_priv->mc_agp_loc_hi);

	if (dev_priv->chip_family >= CHIP_R600) {
		mem_size = RADEON_READ(R600_CONFIG_MEMSIZE);
		aper_size = RADEON_READ(R600_CONFIG_APER_SIZE);
	} else {
		mem_size = RADEON_READ(RADEON_CONFIG_MEMSIZE);
		aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE);
	}

	/* M6s report illegal memory size */
	if (mem_size == 0)
		mem_size = 8 * 1024 * 1024;

	/* for RN50/M6/M7 - Novell bug 204882 */
	if (aper_size > mem_size)
		mem_size = aper_size;

	if ((dev_priv->chip_family != CHIP_RS600) &&
	    (dev_priv->chip_family != CHIP_RS690) &&
	    (dev_priv->chip_family != CHIP_RS740)) {
		if (dev_priv->flags & RADEON_IS_IGP)
			dev_priv->mc_fb_location = RADEON_READ(RADEON_NB_TOM);
		else {
			if (dev_priv->chip_family >= CHIP_R600)
				aper0_base = RADEON_READ(R600_CONFIG_F0_BASE);
			else
				aper0_base = RADEON_READ(RADEON_CONFIG_APER_0_BASE);

			/* Some chips have an "issue" with the memory controller, the
			 * location must be aligned to the size. We just align it down,
			 * too bad if we walk over the top of system memory, we don't
			 * use DMA without a remap anyway.
			 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
			 */
			if (dev_priv->chip_family == CHIP_RV280 ||
			    dev_priv->chip_family == CHIP_R300 ||
			    dev_priv->chip_family == CHIP_R350 ||
			    dev_priv->chip_family == CHIP_RV350 ||
			    dev_priv->chip_family == CHIP_RV380 ||
			    dev_priv->chip_family == CHIP_R420 ||
			    dev_priv->chip_family == CHIP_RV410)
				aper0_base &= ~(mem_size - 1);

			if (dev_priv->chip_family >= CHIP_R600) {
				dev_priv->mc_fb_location = (aper0_base >> 24) |
					(((aper0_base + mem_size - 1) & 0xff000000U) >> 8);
			} else {
				dev_priv->mc_fb_location = (aper0_base >> 16) |
					((aper0_base + mem_size - 1) & 0xffff0000U);
			}
		}
	}

	if (dev_priv->chip_family >= CHIP_R600)
		dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 24;
	else
		dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 16;

	/* updating mc regs here */
	if (radeon_is_avivo(dev_priv))
		avivo_disable_mc_clients(dev);
	else
		legacy_disable_mc_clients(dev);

	radeon_write_fb_location(dev_priv, dev_priv->mc_fb_location);

	if (radeon_is_avivo(dev_priv)) {
		if (dev_priv->chip_family >= CHIP_R600)
			RADEON_WRITE(R600_HDP_NONSURFACE_BASE, (dev_priv->mc_fb_location << 16) & 0xff0000);
		else
			RADEON_WRITE(AVIVO_HDP_FB_LOCATION, dev_priv->mc_fb_location);
	}

	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
	dev_priv->fb_size =
		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
		- dev_priv->fb_location;
}
/* init memory manager - start with all of VRAM and a 32MB GART aperture for now */
int radeon_gem_mm_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 pg_offset;
	int ret;

	/* init TTM underneath */
	drm_bo_driver_init(dev);

	/* size the mappable VRAM memory for now */
	radeon_vram_setup(dev);

	radeon_init_memory_map(dev);

#define VRAM_RESERVE_TEXT (256*1024) /* need to reserve 256k for text mode for now */
	dev_priv->mm.vram_visible -= VRAM_RESERVE_TEXT;
	pg_offset = VRAM_RESERVE_TEXT >> PAGE_SHIFT;
	drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, pg_offset, /*dev_priv->mm.vram_offset >> PAGE_SHIFT,*/
		       ((dev_priv->mm.vram_visible) >> PAGE_SHIFT) - 16,
		       0);

	dev_priv->mm.gart_size = (32 * 1024 * 1024);
	dev_priv->mm.gart_start = 0;
	ret = radeon_gart_init(dev);
	if (ret)
		return -EINVAL;

	drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
		       dev_priv->mm.gart_size >> PAGE_SHIFT,
		       0);

	/* need to allocate some objects in the GART */
	/* ring + ring read ptr */
	ret = radeon_alloc_gart_objects(dev);
	if (ret) {
		radeon_gem_mm_fini(dev);
		return -EINVAL;
	}

	dev_priv->mm_enabled = true;
	return 0;
}
void radeon_gem_mm_fini(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	radeon_gem_dma_bufs_destroy(dev);
	radeon_gem_ib_destroy(dev);

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.ring_read.bo) {
		drm_bo_kunmap(&dev_priv->mm.ring_read.kmap);
		drm_bo_usage_deref_locked(&dev_priv->mm.ring_read.bo);
	}

	if (dev_priv->mm.ring.bo) {
		drm_bo_kunmap(&dev_priv->mm.ring.kmap);
		drm_bo_usage_deref_locked(&dev_priv->mm.ring.bo);
	}

	if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
		DRM_DEBUG("delaying takedown of TTM memory\n");
	}

	if (dev_priv->flags & RADEON_IS_PCIE) {
		if (dev_priv->mm.pcie_table_backup) {
			kfree(dev_priv->mm.pcie_table_backup);
			dev_priv->mm.pcie_table_backup = NULL;
		}
		if (dev_priv->mm.pcie_table.bo) {
			drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap);
			drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo);
		}
	}

	if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
		DRM_DEBUG("delaying takedown of VRAM memory\n");
	}

	mutex_unlock(&dev->struct_mutex);

	drm_bo_driver_finish(dev);
	dev_priv->mm_enabled = false;
}
int radeon_gem_object_pin(struct drm_gem_object *obj,
			  uint32_t alignment, uint32_t pin_domain)
{
	struct drm_radeon_gem_object *obj_priv;
	int ret;
	uint32_t flags = DRM_BO_FLAG_NO_EVICT;
	uint32_t mask = DRM_BO_FLAG_NO_EVICT;

	obj_priv = obj->driver_private;

	if (pin_domain) {
		mask |= DRM_BO_MASK_MEM;
		if (pin_domain == RADEON_GEM_DOMAIN_GTT)
			flags |= DRM_BO_FLAG_MEM_TT;
		else if (pin_domain == RADEON_GEM_DOMAIN_VRAM)
			flags |= DRM_BO_FLAG_MEM_VRAM;
		else
			return -EINVAL;
	}

	ret = drm_bo_do_validate(obj_priv->bo, flags, mask,
				 DRM_BO_HINT_DONT_FENCE, 0);

	return ret;
}
int radeon_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_radeon_gem_object *obj_priv;
	int ret;

	obj_priv = obj->driver_private;

	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
				 DRM_BO_HINT_DONT_FENCE, 0);

	return ret;
}
#define RADEON_IB_MEMORY (1*1024*1024)
#define RADEON_IB_SIZE (65536)

#define RADEON_NUM_IB (RADEON_IB_MEMORY / RADEON_IB_SIZE)
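
/*
 * Indirect-buffer pool: RADEON_IB_MEMORY is split into RADEON_NUM_IB
 * (16) buffers of RADEON_IB_SIZE (64k) each, tracked with a simple
 * allocation bitmap. When all are busy we wait on each in turn and
 * reclaim the first one whose fence has passed.
 */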
int radeon_gem_ib_get(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset)
{
	int i, index = -1;
	int ret;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	for (i = 0; i < RADEON_NUM_IB; i++) {
		if (!(dev_priv->ib_alloc_bitmap & (1 << i))) {
			index = i;
			break;
		}
	}

	/* if all in use we need to wait */
	if (index == -1) {
		for (i = 0; i < RADEON_NUM_IB; i++) {
			if (dev_priv->ib_alloc_bitmap & (1 << i)) {
				mutex_lock(&dev_priv->ib_objs[i]->bo->mutex);
				ret = drm_bo_wait(dev_priv->ib_objs[i]->bo, 0, 1, 0, 0);
				mutex_unlock(&dev_priv->ib_objs[i]->bo->mutex);
				if (ret)
					continue;
				dev_priv->ib_alloc_bitmap &= ~(1 << i);
				index = i;
				break;
			}
		}
	}

	if (index == -1) {
		DRM_ERROR("Major case fail to allocate IB from freelist %x\n", dev_priv->ib_alloc_bitmap);
		return -EINVAL;
	}

	if (dwords > RADEON_IB_SIZE / sizeof(uint32_t))
		return -EINVAL;

	ret = drm_bo_do_validate(dev_priv->ib_objs[index]->bo, 0,
				 DRM_BO_FLAG_NO_EVICT,
				 DRM_BO_HINT_DONT_FENCE, 0);
	if (ret) {
		DRM_ERROR("Failed to validate IB %d\n", index);
		return -EINVAL;
	}

	*card_offset = dev_priv->gart_vm_start + dev_priv->ib_objs[index]->bo->offset;
	*ib = dev_priv->ib_objs[index]->kmap.virtual;
	dev_priv->ib_alloc_bitmap |= (1 << index);
	return 0;
}
static void radeon_gem_ib_free(struct drm_device *dev, void *ib, uint32_t dwords)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_fence_object *fence;
	int ret;
	int i;

	for (i = 0; i < RADEON_NUM_IB; i++) {
		if (dev_priv->ib_objs[i]->kmap.virtual == ib) {
			/* emit a fence object */
			ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
			if (ret)
				drm_putback_buffer_objects(dev);

			/* dereference the fence object */
			if (fence)
				drm_fence_usage_deref_unlocked(&fence);
		}
	}
}
static int radeon_gem_ib_destroy(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;

	if (dev_priv->ib_objs) {
		for (i = 0; i < RADEON_NUM_IB; i++) {
			if (dev_priv->ib_objs[i]) {
				drm_bo_kunmap(&dev_priv->ib_objs[i]->kmap);
				drm_bo_usage_deref_unlocked(&dev_priv->ib_objs[i]->bo);
			}
			drm_free(dev_priv->ib_objs[i], sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
		}
		drm_free(dev_priv->ib_objs, RADEON_NUM_IB * sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
	}
	dev_priv->ib_objs = NULL;
	return 0;
}
static int radeon_gem_relocate(struct drm_device *dev, struct drm_file *file_priv,
			       uint32_t *reloc, uint32_t *offset)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	/* relocate the handle */
	uint32_t read_domains = reloc[2];
	uint32_t write_domain = reloc[3];
	struct drm_gem_object *obj;
	uint32_t flags = 0;
	struct drm_radeon_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, reloc[1]);
	if (!obj)
		return -EINVAL;

	obj_priv = obj->driver_private;
	radeon_gem_set_domain(obj, read_domains, write_domain, &flags, false);

	if (flags == DRM_BO_FLAG_MEM_VRAM)
		*offset = obj_priv->bo->offset + dev_priv->fb_location;
	else if (flags == DRM_BO_FLAG_MEM_TT)
		*offset = obj_priv->bo->offset + dev_priv->gart_vm_start;

	/* BAD BAD BAD - LINKED LIST THE OBJS and UNREF ONCE IB is SUBMITTED */
	drm_gem_object_unreference(obj);
	return 0;
}
/* allocate 1MB of 64k IBs that the kernel can keep mapped */
static int radeon_gem_ib_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	int ret;

	dev_priv->ib_objs = drm_calloc(RADEON_NUM_IB, sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
	if (!dev_priv->ib_objs)
		return -ENOMEM;

	for (i = 0; i < RADEON_NUM_IB; i++) {
		dev_priv->ib_objs[i] = drm_calloc(1, sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
		if (!dev_priv->ib_objs[i])
			goto free_all;

		ret = drm_buffer_object_create(dev, RADEON_IB_SIZE,
					       drm_bo_type_kernel,
					       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
					       DRM_BO_FLAG_MAPPABLE, 0,
					       0, 0, &dev_priv->ib_objs[i]->bo);
		if (ret)
			goto free_all;

		ret = drm_bo_kmap(dev_priv->ib_objs[i]->bo, 0, RADEON_IB_SIZE >> PAGE_SHIFT,
				  &dev_priv->ib_objs[i]->kmap);
		if (ret)
			goto free_all;
	}

	dev_priv->ib_alloc_bitmap = 0;

	dev_priv->cs.ib_get = radeon_gem_ib_get;
	dev_priv->cs.ib_free = radeon_gem_ib_free;

	radeon_cs_init(dev);
	dev_priv->cs.relocate = radeon_gem_relocate;
	return 0;

free_all:
	radeon_gem_ib_destroy(dev);
	return -ENOMEM;
}
#define RADEON_DMA_BUFFER_SIZE (64 * 1024)
#define RADEON_DMA_BUFFER_COUNT (16)
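
/*
 * Legacy DMA buffer compatibility: the classic drm_buf pool is carved
 * out of a single pinned GART buffer object and published through a
 * fake "AGP" buffer map, so the old DMA buffer ioctls keep working on
 * top of the GEM/TTM memory manager.
 */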
/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
static int radeon_gem_addbufs(struct drm_device *dev)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = RADEON_DMA_BUFFER_COUNT;
	order = drm_order(RADEON_DMA_BUFFER_SIZE);
	size = 1 << order;

	alignment = PAGE_ALIGN(size);
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev_priv->mm.dma_bufs.bo->offset;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY; /* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = dev_priv->gart_vm_start + agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	dma->flags = _DRM_DMA_USE_SG;
	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int radeon_gem_dma_bufs_init(struct drm_device *dev)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;
	int size = RADEON_DMA_BUFFER_SIZE * RADEON_DMA_BUFFER_COUNT;
	int ret;

	ret = drm_dma_setup(dev);
	if (ret < 0)
		return ret;

	ret = drm_buffer_object_create(dev, size, drm_bo_type_device,
				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_NO_EVICT |
				       DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE, 0,
				       0, 0, &dev_priv->mm.dma_bufs.bo);
	if (ret) {
		DRM_ERROR("Failed to create DMA bufs\n");
		return -ENOMEM;
	}

	ret = drm_bo_kmap(dev_priv->mm.dma_bufs.bo, 0, size >> PAGE_SHIFT,
			  &dev_priv->mm.dma_bufs.kmap);
	if (ret) {
		DRM_ERROR("Failed to mmap DMA buffers\n");
		return -ENOMEM;
	}

	radeon_gem_addbufs(dev);

	DRM_DEBUG("%x %d\n", dev_priv->mm.dma_bufs.bo->map_list.hash.key, size);
	dev->agp_buffer_token = dev_priv->mm.dma_bufs.bo->map_list.hash.key << PAGE_SHIFT;
	dev_priv->mm.fake_agp_map.handle = dev_priv->mm.dma_bufs.kmap.virtual;
	dev_priv->mm.fake_agp_map.size = size;

	dev->agp_buffer_map = &dev_priv->mm.fake_agp_map;
	dev_priv->gart_buffers_offset = dev_priv->mm.dma_bufs.bo->offset + dev_priv->gart_vm_start;
	return 0;
}
static void radeon_gem_dma_bufs_destroy(struct drm_device *dev)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;

	drm_dma_takedown(dev);

	if (dev_priv->mm.dma_bufs.bo) {
		drm_bo_kunmap(&dev_priv->mm.dma_bufs.kmap);
		drm_bo_usage_deref_unlocked(&dev_priv->mm.dma_bufs.bo);
	}
}
static struct drm_gem_object *gem_object_get(struct drm_device *dev, uint32_t name)
{
	struct drm_gem_object *obj;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	return obj;
}
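
/*
 * Refresh the SAREA-driven front/back buffer state. The *_pitch_offset
 * values pack the pitch in 64-byte units into the high bits and the
 * card-relative buffer address, shifted right by 10, into the low bits.
 */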
void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;

	/* update front_pitch_offset and back_pitch_offset */
	obj = gem_object_get(dev, sarea_priv->front_handle);
	if (obj) {
		obj_priv = obj->driver_private;

		dev_priv->front_offset = obj_priv->bo->offset;
		dev_priv->front_pitch_offset = (((sarea_priv->front_pitch / 64) << 22) |
						((obj_priv->bo->offset
						  + dev_priv->fb_location) >> 10));
		drm_gem_object_unreference(obj);
	}

	obj = gem_object_get(dev, sarea_priv->back_handle);
	if (obj) {
		obj_priv = obj->driver_private;
		dev_priv->back_offset = obj_priv->bo->offset;
		dev_priv->back_pitch_offset = (((sarea_priv->back_pitch / 64) << 22) |
					       ((obj_priv->bo->offset
						 + dev_priv->fb_location) >> 10));
		drm_gem_object_unreference(obj);
	}
	dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
}