/*
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
static int radeon_gem_ib_init(struct drm_device *dev);
static int radeon_gem_ib_destroy(struct drm_device *dev);
static int radeon_gem_dma_bufs_init(struct drm_device *dev);
static void radeon_gem_dma_bufs_destroy(struct drm_device *dev);
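
/* Each GEM object in this driver wraps a TTM buffer object
 * (drm_radeon_gem_object->bo): GEM provides the handle/name machinery for
 * userspace, while TTM handles placement and eviction of the backing store. */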
int radeon_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_radeon_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (!obj_priv)
		return -ENOMEM;

	obj->driver_private = obj_priv;
	return 0;
}
void radeon_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_radeon_gem_object *obj_priv = obj->driver_private;

	/* tear down the buffer object - gem holds struct mutex */
	drm_bo_takedown_vm_locked(obj_priv->bo);
	drm_bo_usage_deref_locked(&obj_priv->bo);
	drm_free(obj->driver_private, sizeof(*obj_priv), DRM_MEM_DRIVER);
}
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;
	struct drm_radeon_gem_info *args = data;

	args->vram_start = dev_priv->mm.vram_offset;
	args->vram_size = dev_priv->mm.vram_size;
	args->vram_visible = dev_priv->mm.vram_visible;

	args->gart_start = dev_priv->mm.gart_start;
	args->gart_size = dev_priv->mm.gart_size;

	return 0;
}
struct drm_gem_object *radeon_gem_object_alloc(struct drm_device *dev, int size, int alignment,
					       int initial_domain, bool discardable)
{
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	uint32_t flags;
	uint32_t page_align;
	int ret;

	obj = drm_gem_object_alloc(dev, size);
	if (!obj)
		return NULL;

	obj_priv = obj->driver_private;
	flags = DRM_BO_FLAG_MAPPABLE;
	if (initial_domain == RADEON_GEM_DOMAIN_VRAM)
		flags |= DRM_BO_FLAG_MEM_VRAM;
	else if (initial_domain == RADEON_GEM_DOMAIN_GTT)
		flags |= DRM_BO_FLAG_MEM_TT;
	else
		flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;

	flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE;

	if (discardable)
		flags |= DRM_BO_FLAG_DISCARDABLE;

	if (alignment == 0)
		alignment = PAGE_SIZE;

	page_align = alignment >> PAGE_SHIFT;
	/* create a TTM BO */
	ret = drm_buffer_object_create(dev,
				       size, drm_bo_type_device,
				       flags, 0, page_align,
				       0, &obj_priv->bo);
	if (ret)
		goto fail;

	DRM_DEBUG("%p : size 0x%x, alignment %d, initial_domain %d\n", obj_priv->bo, size, alignment, initial_domain);
	return obj;
fail:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return NULL;
}
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_radeon_gem_create *args = data;
	struct drm_radeon_gem_object *obj_priv;
	struct drm_gem_object *obj;
	uint32_t handle;
	int ret;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);

	obj = radeon_gem_object_alloc(dev, args->size, args->alignment, args->initial_domain, args->no_backing_store);
	if (!obj)
		return -EINVAL;

	obj_priv = obj->driver_private;
	DRM_DEBUG("obj is %p bo is %p, %d\n", obj, obj_priv->bo, obj_priv->bo->num_pages);
	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	args->handle = handle;

	return 0;
fail:
	drm_gem_object_unreference(obj);

	return ret;
}
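
/* Domain selection below: a write domain always decides placement (VRAM or
 * GTT); for read-only use we prefer to leave the buffer where it already is
 * if that's one of the requested read domains, to avoid needless migration. */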
int radeon_gem_set_domain(struct drm_gem_object *obj, uint32_t read_domains, uint32_t write_domain, uint32_t *flags_p, bool unfenced)
{
	struct drm_device *dev = obj->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_gem_object *obj_priv;
	uint32_t flags = 0;
	int ret;

	obj_priv = obj->driver_private;

	/* work out where to validate the buffer to */
	if (write_domain) { /* write domains always win */
		if (write_domain == RADEON_GEM_DOMAIN_VRAM)
			flags = DRM_BO_FLAG_MEM_VRAM;
		else if (write_domain == RADEON_GEM_DOMAIN_GTT)
			flags = DRM_BO_FLAG_MEM_TT; // need a can write gart check
		else
			return -EINVAL; // we can't write to system RAM
	} else {
		/* okay for a read domain - prefer wherever the object is now or close enough */
		if (read_domains == 0)
			return -EINVAL;

		/* if it's already in local memory and CPU is a valid domain, do nothing */
		if (read_domains & RADEON_GEM_DOMAIN_CPU) {
			if (obj_priv->bo->mem.mem_type == DRM_BO_MEM_LOCAL)
				return 0;
			if (read_domains == RADEON_GEM_DOMAIN_CPU)
				return -EINVAL;
		}

		/* simple case, no choice in domains */
		if (read_domains == RADEON_GEM_DOMAIN_VRAM)
			flags = DRM_BO_FLAG_MEM_VRAM;
		else if (read_domains == RADEON_GEM_DOMAIN_GTT)
			flags = DRM_BO_FLAG_MEM_TT;
		else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_VRAM) && (read_domains & RADEON_GEM_DOMAIN_VRAM))
			flags = DRM_BO_FLAG_MEM_VRAM;
		else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_TT) && (read_domains & RADEON_GEM_DOMAIN_GTT))
			flags = DRM_BO_FLAG_MEM_TT;
		else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_LOCAL) && (read_domains & RADEON_GEM_DOMAIN_GTT))
			flags = DRM_BO_FLAG_MEM_TT;
		else {
			/* no clear winner - just set whatever we were given */
			if (read_domains & RADEON_GEM_DOMAIN_VRAM)
				flags |= DRM_BO_FLAG_MEM_VRAM;
			if (read_domains & RADEON_GEM_DOMAIN_GTT)
				flags |= DRM_BO_FLAG_MEM_TT;
		}
	}

	/* if this BO is pinned then we aren't moving it anywhere */
	if (obj_priv->bo->pinned_mem_type && unfenced)
		return 0;

	DRM_DEBUG("validating %p from %d into %x %d %d\n", obj_priv->bo, obj_priv->bo->mem.mem_type, flags, read_domains, write_domain);
	ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM | DRM_BO_FLAG_CACHED,
				 unfenced ? DRM_BO_HINT_DONT_FENCE : 0, 0);
	if (ret)
		return ret;

	if (flags_p)
		*flags_p = flags;
	return 0;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	/* transition the BO to a domain - just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	int ret;

	/* for now if someone requests domain CPU - just make sure the buffer is finished with */

	/* just do a BO wait for now */
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	ret = radeon_gem_set_domain(obj, args->read_domains, args->write_domain, NULL, true);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
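
/* pread/pwrite: only a skeleton so far - pread is unimplemented and pwrite
 * only sketches the host-data-blit upload path for VRAM-resident buffers. */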
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return -ENOSYS;
}
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_radeon_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	/* check where the buffer is first - if not in VRAM
	   fall back to userspace copying for now */
	mutex_lock(&obj_priv->bo->mutex);
	if (obj_priv->bo->mem.mem_type != DRM_BO_MEM_VRAM) {
		ret = -EINVAL;
		goto out_unlock;
	}

	DRM_ERROR("pwriting data->size %lld %llx\n", args->size, args->offset);

	/* so we need to grab an IB, copy the data into it in a loop
	   and send them to VRAM using HDB */
	/* XXX: this upload path is unfinished - cpp, w, x, y, h, src and the
	 * pitch values are not yet wired up to the ioctl arguments */
	while ((buf = radeon_host_data_blit(dev, cpp, w, dst_pitch_off, &buf_pitch,
					    x, &y, (unsigned int *)&h, &hpass)) != 0) {
		radeon_host_data_blit_copy_pass(dev, cpp, buf, (uint8_t *)src,
						hpass, buf_pitch, src_pitch);
		src += hpass * src_pitch;
	}

out_unlock:
	mutex_unlock(&obj_priv->bo->mutex);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
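
/* mmap: hand userspace a mapping of the underlying TTM object via its
 * map_list hash key, the same offset drm_mmap() uses to find the buffer. */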
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_radeon_gem_mmap *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	loff_t offset;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	offset = args->offset;

	DRM_DEBUG("got here %p\n", obj);
	obj_priv = obj->driver_private;

	DRM_DEBUG("got here %p %p %lld %ld\n", obj, obj_priv->bo, args->size, obj_priv->bo->num_pages);
	if (!obj_priv->bo->map_list.map) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file_priv->filp, 0, args->size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     obj_priv->bo->map_list.hash.key);
	up_write(&current->mm->mmap_sem);

	DRM_DEBUG("got here %p %d\n", obj, obj_priv->bo->mem.mem_type);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
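
/* Pinning validates the buffer with DRM_BO_FLAG_NO_EVICT set and no fence,
 * so it stays put at a stable offset until explicitly unpinned. */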
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_radeon_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	int ret = 0;
	int flags = DRM_BO_FLAG_NO_EVICT;
	int mask = DRM_BO_FLAG_NO_EVICT;

	/* check for valid args */
	if (args->pin_domain) {
		mask |= DRM_BO_MASK_MEM;
		if (args->pin_domain == RADEON_GEM_DOMAIN_GTT)
			flags |= DRM_BO_FLAG_MEM_TT;
		else if (args->pin_domain == RADEON_GEM_DOMAIN_VRAM)
			flags |= DRM_BO_FLAG_MEM_VRAM;
		else /* hand back the offset we currently have if no args supplied
			- this is to allow old mesa to work - it's a hack */
			flags = mask = 0;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	/* validate into a pin with no fence */
	DRM_DEBUG("got here %p %p %d\n", obj, obj_priv->bo, atomic_read(&obj_priv->bo->usage));
	if (flags && !(obj_priv->bo->type != drm_bo_type_kernel && !DRM_SUSER(DRM_CURPROC))) {
		ret = drm_bo_do_validate(obj_priv->bo, flags, mask,
					 DRM_BO_HINT_DONT_FENCE, 0);
	}

	/* hand the current offset back to userspace */
	args->offset = obj_priv->bo->offset;
	DRM_DEBUG("got here %p %p %x\n", obj, obj_priv->bo, obj_priv->bo->offset);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_radeon_gem_unpin *args = data;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	/* validate with NO_EVICT cleared - i.e. unpin, no fence */
	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
				 DRM_BO_HINT_DONT_FENCE, 0);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int radeon_gem_busy(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	return 0;
}

int radeon_gem_execbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return -ENOSYS;
}
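
/* Fire an indirect buffer: validate the BO so it can't move, wait for the 3D
 * engine to idle, point CP_IB_BASE at it, then fence the buffer so we know
 * when the CP has consumed it. */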
int radeon_gem_indirect_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_radeon_gem_indirect *args = data;
	struct drm_radeon_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	uint32_t start, end;
	int ret;
	RING_LOCALS;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	obj_priv = obj->driver_private;

	DRM_DEBUG("got here %p %d\n", obj, args->used);
	//RING_SPACE_TEST_WITH_RETURN(dev_priv);
	//VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
				 0, 0);
	if (ret)
		return ret;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	start = 0;
	end = args->used;

	if (start != end) {
		int offset = (dev_priv->gart_vm_start +
			      obj_priv->bo->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}

	COMMIT_RING();

	/* we need to fence the buffer */
	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &obj_priv->fence);
	if (ret) {
		drm_putback_buffer_objects(dev);
		goto fail;
	}

	/* dereference the fence object */
	drm_fence_usage_deref_unlocked(&obj_priv->fence);

fail:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*
 * Depending on card generation, chipset bugs, etc... the amount of vram
 * accessible to the CPU can vary. This function is our best shot at figuring
 * it out. Returns a value in KB.
 */
static uint32_t radeon_get_accessible_vram(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t aper_size;
	u8 byte;

	if (dev_priv->chip_family >= CHIP_R600)
		aper_size = RADEON_READ(R600_CONFIG_APER_SIZE) / 1024;
	else
		aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE) / 1024;

	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is, ones with the 2nd generation multifunction PCI interface
	 */
	if (dev_priv->chip_family == CHIP_RV280 ||
	    dev_priv->chip_family == CHIP_RV350 ||
	    dev_priv->chip_family == CHIP_RV380 ||
	    dev_priv->chip_family == CHIP_R420 ||
	    dev_priv->chip_family == CHIP_R423 ||
	    dev_priv->chip_family == CHIP_RV410 ||
	    radeon_is_avivo(dev_priv)) {
		uint32_t temp = RADEON_READ(RADEON_HOST_PATH_CNTL);
		temp |= RADEON_HDP_APER_CNTL;
		RADEON_WRITE(RADEON_HOST_PATH_CNTL, temp);
		return aper_size * 2;
	}

	/* Older cards have all sorts of funny issues to deal with. First
	 * check if it's a multifunction card by reading the PCI config
	 * header type... Limit those to one aperture size
	 */
	pci_read_config_byte(dev->pdev, 0xe, &byte);
	if (byte & 0x80)
		return aper_size;

	/* Single function older card. We read HDP_APER_CNTL to see how the
	 * BIOS has set it up. We don't write this as it's broken on some
	 * ASICs but we expect the BIOS to have done the right thing (might
	 * be too optimistic...)
	 */
	if (RADEON_READ(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
		return aper_size * 2;

	return aper_size;
}
/* code from the DDX - do memory sizing */
void radeon_vram_setup(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t vram;
	uint32_t accessible, bar_size;

	if (!radeon_is_avivo(dev_priv) && (dev_priv->flags & RADEON_IS_IGP)) {
		uint32_t tom = RADEON_READ(RADEON_NB_TOM);

		vram = (((tom >> 16) - (tom & 0xffff) + 1) << 6);
		RADEON_WRITE(RADEON_CONFIG_MEMSIZE, vram * 1024);
	} else {
		if (dev_priv->chip_family >= CHIP_R600)
			vram = RADEON_READ(R600_CONFIG_MEMSIZE) / 1024;
		else {
			vram = RADEON_READ(RADEON_CONFIG_MEMSIZE) / 1024;

			/* Some production boards of m6 will return 0 if it's 8 MB */
			if (vram == 0) {
				vram = 8192;
				RADEON_WRITE(RADEON_CONFIG_MEMSIZE, 0x800000);
			}
		}
	}

	accessible = radeon_get_accessible_vram(dev);

	bar_size = drm_get_resource_len(dev, 0) / 1024;
	if (bar_size == 0)
		bar_size = 0x20000;
	if (accessible > bar_size)
		accessible = bar_size;

	if (accessible > vram)
		accessible = vram;

	DRM_INFO("Detected VRAM RAM=%dK, accessible=%uK, BAR=%uK\n",
		 vram, accessible, bar_size);

	dev_priv->mm.vram_offset = dev_priv->fb_aper_offset;
	dev_priv->mm.vram_size = vram * 1024;
	dev_priv->mm.vram_visible = accessible * 1024;
}
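
/* Pick a GART aperture location that doesn't overlap the framebuffer: on AGP
 * use the AGP base if it fits, otherwise place the aperture just above (or,
 * on wraparound, below) VRAM in the card's address space, 4MB aligned. */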
static int radeon_gart_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int ret;
	u32 base = 0;

	/* setup a 32MB GART */
	dev_priv->gart_size = dev_priv->mm.gart_size;

	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;

	/* setup VRAM vs GART here */
	if (dev_priv->flags & RADEON_IS_AGP) {
		base = dev->agp->base;
		if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
		    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
			DRM_INFO("Can't use agp base @0x%08lx, won't fit\n",
				 dev->agp->base);
			base = 0;
		}
	}

	if (base == 0) {
		base = dev_priv->fb_location + dev_priv->fb_size;
		if (base < dev_priv->fb_location ||
		    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
			base = dev_priv->fb_location
				- dev_priv->gart_size;
	}
	/* start on the card */
	dev_priv->gart_vm_start = base & 0xffc00000u;
	if (dev_priv->gart_vm_start != base)
		DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
			 base, dev_priv->gart_vm_start);

	/* if on PCIE we need to allocate an fb object for the PCIE GART table */
	if (dev_priv->flags & RADEON_IS_PCIE) {
		ret = drm_buffer_object_create(dev, RADEON_PCIGART_TABLE_SIZE,
					       drm_bo_type_kernel,
					       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
					       0, 1, 0, &dev_priv->mm.pcie_table.bo);
		if (ret)
			return -EINVAL;

		dev_priv->mm.pcie_table_backup = kzalloc(RADEON_PCIGART_TABLE_SIZE, GFP_KERNEL);
		if (!dev_priv->mm.pcie_table_backup)
			return -EINVAL;

		ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
				  &dev_priv->mm.pcie_table.kmap);
		if (ret)
			return -EINVAL;

		dev_priv->pcigart_offset_set = 2;
		dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table.bo->offset;
		dev_priv->gart_info.addr = dev_priv->mm.pcie_table.kmap.virtual;
		dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB;
		memset(dev_priv->gart_info.addr, 0, RADEON_PCIGART_TABLE_SIZE);
	} else if (!(dev_priv->flags & RADEON_IS_AGP)) {
		/* allocate PCI GART table */
		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
		if (dev_priv->flags & RADEON_IS_IGPGART)
			dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
		else
			dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;

		ret = drm_ati_alloc_pcigart_table(dev, &dev_priv->gart_info);
		if (ret) {
			DRM_ERROR("cannot allocate PCI GART page!\n");
			return -EINVAL;
		}

		dev_priv->gart_info.addr = dev_priv->gart_info.table_handle->vaddr;
		dev_priv->gart_info.bus_addr = dev_priv->gart_info.table_handle->busaddr;
	}

	/* gart values setup - start the GART */
	if (dev_priv->flags & RADEON_IS_AGP) {
		radeon_set_pcigart(dev_priv, 0);
	} else {
		radeon_set_pcigart(dev_priv, 1);
	}

	return 0;
}
int radeon_alloc_gart_objects(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = drm_buffer_object_create(dev, RADEON_DEFAULT_RING_SIZE,
				       drm_bo_type_kernel,
				       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
				       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
				       0, 1, 0, &dev_priv->mm.ring.bo);
	if (ret) {
		if (dev_priv->flags & RADEON_IS_AGP)
			DRM_ERROR("failed to allocate ring - most likely an AGP driver bug\n");
		else
			DRM_ERROR("failed to allocate ring\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->mm.ring.bo, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT,
			  &dev_priv->mm.ring.kmap);
	if (ret) {
		DRM_ERROR("failed to map ring\n");
		return -EINVAL;
	}

	ret = drm_buffer_object_create(dev, PAGE_SIZE,
				       drm_bo_type_kernel,
				       DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
				       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
				       0, 1, 0, &dev_priv->mm.ring_read.bo);
	if (ret) {
		DRM_ERROR("failed to allocate ring read\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->mm.ring_read.bo, 0,
			  PAGE_SIZE >> PAGE_SHIFT,
			  &dev_priv->mm.ring_read.kmap);
	if (ret) {
		DRM_ERROR("failed to map ring read\n");
		return -EINVAL;
	}

	DRM_DEBUG("Ring ptr %p mapped at %d %p, read ptr %p mapped at %d %p\n",
		  dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual,
		  dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual);

	/* init the indirect buffers */
	radeon_gem_ib_init(dev);
	radeon_gem_dma_bufs_init(dev);
	return 0;
}
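
/* The MC idle bit lives in a different register on each AVIVO family
 * (RV515/RS600/RS690-RS740/R520); r600 isn't handled yet, so report idle. */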
static bool avivo_get_mc_idle(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (dev_priv->chip_family >= CHIP_R600) {
		/* no idea where this is on r600 yet */
		return true;
	} else if (dev_priv->chip_family == CHIP_RV515) {
		if (radeon_read_mc_reg(dev_priv, RV515_MC_STATUS) & RV515_MC_STATUS_IDLE)
			return true;
		else
			return false;
	} else if (dev_priv->chip_family == CHIP_RS600) {
		if (radeon_read_mc_reg(dev_priv, RS600_MC_STATUS) & RS600_MC_STATUS_IDLE)
			return true;
		else
			return false;
	} else if ((dev_priv->chip_family == CHIP_RS690) ||
		   (dev_priv->chip_family == CHIP_RS740)) {
		if (radeon_read_mc_reg(dev_priv, RS690_MC_STATUS) & RS690_MC_STATUS_IDLE)
			return true;
		else
			return false;
	} else {
		if (radeon_read_mc_reg(dev_priv, R520_MC_STATUS) & R520_MC_STATUS_IDLE)
			return true;
		else
			return false;
	}
}
static void avivo_disable_mc_clients(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t tmp;
	int timeout;

	radeon_do_wait_for_idle(dev_priv);

	RADEON_WRITE(AVIVO_D1VGA_CONTROL, RADEON_READ(AVIVO_D1VGA_CONTROL) & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
	RADEON_WRITE(AVIVO_D2VGA_CONTROL, RADEON_READ(AVIVO_D2VGA_CONTROL) & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);

	tmp = RADEON_READ(AVIVO_D1CRTC_CONTROL);
	RADEON_WRITE(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

	tmp = RADEON_READ(AVIVO_D2CRTC_CONTROL);
	RADEON_WRITE(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

	tmp = RADEON_READ(AVIVO_D2CRTC_CONTROL);

	udelay(1000);

	timeout = 0;
	while (!(avivo_get_mc_idle(dev))) {
		if (++timeout > 100000) {
			DRM_ERROR("Timeout waiting for memory controller to update settings\n");
			DRM_ERROR("Bad things may or may not happen\n");
		}
		udelay(10);
	}
}
static inline u32 radeon_busy_wait(struct drm_device *dev, uint32_t reg, uint32_t bits,
				   unsigned int timeout)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 status;

	do {
		udelay(10);
		status = RADEON_READ(reg);
		timeout--;
	} while (status != 0xffffffff && (status & bits) && (timeout > 0));

	if (timeout == 0)
		status = 0xffffffff;

	return status;
}
/* Wait for vertical sync on primary CRTC */
static void radeon_wait_for_vsync(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t crtc_gen_cntl;

	crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	RADEON_WRITE(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);

	radeon_busy_wait(dev, RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE, 2000);
}
/* Wait for vertical sync on secondary CRTC */
static void radeon_wait_for_vsync2(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t crtc2_gen_cntl;

	crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC2_VBLANK_SAVE bit */
	RADEON_WRITE(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);

	radeon_busy_wait(dev, RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE, 2000);
}
static void legacy_disable_mc_clients(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t old_mc_status, status_idle;
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
	uint32_t status;

	radeon_do_wait_for_idle(dev_priv);

	if (dev_priv->flags & RADEON_IS_IGP)
		return;

	old_mc_status = RADEON_READ(RADEON_MC_STATUS);

	/* stop display and memory access */
	ov0_scale_cntl = RADEON_READ(RADEON_OV0_SCALE_CNTL);
	RADEON_WRITE(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL);
	RADEON_WRITE(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL);

	radeon_wait_for_vsync(dev);

	RADEON_WRITE(RADEON_CRTC_GEN_CNTL,
		     (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
		     RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	if (!(dev_priv->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL);

		radeon_wait_for_vsync2(dev);
		RADEON_WRITE(RADEON_CRTC2_GEN_CNTL,
			     (crtc2_gen_cntl &
			      ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
			     RADEON_CRTC2_DISP_REQ_EN_B);
	}

	udelay(500);

	if (radeon_is_r300(dev_priv))
		status_idle = R300_MC_IDLE;
	else
		status_idle = RADEON_MC_IDLE;

	status = radeon_busy_wait(dev, RADEON_MC_STATUS, status_idle, 200000);
	if (status == 0xffffffff) {
		DRM_ERROR("Timeout waiting for memory controller to update settings\n");
		DRM_ERROR("Bad things may or may not happen\n");
	}
}
void radeon_init_memory_map(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 mem_size, aper_size;
	u32 aper0_base;

	dev_priv->mc_fb_location = radeon_read_fb_location(dev_priv);
	radeon_read_agp_location(dev_priv, &dev_priv->mc_agp_loc_lo, &dev_priv->mc_agp_loc_hi);

	if (dev_priv->chip_family >= CHIP_R600) {
		mem_size = RADEON_READ(R600_CONFIG_MEMSIZE);
		aper_size = RADEON_READ(R600_CONFIG_APER_SIZE);
	} else {
		mem_size = RADEON_READ(RADEON_CONFIG_MEMSIZE);
		aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE);
	}

	/* M6s report illegal memory size */
	if (mem_size == 0)
		mem_size = 8 * 1024 * 1024;

	/* for RN50/M6/M7 - Novell bug 204882 */
	if (aper_size > mem_size)
		mem_size = aper_size;

	if ((dev_priv->chip_family != CHIP_RS600) &&
	    (dev_priv->chip_family != CHIP_RS690) &&
	    (dev_priv->chip_family != CHIP_RS740)) {
		if (dev_priv->flags & RADEON_IS_IGP)
			dev_priv->mc_fb_location = RADEON_READ(RADEON_NB_TOM);
		else {
			if (dev_priv->chip_family >= CHIP_R600)
				aper0_base = RADEON_READ(R600_CONFIG_F0_BASE);
			else
				aper0_base = RADEON_READ(RADEON_CONFIG_APER_0_BASE);

			/* Some chips have an "issue" with the memory controller, the
			 * location must be aligned to the size. We just align it down,
			 * too bad if we walk over the top of system memory, we don't
			 * use DMA without a remap anyway.
			 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
			 */
			if (dev_priv->chip_family == CHIP_RV280 ||
			    dev_priv->chip_family == CHIP_R300 ||
			    dev_priv->chip_family == CHIP_R350 ||
			    dev_priv->chip_family == CHIP_RV350 ||
			    dev_priv->chip_family == CHIP_RV380 ||
			    dev_priv->chip_family == CHIP_R420 ||
			    dev_priv->chip_family == CHIP_R423 ||
			    dev_priv->chip_family == CHIP_RV410)
				aper0_base &= ~(mem_size - 1);

			if (dev_priv->chip_family >= CHIP_R600) {
				dev_priv->mc_fb_location = (aper0_base >> 24) |
					(((aper0_base + mem_size - 1) & 0xff000000U) >> 8);
			} else {
				dev_priv->mc_fb_location = (aper0_base >> 16) |
					((aper0_base + mem_size - 1) & 0xffff0000U);
			}
		}
	}
	if (dev_priv->chip_family >= CHIP_R600)
		dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 24;
	else
		dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 16;

	/* updating mc regs here */
	if (radeon_is_avivo(dev_priv))
		avivo_disable_mc_clients(dev);
	else
		legacy_disable_mc_clients(dev);

	radeon_write_fb_location(dev_priv, dev_priv->mc_fb_location);

	if (radeon_is_avivo(dev_priv)) {
		if (dev_priv->chip_family >= CHIP_R600)
			RADEON_WRITE(R600_HDP_NONSURFACE_BASE, (dev_priv->mc_fb_location << 16) & 0xff0000);
		else
			RADEON_WRITE(AVIVO_HDP_FB_LOCATION, dev_priv->mc_fb_location);
	}

	if (dev_priv->chip_family >= CHIP_R600) {
		dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffffff) << 24;
		dev_priv->fb_size = ((radeon_read_fb_location(dev_priv) & 0xff000000u) + 0x1000000)
			- dev_priv->fb_location;
	} else {
		dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
		dev_priv->fb_size =
			((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
			- dev_priv->fb_location;
	}
}
/* init memory manager - start with all of VRAM and a 32MB GART aperture for now */
int radeon_gem_mm_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 pg_offset;
	int ret;

	/* init TTM underneath */
	drm_bo_driver_init(dev);

	/* size the mappable VRAM memory for now */
	radeon_vram_setup(dev);

	radeon_init_memory_map(dev);

#define VRAM_RESERVE_TEXT (256*1024) /* need to reserve 256k for text mode for now */
	dev_priv->mm.vram_visible -= VRAM_RESERVE_TEXT;
	pg_offset = VRAM_RESERVE_TEXT >> PAGE_SHIFT;
	drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, pg_offset, /*dev_priv->mm.vram_offset >> PAGE_SHIFT,*/
		       ((dev_priv->mm.vram_visible) >> PAGE_SHIFT) - 16,
		       0);

	/* chips newer than r600 aren't wired up for the GART path yet */
	if (dev_priv->chip_family > CHIP_R600) {
		dev_priv->mm_enabled = true;
		return 0;
	}

	dev_priv->mm.gart_size = (32 * 1024 * 1024);
	dev_priv->mm.gart_start = 0;
	ret = radeon_gart_init(dev);
	if (ret)
		return -EINVAL;

	drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
		       dev_priv->mm.gart_size >> PAGE_SHIFT,
		       0);

	/* need to allocate some objects in the GART */
	/* ring + ring read ptr */
	ret = radeon_alloc_gart_objects(dev);
	if (ret) {
		radeon_gem_mm_fini(dev);
		return -EINVAL;
	}

	dev_priv->mm_enabled = true;
	return 0;
}
void radeon_gem_mm_fini(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	radeon_gem_dma_bufs_destroy(dev);
	radeon_gem_ib_destroy(dev);

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.ring_read.bo) {
		drm_bo_kunmap(&dev_priv->mm.ring_read.kmap);
		drm_bo_usage_deref_locked(&dev_priv->mm.ring_read.bo);
	}

	if (dev_priv->mm.ring.bo) {
		drm_bo_kunmap(&dev_priv->mm.ring.kmap);
		drm_bo_usage_deref_locked(&dev_priv->mm.ring.bo);
	}

	if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
		DRM_DEBUG("delaying takedown of TTM memory\n");
	}

	if (dev_priv->flags & RADEON_IS_PCIE) {
		if (dev_priv->mm.pcie_table_backup) {
			kfree(dev_priv->mm.pcie_table_backup);
			dev_priv->mm.pcie_table_backup = NULL;
		}
		if (dev_priv->mm.pcie_table.bo) {
			drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap);
			drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo);
		}
	}

	if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
		DRM_DEBUG("delaying takedown of VRAM memory\n");
	}

	mutex_unlock(&dev->struct_mutex);

	drm_bo_driver_finish(dev);
	dev_priv->mm_enabled = false;
}
int radeon_gem_object_pin(struct drm_gem_object *obj,
			  uint32_t alignment, uint32_t pin_domain)
{
	struct drm_radeon_gem_object *obj_priv;
	int ret;
	uint32_t flags = DRM_BO_FLAG_NO_EVICT;
	uint32_t mask = DRM_BO_FLAG_NO_EVICT;

	obj_priv = obj->driver_private;

	if (pin_domain) {
		mask |= DRM_BO_MASK_MEM;
		if (pin_domain == RADEON_GEM_DOMAIN_GTT)
			flags |= DRM_BO_FLAG_MEM_TT;
		else if (pin_domain == RADEON_GEM_DOMAIN_VRAM)
			flags |= DRM_BO_FLAG_MEM_VRAM;
		else
			return -EINVAL;
	}
	ret = drm_bo_do_validate(obj_priv->bo, flags, mask,
				 DRM_BO_HINT_DONT_FENCE, 0);

	return ret;
}
int radeon_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_radeon_gem_object *obj_priv;
	int ret;

	obj_priv = obj->driver_private;

	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
				 DRM_BO_HINT_DONT_FENCE, 0);

	return ret;
}
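
/* Indirect buffer pool: RADEON_NUM_IB (1MB / 64k = 16) kernel-mapped BOs in
 * the GART, tracked with a simple allocation bitmap; when every slot is in
 * use we wait on an in-flight IB's BO and reuse its slot. */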
#define RADEON_IB_MEMORY (1*1024*1024)
#define RADEON_IB_SIZE (65536)

#define RADEON_NUM_IB (RADEON_IB_MEMORY / RADEON_IB_SIZE)

int radeon_gem_ib_get(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i, index = -1;
	int ret;

	for (i = 0; i < RADEON_NUM_IB; i++) {
		if (!(dev_priv->ib_alloc_bitmap & (1 << i))) {
			index = i;
			break;
		}
	}

	/* if all in use we need to wait */
	if (index == -1) {
		for (i = 0; i < RADEON_NUM_IB; i++) {
			if (dev_priv->ib_alloc_bitmap & (1 << i)) {
				mutex_lock(&dev_priv->ib_objs[i]->bo->mutex);
				ret = drm_bo_wait(dev_priv->ib_objs[i]->bo, 0, 1, 0, 0);
				mutex_unlock(&dev_priv->ib_objs[i]->bo->mutex);
				if (ret)
					continue;
				dev_priv->ib_alloc_bitmap &= ~(1 << i);
				index = i;
				break;
			}
		}
	}

	if (index == -1) {
		DRM_ERROR("Major case fail to allocate IB from freelist %x\n", dev_priv->ib_alloc_bitmap);
		return -EINVAL;
	}

	if (dwords > RADEON_IB_SIZE / sizeof(uint32_t))
		return -EINVAL;

	ret = drm_bo_do_validate(dev_priv->ib_objs[index]->bo, 0,
				 DRM_BO_FLAG_NO_EVICT,
				 0, 0);
	if (ret) {
		DRM_ERROR("Failed to validate IB %d\n", index);
		return -EINVAL;
	}

	*card_offset = dev_priv->gart_vm_start + dev_priv->ib_objs[index]->bo->offset;
	*ib = dev_priv->ib_objs[index]->kmap.virtual;
	dev_priv->ib_alloc_bitmap |= (1 << index);
	return 0;
}
static void radeon_gem_ib_free(struct drm_device *dev, void *ib, uint32_t dwords)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_fence_object *fence = NULL;
	int ret;
	int i;

	for (i = 0; i < RADEON_NUM_IB; i++) {
		if (dev_priv->ib_objs[i]->kmap.virtual == ib) {
			/* emit a fence object */
			ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
			if (ret)
				drm_putback_buffer_objects(dev);

			/* dereference the fence object */
			if (fence)
				drm_fence_usage_deref_unlocked(&fence);
		}
	}
}
static int radeon_gem_ib_destroy(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;

	if (dev_priv->ib_objs) {
		for (i = 0; i < RADEON_NUM_IB; i++) {
			if (dev_priv->ib_objs[i]) {
				drm_bo_kunmap(&dev_priv->ib_objs[i]->kmap);
				drm_bo_usage_deref_unlocked(&dev_priv->ib_objs[i]->bo);
			}
			drm_free(dev_priv->ib_objs[i], sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
		}
		drm_free(dev_priv->ib_objs, RADEON_NUM_IB*sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
	}
	dev_priv->ib_objs = NULL;
	return 0;
}
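
/* Relocation entry layout (as consumed below): reloc[1] = GEM handle,
 * reloc[2] = read domains, reloc[3] = write domain; the returned offset is
 * the BO offset rebased into VRAM or GART address space. */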
static int radeon_gem_relocate(struct drm_device *dev, struct drm_file *file_priv,
			       uint32_t *reloc, uint32_t *offset)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	/* relocate the handle */
	uint32_t read_domains = reloc[2];
	uint32_t write_domain = reloc[3];
	struct drm_gem_object *obj;
	uint32_t flags = 0;
	struct drm_radeon_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, reloc[1]);
	if (!obj)
		return -EINVAL;

	obj_priv = obj->driver_private;
	radeon_gem_set_domain(obj, read_domains, write_domain, &flags, false);

	obj_priv->bo->mem.flags &= ~DRM_BO_FLAG_CLEAN;
	obj_priv->bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN;

	if (flags == DRM_BO_FLAG_MEM_VRAM)
		*offset = obj_priv->bo->offset + dev_priv->fb_location;
	else if (flags == DRM_BO_FLAG_MEM_TT)
		*offset = obj_priv->bo->offset + dev_priv->gart_vm_start;

	/* BAD BAD BAD - LINKED LIST THE OBJS and UNREF ONCE IB is SUBMITTED */
	drm_gem_object_unreference(obj);
	return 0;
}
/* allocate 1MB of 64k IBs the kernel can keep mapped */
static int radeon_gem_ib_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	int ret;

	dev_priv->ib_objs = drm_calloc(RADEON_NUM_IB, sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
	if (!dev_priv->ib_objs)
		goto free_all;

	for (i = 0; i < RADEON_NUM_IB; i++) {
		dev_priv->ib_objs[i] = drm_calloc(1, sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
		if (!dev_priv->ib_objs[i])
			goto free_all;

		ret = drm_buffer_object_create(dev, RADEON_IB_SIZE,
					       drm_bo_type_kernel,
					       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
					       DRM_BO_FLAG_MAPPABLE, 0,
					       0, 0, &dev_priv->ib_objs[i]->bo);
		if (ret)
			goto free_all;

		ret = drm_bo_kmap(dev_priv->ib_objs[i]->bo, 0, RADEON_IB_SIZE >> PAGE_SHIFT,
				  &dev_priv->ib_objs[i]->kmap);
		if (ret)
			goto free_all;
	}

	dev_priv->ib_alloc_bitmap = 0;

	dev_priv->cs.ib_get = radeon_gem_ib_get;
	dev_priv->cs.ib_free = radeon_gem_ib_free;

	radeon_cs_init(dev);
	dev_priv->cs.relocate = radeon_gem_relocate;
	return 0;

free_all:
	radeon_gem_ib_destroy(dev);
	return -ENOMEM;
}
#define RADEON_DMA_BUFFER_SIZE (64 * 1024)
#define RADEON_DMA_BUFFER_COUNT (16)

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
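
/* This mirrors drm_addbufs_agp(), but the buffers live inside a single TTM
 * BO validated into the GART (dev_priv->mm.dma_bufs) instead of an AGP
 * aperture region. */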
static int radeon_gem_addbufs(struct drm_device *dev)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = RADEON_DMA_BUFFER_COUNT;
	order = drm_order(RADEON_DMA_BUFFER_SIZE);
	size = 1 << order;

	alignment = PAGE_ALIGN(size);
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev_priv->mm.dma_bufs.bo->offset;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = dev_priv->gart_vm_start + agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	dma->flags = _DRM_DMA_USE_SG;
	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int radeon_gem_dma_bufs_init(struct drm_device *dev)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;
	int size = RADEON_DMA_BUFFER_SIZE * RADEON_DMA_BUFFER_COUNT;
	int ret;

	ret = drm_dma_setup(dev);
	if (ret < 0)
		return ret;

	ret = drm_buffer_object_create(dev, size, drm_bo_type_device,
				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_NO_EVICT |
				       DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE, 0,
				       0, 0, &dev_priv->mm.dma_bufs.bo);
	if (ret) {
		DRM_ERROR("Failed to create DMA bufs\n");
		return -ENOMEM;
	}

	ret = drm_bo_kmap(dev_priv->mm.dma_bufs.bo, 0, size >> PAGE_SHIFT,
			  &dev_priv->mm.dma_bufs.kmap);
	if (ret) {
		DRM_ERROR("Failed to mmap DMA buffers\n");
		return -ENOMEM;
	}

	radeon_gem_addbufs(dev);

	DRM_DEBUG("%x %d\n", dev_priv->mm.dma_bufs.bo->map_list.hash.key, size);
	dev->agp_buffer_token = dev_priv->mm.dma_bufs.bo->map_list.hash.key << PAGE_SHIFT;
	dev_priv->mm.fake_agp_map.handle = dev_priv->mm.dma_bufs.kmap.virtual;
	dev_priv->mm.fake_agp_map.size = size;

	dev->agp_buffer_map = &dev_priv->mm.fake_agp_map;
	dev_priv->gart_buffers_offset = dev_priv->mm.dma_bufs.bo->offset + dev_priv->gart_vm_start;
	return 0;
}
static void radeon_gem_dma_bufs_destroy(struct drm_device *dev)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;

	drm_dma_takedown(dev);

	if (dev_priv->mm.dma_bufs.bo) {
		drm_bo_kunmap(&dev_priv->mm.dma_bufs.kmap);
		drm_bo_usage_deref_unlocked(&dev_priv->mm.dma_bufs.bo);
	}
}
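
/* Look up a GEM object by its global (flinked) name and take a reference
 * under object_name_lock; returns NULL if the name isn't known. */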
static struct drm_gem_object *gem_object_get(struct drm_device *dev, uint32_t name)
{
	struct drm_gem_object *obj;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	return obj;
}
void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;

	/* update front_pitch_offset and back_pitch_offset */
	obj = gem_object_get(dev, sarea_priv->front_handle);
	if (obj) {
		obj_priv = obj->driver_private;

		dev_priv->front_offset = obj_priv->bo->offset;
		dev_priv->front_pitch_offset = (((sarea_priv->front_pitch / 64) << 22) |
						((obj_priv->bo->offset
						  + dev_priv->fb_location) >> 10));
		drm_gem_object_unreference(obj);
	}

	obj = gem_object_get(dev, sarea_priv->back_handle);
	if (obj) {
		obj_priv = obj->driver_private;
		dev_priv->back_offset = obj_priv->bo->offset;
		dev_priv->back_pitch_offset = (((sarea_priv->back_pitch / 64) << 22) |
					       ((obj_priv->bo->offset
						 + dev_priv->fb_location) >> 10));
		drm_gem_object_unreference(obj);
	}
	dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
}