/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"
#include "nv50_kms_wrapper.h"
static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
            struct drm_file *file_priv)
{
        /* Maybe cut off the start of an existing block */
        if (start > p->start) {
                struct mem_block *newblock =
                        drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start;
                newblock->size = p->size - (start - p->start);
                newblock->file_priv = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size -= newblock->size;
                p = newblock;
        }

        /* Maybe cut off the end of an existing block */
        if (size < p->size) {
                struct mem_block *newblock =
                        drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start + size;
                newblock->size = p->size - size;
                newblock->file_priv = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size = size;
        }

out:
        /* Our block is in the middle */
        p->file_priv = file_priv;
        return p;
}
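/*
 * Illustrative example (not from the original source): splitting the free
 * block [0x0000, 0x10000) with start=0x4000, size=0x8000 yields three blocks,
 *
 *      [0x0000, 0x4000)  free
 *      [0x4000, 0xc000)  owned by file_priv
 *      [0xc000, 0x10000) free
 *
 * with only the middle block returned to the caller.
 */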
static struct mem_block *
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
                        int align2, struct drm_file *file_priv, int tail)
{
        struct mem_block *p;
        uint64_t mask = (1ULL << align2) - 1;

        if (!heap)
                return NULL;

        if (tail) {
                list_for_each_prev(p, heap) {
                        uint64_t start = ((p->start + p->size) - size) & ~mask;

                        if (p->file_priv == NULL && start >= p->start &&
                            start + size <= p->start + p->size)
                                return split_block(p, start, size, file_priv);
                }
        } else {
                list_for_each(p, heap) {
                        uint64_t start = (p->start + mask) & ~mask;

                        if (p->file_priv == NULL &&
                            start + size <= p->start + p->size)
                                return split_block(p, start, size, file_priv);
                }
        }

        return NULL;
}
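/*
 * Worked example (illustrative only): align2 = 16 gives mask = 0xffff, so a
 * tail allocation of size 0x2000 from a free block spanning [0x0, 0x23000)
 * tries start = (0x23000 - 0x2000) & ~0xffff = 0x20000, which fits, leaving
 * [0x0, 0x20000) and [0x22000, 0x23000) free around the new block.
 */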
static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
{
        struct mem_block *p;

        list_for_each(p, heap)
                if (p->start == start)
                        return p;

        return NULL;
}

struct mem_block *find_block_by_handle(struct mem_block *heap, drm_handle_t handle)
{
        struct mem_block *p;

        list_for_each(p, heap)
                if (p->map_handle == handle)
                        return p;

        return NULL;
}
void nouveau_mem_free_block(struct mem_block *p)
{
        p->file_priv = NULL;

        /* Assumes a single contiguous range.  Needs a special file_priv in
         * 'heap' to stop it being subsumed.
         */
        if (p->next->file_priv == NULL) {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
                drm_free(q, sizeof(*q), DRM_MEM_BUFS);
        }

        if (p->prev->file_priv == NULL) {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
                drm_free(p, sizeof(*p), DRM_MEM_BUFS);
        }
}
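/*
 * Coalescing sketch (illustrative): freeing the middle block of
 * [free][used][free] collapses all three into one free block; the two
 * drm_free() calls above release the now-redundant neighbour nodes.
 */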
/* Initialize.  How to check for an uninitialized heap?
 */
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
                          uint64_t size)
{
        struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);

        if (!blocks)
                return -ENOMEM;

        *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
        if (!*heap) {
                drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
                return -ENOMEM;
        }

        blocks->start = start;
        blocks->size = size;
        blocks->file_priv = NULL;
        blocks->next = blocks->prev = *heap;

        memset(*heap, 0, sizeof(**heap));
        (*heap)->file_priv = (struct drm_file *) -1;
        (*heap)->next = (*heap)->prev = blocks;
        return 0;
}
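/*
 * Resulting layout (illustrative): the heap is a circular doubly-linked list
 * with one dummy node and one all-free block,
 *
 *      (*heap) <-> blocks{start, size, file_priv=NULL} <-> (*heap)
 *
 * The dummy's file_priv of (struct drm_file *) -1 acts as a sentinel: it never
 * compares equal to NULL or to a real file, so the coalescing passes can
 * never merge a block across the list head.
 */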
/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
        struct mem_block *p;

        if (!heap || !heap->next)
                return;

        list_for_each(p, heap) {
                if (p->file_priv == file_priv)
                        p->file_priv = NULL;
        }

        /* Assumes a single contiguous range.  Needs a special file_priv in
         * 'heap' to stop it being subsumed.
         */
        list_for_each(p, heap) {
                while ((p->file_priv == NULL) && (p->next->file_priv == NULL) &&
                       (p->next != heap)) {
                        struct mem_block *q = p->next;
                        p->size += q->size;
                        p->next = q->next;
                        p->next->prev = p;
                        drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
                }
        }
}
/*
 * Cleanup everything
 */
void nouveau_mem_takedown(struct mem_block **heap)
{
        struct mem_block *p;

        if (!*heap)
                return;

        for (p = (*heap)->next; p != *heap;) {
                struct mem_block *q = p;
                p = p->next;
                drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
        }

        drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
        *heap = NULL;
}
void nouveau_mem_close(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_mem_takedown(&dev_priv->agp_heap);
        nouveau_mem_takedown(&dev_priv->fb_heap);
        if (dev_priv->pci_heap)
                nouveau_mem_takedown(&dev_priv->pci_heap);
}
/*XXX won't work on BSD because of pci_read_config_dword */
static uint64_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct pci_dev *bridge;
        uint32_t mem;

        bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
        if (!bridge) {
                DRM_ERROR("no bridge device\n");
                return 0;
        }

        if (dev_priv->flags & NV_NFORCE) {
                pci_read_config_dword(bridge, 0x7C, &mem);
                return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
        } else if (dev_priv->flags & NV_NFORCE2) {
                pci_read_config_dword(bridge, 0x84, &mem);
                return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
        }

        DRM_ERROR("impossible: neither nForce nor nForce2 flag set\n");
#else
        DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
#endif

        return 0;
}
/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        switch (dev_priv->card_type) {
        case NV_04:
        case NV_05:
                if (NV_READ(NV03_BOOT_0) & 0x00000100) {
                        return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
                } else
                switch (NV_READ(NV03_BOOT_0) & NV03_BOOT_0_RAM_AMOUNT) {
                case NV04_BOOT_0_RAM_AMOUNT_32MB:
                        return 32*1024*1024;
                case NV04_BOOT_0_RAM_AMOUNT_16MB:
                        return 16*1024*1024;
                case NV04_BOOT_0_RAM_AMOUNT_8MB:
                        return 8*1024*1024;
                case NV04_BOOT_0_RAM_AMOUNT_4MB:
                        return 4*1024*1024;
                }
                break;
        case NV_10:
        case NV_11:
        case NV_17:
        case NV_20:
        case NV_30:
        case NV_40:
        case NV_44:
        case NV_50:
        default:
                if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
                        return nouveau_mem_fb_amount_igp(dev);
                } else {
                        uint64_t mem;

                        mem = (NV_READ(NV04_FIFO_DATA) &
                               NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
                              NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
                        return mem*1024*1024;
                }
                break;
        }

        DRM_ERROR("Unable to detect video ram size. "
                  "Please report your setup to " DRIVER_EMAIL "\n");
        return 0;
}
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

        saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
        saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);

        /* clear busmaster bit */
        NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
        /* clear SBA and AGP bits */
        NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

        /* power cycle pgraph, if enabled */
        pmc_enable = NV_READ(NV03_PMC_ENABLE);
        if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
                NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
                NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
                         NV_PMC_ENABLE_PGRAPH);
        }

        /* and restore (gives effect of resetting AGP) */
        NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
        NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
static int
nouveau_mem_init_agp(struct drm_device *dev, int ttm)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_agp_info info;
        struct drm_agp_mode mode;
        int ret;

        nouveau_mem_reset_agp(dev);

        ret = drm_agp_acquire(dev);
        if (ret) {
                DRM_ERROR("Unable to acquire AGP: %d\n", ret);
                return ret;
        }

        ret = drm_agp_info(dev, &info);
        if (ret) {
                DRM_ERROR("Unable to get AGP info: %d\n", ret);
                return ret;
        }

        /* see agp.h for the AGPSTAT_* modes available */
        mode.mode = info.mode;
        ret = drm_agp_enable(dev, mode);
        if (ret) {
                DRM_ERROR("Unable to enable AGP: %d\n", ret);
                return ret;
        }

        if (!ttm) {
                struct drm_agp_buffer agp_req;
                struct drm_agp_binding bind_req;

                agp_req.size = info.aperture_size;
                agp_req.type = 0;
                ret = drm_agp_alloc(dev, &agp_req);
                if (ret) {
                        DRM_ERROR("Unable to alloc AGP: %d\n", ret);
                        return ret;
                }

                bind_req.handle = agp_req.handle;
                bind_req.offset = 0;
                ret = drm_agp_bind(dev, &bind_req);
                if (ret) {
                        DRM_ERROR("Unable to bind AGP: %d\n", ret);
                        return ret;
                }
        }

        dev_priv->gart_info.type = NOUVEAU_GART_AGP;
        dev_priv->gart_info.aper_base = info.aperture_base;
        dev_priv->gart_info.aper_size = info.aperture_size;
        return 0;
}
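/*
 * Usage note (derived from the callers below): nouveau_mem_init_ttm() passes
 * ttm=1 and lets the buffer-object code manage the aperture, while
 * nouveau_mem_init() passes ttm=0 and binds the whole aperture up front for
 * the old heap-based manager.
 */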
int
nouveau_mem_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t vram_size, bar1_size;
        int ret;

        dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
        dev_priv->fb_phys = drm_get_resource_start(dev, 1);
        dev_priv->gart_info.type = NOUVEAU_GART_NONE;

        drm_bo_driver_init(dev);

        /* non-mappable vram */
        dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
        dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
        vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
        bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
        if (bar1_size < vram_size) {
                if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
                                          bar1_size, vram_size - bar1_size, 1))) {
                        DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
                        return ret;
                }
                vram_size = bar1_size;
        }

        /* mappable vram */
        if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) {
                DRM_ERROR("Failed VRAM mm init: %d\n", ret);
                return ret;
        }

#if !defined(__powerpc__) && !defined(__ia64__)
        if (drm_device_is_agp(dev) && dev->agp) {
                if ((ret = nouveau_mem_init_agp(dev, 1)))
                        DRM_ERROR("Error initialising AGP: %d\n", ret);
        }
#endif

        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
                if ((ret = nouveau_sgdma_init(dev)))
                        DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
        }

        if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
                                  dev_priv->gart_info.aper_size >>
                                  PAGE_SHIFT, 1))) {
                DRM_ERROR("Failed TT mm init: %d\n", ret);
                return ret;
        }

        /* The old heap-based MM gets the upper three quarters of VRAM */
        vram_size >>= 2;
        vram_size <<= PAGE_SHIFT;
        DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
        if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
                return -ENOMEM;

        return 0;
}
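/*
 * Sizing example (illustrative): with a 256 MiB BAR1 and 512 MiB of VRAM,
 * bar1_size < vram_size above, so pages 0..bar1_size-1 go to DRM_BO_MEM_VRAM
 * (CPU-mappable) and the remaining pages to DRM_BO_MEM_PRIV0 (non-mappable).
 */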
int nouveau_mem_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t fb_size;
        int ret = 0;

        dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
        dev_priv->fb_phys = 0;
        dev_priv->gart_info.type = NOUVEAU_GART_NONE;

        /* setup a mtrr over the FB */
        dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
                                         nouveau_mem_fb_amount(dev),
                                         DRM_MTRR_WC);

        /* Init FB */
        dev_priv->fb_phys = drm_get_resource_start(dev, 1);
        fb_size = nouveau_mem_fb_amount(dev);
        /* On G80, limit VRAM to 512MiB temporarily due to limits in how
         * we handle VRAM page tables.
         */
        if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
                fb_size = (512 * 1024 * 1024);
        /* On at least NV40, RAMIN is actually at the end of vram.
         * We don't want to allocate this... */
        if (dev_priv->card_type >= NV_40)
                fb_size -= dev_priv->ramin_rsvd_vram;
        dev_priv->fb_available_size = fb_size;
        DRM_DEBUG("Available VRAM: %dKiB\n", fb_size >> 10);

        if (fb_size > 256*1024*1024) {
                /* On cards with > 256MiB, you can't map everything.
                 * So we create a second FB heap for that type of memory */
                if (nouveau_mem_init_heap(&dev_priv->fb_heap,
                                          0, 256*1024*1024))
                        return -ENOMEM;
                if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
                                          256*1024*1024, fb_size - 256*1024*1024))
                        return -ENOMEM;
        } else {
                if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
                        return -ENOMEM;
                dev_priv->fb_nomap_heap = NULL;
        }
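        /*
         * Heap split example (illustrative): a 512 MiB card ends up with
         * fb_heap covering [0, 256 MiB) for CPU-mappable allocations and
         * fb_nomap_heap covering [256 MiB, 512 MiB) for everything else.
         */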
#if !defined(__powerpc__) && !defined(__ia64__)
        /* Init AGP / NV50 PCIEGART */
        if (drm_device_is_agp(dev) && dev->agp) {
                if ((ret = nouveau_mem_init_agp(dev, 0)))
                        DRM_ERROR("Error initialising AGP: %d\n", ret);
        }
#endif

        /*Note: this is *not* just NV50 code, but only used on NV50 for now */
        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
            dev_priv->card_type >= NV_50) {
                ret = nouveau_sgdma_init(dev);
                if (!ret) {
                        ret = nouveau_sgdma_nottm_hack_init(dev);
                        if (ret)
                                nouveau_sgdma_takedown(dev);
                }

                if (ret)
                        DRM_ERROR("Error initialising SG DMA: %d\n", ret);
        }

        if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
                if (nouveau_mem_init_heap(&dev_priv->agp_heap,
                                          0, dev_priv->gart_info.aper_size)) {
                        if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
                                nouveau_sgdma_nottm_hack_takedown(dev);
                                nouveau_sgdma_takedown(dev);
                        }
                }
        }

        /* NV04-NV40 PCIEGART */
        if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
                struct drm_scatter_gather sgreq;

                DRM_DEBUG("Allocating sg memory for PCI DMA\n");
                sgreq.size = 16 << 20; /* 16MiB of PCI scatter-gather zone */

                if (drm_sg_alloc(dev, &sgreq)) {
                        DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
                                  " pages for PCI DMA!", sgreq.size >> 20);
                } else if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
                                                 dev->sg->pages * PAGE_SIZE)) {
                        DRM_ERROR("Unable to initialize pci_heap!");
                }
        }

        /* G8x: Allocate shared page table to map real VRAM pages into */
        if (dev_priv->card_type >= NV_50) {
                unsigned size = ((512 * 1024 * 1024) / 65536) * 8;

                ret = nouveau_gpuobj_new(dev, NULL, size, 0,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ALLOW_NO_REFS,
                                         &dev_priv->vm_vram_pt);
                if (ret) {
                        DRM_ERROR("Error creating VRAM page table: %d\n", ret);
                        return ret;
                }
        }

        return 0;
}
struct mem_block *
nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size,
                  int flags, struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct mem_block *block;
        int type, tail = !(flags & NOUVEAU_MEM_USER);

        /*
         * Make things easier on ourselves: all allocations are page-aligned.
         * We need that to map allocated regions into the user space
         */
        if (alignment < PAGE_SHIFT)
                alignment = PAGE_SHIFT;

        /* Align allocation sizes to 64KiB blocks on G8x.  We use a 64KiB
         * page size in the GPU VM.
         */
        if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) {
                size = (size + 65535) & ~65535;
                if (alignment < 16)
                        alignment = 16;
        }

        /*
         * Warn about 0 sized allocations, but let it go through. It'll return 1 page
         */
        if (size == 0)
                DRM_INFO("warning: 0 byte allocation\n");

        /*
         * Keep alloc size a multiple of the page size to keep drm_addmap() happy
         */
        if (size & (~PAGE_MASK))
                size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
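        /*
         * Rounding example (illustrative, assuming 4 KiB pages): a request of
         * size 0x1801 becomes 0x2000 here; a G8x FB allocation was already
         * rounded up to a 0x10000 multiple by the 64 KiB alignment above.
         */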
#define NOUVEAU_MEM_ALLOC_AGP {\
        type = NOUVEAU_MEM_AGP;\
        block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
                                        alignment, file_priv, tail); \
        if (block) goto alloc_ok;\
        }

#define NOUVEAU_MEM_ALLOC_PCI {\
        type = NOUVEAU_MEM_PCI;\
        block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
                                        alignment, file_priv, tail); \
        if (block) goto alloc_ok;\
        }

#define NOUVEAU_MEM_ALLOC_FB {\
        type = NOUVEAU_MEM_FB;\
        if (!(flags & NOUVEAU_MEM_MAPPED)) {\
                block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
                                                size, alignment,\
                                                file_priv, tail); \
                if (block) goto alloc_ok;\
        }\
        block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
                                        alignment, file_priv, tail);\
        if (block) goto alloc_ok;\
        }
        if (flags & NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
        if (flags & NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
        if (flags & NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
        if (flags & NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
        if (flags & NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
        if (flags & NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI

        return NULL;
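        /*
         * Fallback example (illustrative): flags = NOUVEAU_MEM_FB |
         * NOUVEAU_MEM_AGP_ACCEPTABLE first tries the FB heaps, and only if
         * that fails falls through to the AGP heap before giving up.
         */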
alloc_ok:
        block->flags = type;

        /* On G8x, map memory into VM */
        if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
            !(flags & NOUVEAU_MEM_NOVM)) {
                struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
                unsigned offset = block->start;
                unsigned count = block->size / 65536;
                unsigned tile = 0;

                if (!pt) {
                        DRM_ERROR("vm alloc without vm pt\n");
                        nouveau_mem_free_block(block);
                        return NULL;
                }

                /* The tiling stuff is *not* what NVIDIA does - but both the
                 * 2D and 3D engines seem happy with this simpler method.
                 * Should look into why NVIDIA does what it does at some point.
                 */
                if (flags & NOUVEAU_MEM_TILE) {
                        if (flags & NOUVEAU_MEM_TILE_ZETA)
                                tile = 0x00002800;
                        else
                                tile = 0x00007000;
                }

                while (count--) {
                        unsigned pte = offset / 65536;

                        INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
                        INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
                        offset += 65536;
                }
        } else {
                block->flags |= NOUVEAU_MEM_NOVM;
        }
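        /*
         * PTE layout example (illustrative): each 64 KiB page gets two 32-bit
         * PTE words; block->start = 0x30000 maps at pte = 3, so words 6 and 7
         * of the page table receive 0x30001 (address | present) and the tile
         * flags respectively.
         */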
        if (flags & NOUVEAU_MEM_MAPPED) {
                struct drm_map_list *entry;
                int ret = 0;

                block->flags |= NOUVEAU_MEM_MAPPED;

                if (type == NOUVEAU_MEM_AGP) {
                        if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
                                ret = drm_addmap(dev, block->start, block->size,
                                                 _DRM_AGP, 0, &block->map);
                        else
                                ret = drm_addmap(dev, block->start, block->size,
                                                 _DRM_SCATTER_GATHER, 0,
                                                 &block->map);
                } else if (type == NOUVEAU_MEM_FB)
                        ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
                                         block->size, _DRM_FRAME_BUFFER,
                                         0, &block->map);
                else if (type == NOUVEAU_MEM_PCI)
                        ret = drm_addmap(dev, block->start, block->size,
                                         _DRM_SCATTER_GATHER, 0, &block->map);

                if (ret) {
                        nouveau_mem_free_block(block);
                        return NULL;
                }

                entry = drm_find_matching_map(dev, block->map);
                if (!entry) {
                        nouveau_mem_free_block(block);
                        return NULL;
                }
                block->map_handle = entry->user_token;
        }

        DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n",
                  block->size, block->start, block->flags);
        return block;
}
void nouveau_mem_free(struct drm_device *dev, struct mem_block *block)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);

        /* Check if the deallocations cause problems for our modesetting system. */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                if (dev_priv->card_type >= NV_50) {
                        struct nv50_crtc *crtc = NULL;
                        struct nv50_display *display = nv50_get_display(dev);

                        list_for_each_entry(crtc, &display->crtcs, item) {
                                if (crtc->fb->block == block) {
                                        crtc->fb->block = NULL;

                                        crtc->blank(crtc, true);
                                }

                                if (crtc->cursor->block == block) {
                                        crtc->cursor->block = NULL;

                                        if (crtc->cursor->visible)
                                                crtc->cursor->hide(crtc);
                                }
                        }
                }
        }

        if (block->flags & NOUVEAU_MEM_MAPPED)
                drm_rmmap(dev, block->map);

        /* G8x: Remove pages from vm */
        if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
            !(block->flags & NOUVEAU_MEM_NOVM)) {
                struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
                unsigned offset = block->start;
                unsigned count = block->size / 65536;

                if (!pt) {
                        DRM_ERROR("vm free without vm pt\n");
                        goto out;
                }

                while (count--) {
                        unsigned pte = offset / 65536;

                        INSTANCE_WR(pt, (pte * 2) + 0, 0);
                        INSTANCE_WR(pt, (pte * 2) + 1, 0);
                        offset += 65536;
                }
        }

out:
        nouveau_mem_free_block(block);
}
/*
 * Ioctls
 */

int
nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_mem_alloc *alloc = data;
        struct mem_block *block;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        if (alloc->flags & NOUVEAU_MEM_INTERNAL)
                return -EINVAL;

        block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
                                  alloc->flags | NOUVEAU_MEM_USER, file_priv);
        if (!block)
                return -ENOMEM;

        alloc->map_handle = block->map_handle;
        alloc->offset = block->start;
        alloc->flags = block->flags;

        if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
                alloc->offset += 512*1024*1024;

        return 0;
}
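/*
 * Offset example (illustrative): user space sees FB offsets biased by
 * 512 MiB (0x20000000) on G8x, so a block at heap offset 0x10000 is reported
 * as 0x20010000; nouveau_ioctl_mem_free() below subtracts the same bias
 * before looking the block up again.
 */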
int
nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_mem_free *memfree = data;
        struct mem_block *block;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
                memfree->offset -= 512*1024*1024;

        block = NULL;
        if (memfree->flags & NOUVEAU_MEM_FB)
                block = find_block(dev_priv->fb_heap, memfree->offset);
        else if (memfree->flags & NOUVEAU_MEM_AGP)
                block = find_block(dev_priv->agp_heap, memfree->offset);
        else if (memfree->flags & NOUVEAU_MEM_PCI)
                block = find_block(dev_priv->pci_heap, memfree->offset);
        if (!block)
                return -EFAULT;
        if (block->file_priv != file_priv)
                return -EPERM;

        nouveau_mem_free(dev, block);
        return 0;
}
int
nouveau_ioctl_mem_tile(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_mem_tile *memtile = data;
        struct mem_block *block = NULL;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        if (dev_priv->card_type < NV_50)
                return -EINVAL;

        if (memtile->flags & NOUVEAU_MEM_FB) {
                memtile->offset -= 512*1024*1024;
                block = find_block(dev_priv->fb_heap, memtile->offset);
        }

        if (!block)
                return -EINVAL;

        if (block->file_priv != file_priv)
                return -EPERM;

        {
                struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
                unsigned offset = block->start + memtile->delta;
                unsigned count = memtile->size / 65536;
                unsigned tile = 0;

                if (memtile->flags & NOUVEAU_MEM_TILE) {
                        if (memtile->flags & NOUVEAU_MEM_TILE_ZETA)
                                tile = 0x00002800;
                        else
                                tile = 0x00007000;
                }

                while (count--) {
                        unsigned pte = offset / 65536;

                        INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
                        INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
                        offset += 65536;
                }
        }

        return 0;
}