/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"
static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
	    struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
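
/*
 * Worked example (comment added for clarity; not in the original): asking
 * split_block() for start=0x10000, size=0x20000 out of a free block
 * [0x0, 0x100000) yields [0x0, 0x10000) free, [0x10000, 0x30000) owned by
 * file_priv, and [0x30000, 0x100000) free.  If drm_alloc() fails for a
 * boundary block, that split is simply skipped and the caller is handed a
 * larger region than requested.
 */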
static struct mem_block *
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
			int align2, struct drm_file *file_priv, int tail)
{
	struct mem_block *p;
	uint64_t mask = ((uint64_t)1 << align2) - 1;

	if (!heap)
		return NULL;

	if (tail) {
		list_for_each_prev(p, heap) {
			uint64_t start = ((p->start + p->size) - size) & ~mask;

			if (p->file_priv == NULL && start >= p->start &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	} else {
		list_for_each(p, heap) {
			uint64_t start = (p->start + mask) & ~mask;

			if (p->file_priv == NULL &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	}

	return NULL;
}
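
/*
 * Alignment arithmetic (added note): align2 is a power-of-two exponent,
 * so align2 = 16 gives mask = 0xffff.  The head-first path rounds
 * p->start up to the next 64KiB boundary ((p->start + mask) & ~mask);
 * the tail path instead rounds the candidate start down, so the block
 * ends flush with the end of the free region.
 */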
static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
{
	struct mem_block *p;

	list_for_each(p, heap)
		if (p->start == start)
			return p;

	return NULL;
}
void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
	}

	if (p->prev->file_priv == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		drm_free(p, sizeof(*p), DRM_MEM_BUFS);
	}
}
/* Initialize.  How to check for an uninitialized heap?
 */
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
			  uint64_t size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);

	if (!blocks)
		return -ENOMEM;

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->file_priv = (struct drm_file *)-1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
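
/*
 * Compiled-out sketch (added for illustration, not part of the driver):
 * the intended life cycle of a heap.  The list head installed above keeps
 * file_priv = -1 so the coalescing in nouveau_mem_free_block() can never
 * merge a real block into the sentinel.
 */
#if 0
static void example_heap_lifecycle(struct drm_file *file_priv)
{
	struct mem_block *heap = NULL, *block;

	if (nouveau_mem_init_heap(&heap, 0, 16 * 1024 * 1024))
		return;

	/* 1MiB, 64KiB-aligned (align2 = 16), carved from the front */
	block = nouveau_mem_alloc_block(heap, 1024 * 1024, 16, file_priv, 0);
	if (block)
		nouveau_mem_free_block(block);

	nouveau_mem_takedown(&heap);
}
#endif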
/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	list_for_each(p, heap) {
		while ((p->file_priv == NULL) && (p->next->file_priv == NULL) &&
		       (p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
		}
	}
}
/*
 * Cleanup everything
 */
void nouveau_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
	}

	drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
	*heap = NULL;
}
void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_mem_takedown(&dev_priv->agp_heap);
	nouveau_mem_takedown(&dev_priv->fb_heap);
	if (dev_priv->pci_heap)
		nouveau_mem_takedown(&dev_priv->pci_heap);
}
/*XXX won't work on BSD because of pci_read_config_dword */
static uint64_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *bridge;
	uint32_t mem;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		DRM_ERROR("no bridge device\n");
		return 0;
	}

	if (dev_priv->flags & NV_NFORCE) {
		pci_read_config_dword(bridge, 0x7C, &mem);
		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
	} else
	if (dev_priv->flags & NV_NFORCE2) {
		pci_read_config_dword(bridge, 0x84, &mem);
		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
	}

	DRM_ERROR("impossible!\n");
#else
	DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
#endif

	return 0;
}
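
/*
 * Decode example (added): the nForce bridge stores the carveout size in
 * MiB-minus-one in bits 10:6 of config register 0x7C, so a field value of
 * 31 means (31 + 1) * 1024 * 1024 = 32MiB.  nForce2 uses bits 10:4 of
 * register 0x84 instead, allowing up to 128MiB.
 */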
/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (dev_priv->card_type) {
	case NV_04:
	case NV_05:
		if (NV_READ(NV03_BOOT_0) & 0x00000100)
			return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;

		switch (NV_READ(NV03_BOOT_0) & NV03_BOOT_0_RAM_AMOUNT) {
		case NV04_BOOT_0_RAM_AMOUNT_32MB: return 32*1024*1024;
		case NV04_BOOT_0_RAM_AMOUNT_16MB: return 16*1024*1024;
		case NV04_BOOT_0_RAM_AMOUNT_8MB:  return 8*1024*1024;
		case NV04_BOOT_0_RAM_AMOUNT_4MB:  return 4*1024*1024;
		}
		break;
	default: /* NV_10 and up read the amount straight from PFIFO */
		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
			return nouveau_mem_fb_amount_igp(dev);
		} else {
			uint64_t mem;

			mem = (NV_READ(NV04_FIFO_DATA) &
			       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
			      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
			return mem*1024*1024;
		}
	}

	DRM_ERROR("Unable to detect video ram size. Please report your setup "
		  "to " DRIVER_EMAIL "\n");
	return 0;
}
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

	saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
	saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);

	/* clear busmaster bit */
	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* clear SBA and AGP bits */
	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

	/* power cycle pgraph, if enabled */
	pmc_enable = NV_READ(NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
static int
nouveau_mem_init_agp(struct drm_device *dev, int ttm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	nouveau_mem_reset_agp(dev);

	ret = drm_agp_acquire(dev);
	if (ret) {
		DRM_ERROR("Unable to acquire AGP: %d\n", ret);
		return ret;
	}
	ret = drm_agp_info(dev, &info);
	if (ret) {
		DRM_ERROR("Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = info.mode;
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		DRM_ERROR("Unable to enable AGP: %d\n", ret);
		return ret;
	}

	if (!ttm) {
		struct drm_agp_buffer agp_req;
		struct drm_agp_binding bind_req;

		agp_req.size = info.aperture_size;
		agp_req.type = 0;
		ret = drm_agp_alloc(dev, &agp_req);
		if (ret) {
			DRM_ERROR("Unable to alloc AGP: %d\n", ret);
			return ret;
		}
		bind_req.handle = agp_req.handle;
		bind_req.offset = 0;
		ret = drm_agp_bind(dev, &bind_req);
		if (ret) {
			DRM_ERROR("Unable to bind AGP: %d\n", ret);
			return ret;
		}
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
	return 0;
}
#define HACK_OLD_MM
int
nouveau_mem_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t vram_size, bar1_size;
	int ret;

	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	drm_bo_driver_init(dev);

	/* non-mappable vram */
	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
	bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
	if (bar1_size < vram_size) {
		if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
					  bar1_size, vram_size - bar1_size, 1))) {
			DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
			return ret;
		}
		vram_size = bar1_size;
	}

	/* mappable vram */
#ifdef HACK_OLD_MM
	vram_size /= 4;
#endif
	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) {
		DRM_ERROR("Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp) {
		if ((ret = nouveau_mem_init_agp(dev, 1)))
			DRM_ERROR("Error initialising AGP: %d\n", ret);
	}
#endif
	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		if ((ret = nouveau_sgdma_init(dev)))
			DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
	}

	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
				  dev_priv->gart_info.aper_size >>
				  PAGE_SHIFT, 1))) {
		DRM_ERROR("Failed TT mm init: %d\n", ret);
		return ret;
	}

#ifdef HACK_OLD_MM
	vram_size <<= PAGE_SHIFT;
	DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
	if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
		return -ENOMEM;
#endif

	return 0;
}
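
/*
 * Added note: under HACK_OLD_MM the TTM manager and the legacy heap split
 * mappable VRAM 1:3.  E.g. with 256MiB of TTM-visible VRAM, the
 * vram_size /= 4 above leaves TTM the first 64MiB and the heap covers
 * [64MiB, 256MiB), which is the (vram_size, vram_size * 3) range passed
 * to nouveau_mem_init_heap().
 */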
int nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fb_size;
	int ret = 0;

	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
	dev_priv->fb_phys = 0;
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	/* setup a mtrr over the FB */
	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 nouveau_mem_fb_amount(dev),
					 DRM_MTRR_WC);

	/* Init FB */
	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	fb_size = nouveau_mem_fb_amount(dev);
	/* On G80, limit VRAM to 512MiB temporarily due to limits in how
	 * we handle VRAM page tables.
	 */
	if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
		fb_size = (512 * 1024 * 1024);
	/* On at least NV40, RAMIN is actually at the end of vram.
	 * We don't want to allocate this... */
	if (dev_priv->card_type >= NV_40)
		fb_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_available_size = fb_size;
	DRM_DEBUG("Available VRAM: %dKiB\n", fb_size >> 10);
	if (fb_size > 256*1024*1024) {
		/* On cards with > 256Mb, you can't map everything.
		 * So we create a second FB heap for that type of memory */
		if (nouveau_mem_init_heap(&dev_priv->fb_heap,
					  0, 256*1024*1024))
			return -ENOMEM;
		if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
					  256*1024*1024, fb_size-256*1024*1024))
			return -ENOMEM;
	} else {
		if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
			return -ENOMEM;
		dev_priv->fb_nomap_heap = NULL;
	}
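
	/*
	 * Added note: e.g. a 512MiB board ends up with fb_heap covering
	 * [0, 256MiB), which stays CPU-mappable through drm_addmap(), and
	 * fb_nomap_heap covering [256MiB, 512MiB) for allocations that
	 * never set NOUVEAU_MEM_MAPPED.
	 */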
#if !defined(__powerpc__) && !defined(__ia64__)
	/* Init AGP / NV50 PCIEGART */
	if (drm_device_is_agp(dev) && dev->agp) {
		if ((ret = nouveau_mem_init_agp(dev, 0)))
			DRM_ERROR("Error initialising AGP: %d\n", ret);
	}
#endif

	/*Note: this is *not* just NV50 code, but only used on NV50 for now */
	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
	    dev_priv->card_type >= NV_50) {
		ret = nouveau_sgdma_init(dev);
		if (!ret) {
			ret = nouveau_sgdma_nottm_hack_init(dev);
			if (ret)
				nouveau_sgdma_takedown(dev);
		}

		if (ret)
			DRM_ERROR("Error initialising SG DMA: %d\n", ret);
	}

	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		if (nouveau_mem_init_heap(&dev_priv->agp_heap,
					  0, dev_priv->gart_info.aper_size)) {
			if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
				nouveau_sgdma_nottm_hack_takedown(dev);
				nouveau_sgdma_takedown(dev);
			}
		}
	}
	/* NV04-NV40 PCIEGART */
	if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
		struct drm_scatter_gather sgreq;

		DRM_DEBUG("Allocating sg memory for PCI DMA\n");
		sgreq.size = 16 << 20; /* 16MB of PCI scatter-gather zone */

		if (drm_sg_alloc(dev, &sgreq)) {
			DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
				  " pages for PCI DMA!", sgreq.size >> 20);
		} else
		if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
					  dev->sg->pages * PAGE_SIZE)) {
			DRM_ERROR("Unable to initialize pci_heap!");
		}
	}
	/* G8x: Allocate shared page table to map real VRAM pages into */
	if (dev_priv->card_type >= NV_50) {
		unsigned size = ((512 * 1024 * 1024) / 65536) * 8;

		ret = nouveau_gpuobj_new(dev, NULL, size, 0,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ALLOW_NO_REFS,
					 &dev_priv->vm_vram_pt);
		if (ret) {
			DRM_ERROR("Error creating VRAM page table: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
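
/*
 * Added note: the page table size above follows from 512MiB of VRAM at a
 * 64KiB GPU page size: 512MiB / 64KiB = 8192 pages, each needing an
 * 8-byte PTE pair, i.e. a 64KiB object.
 */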
struct mem_block *
nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size,
		  int flags, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct mem_block *block;
	int type, tail = !(flags & NOUVEAU_MEM_USER);

	/*
	 * Make things easier on ourselves: all allocations are page-aligned.
	 * We need that to map allocated regions into the user space
	 */
	if (alignment < PAGE_SHIFT)
		alignment = PAGE_SHIFT;

	/* Align allocation sizes to 64KiB blocks on G8x.  We use a 64KiB
	 * page size in the GPU VM.
	 */
	if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) {
		size = (size + 65535) & ~65535;
		if (alignment < 16)
			alignment = 16;
	}

	/*
	 * Warn about 0 sized allocations, but let it go through. It'll return 1 page
	 */
	if (size == 0)
		DRM_INFO("warning : 0 byte allocation\n");

	/*
	 * Keep alloc size a multiple of the page size to keep drm_addmap() happy
	 */
	if (size & (~PAGE_MASK))
		size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
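
	/*
	 * Added example: on G8x a request of 100000 bytes becomes
	 * (100000 + 65535) & ~65535 = 131072, i.e. two 64KiB GPU pages;
	 * on earlier cards only the PAGE_SIZE round-up above applies.
	 */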
#define NOUVEAU_MEM_ALLOC_AGP do {					\
	type = NOUVEAU_MEM_AGP;						\
	block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,	\
					alignment, file_priv, tail);	\
	if (block)							\
		goto alloc_ok;						\
} while (0)

#define NOUVEAU_MEM_ALLOC_PCI do {					\
	type = NOUVEAU_MEM_PCI;						\
	block = nouveau_mem_alloc_block(dev_priv->pci_heap, size,	\
					alignment, file_priv, tail);	\
	if (block)							\
		goto alloc_ok;						\
} while (0)

#define NOUVEAU_MEM_ALLOC_FB do {					\
	type = NOUVEAU_MEM_FB;						\
	if (!(flags & NOUVEAU_MEM_MAPPED)) {				\
		block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
						size, alignment,	\
						file_priv, tail);	\
		if (block)						\
			goto alloc_ok;					\
	}								\
	block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,	\
					alignment, file_priv, tail);	\
	if (block)							\
		goto alloc_ok;						\
} while (0)

	/* Try the requested heap first, then any acceptable fallbacks */
	if (flags & NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB;
	if (flags & NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP;
	if (flags & NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI;
	if (flags & NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB;
	if (flags & NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP;
	if (flags & NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI;

	return NULL;

alloc_ok:
	block->flags = type;
	/* On G8x, map memory into VM */
	if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
	    !(flags & NOUVEAU_MEM_NOVM)) {
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start;
		unsigned count = block->size / 65536;
		unsigned tile = 0;

		if (!pt) {
			DRM_ERROR("vm alloc without vm pt\n");
			nouveau_mem_free_block(block);
			return NULL;
		}

		/* The tiling stuff is *not* what NVIDIA does - but both the
		 * 2D and 3D engines seem happy with this simpler method.
		 * Should look into why NVIDIA do what they do at some point.
		 */
		if (flags & NOUVEAU_MEM_TILE) {
			if (flags & NOUVEAU_MEM_TILE_ZETA)
				tile = 0x00002800;
			else
				tile = 0x00007000;
		}

		while (count--) {
			unsigned pte = offset / 65536;

			INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
			INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
			offset += 65536;
		}
	} else {
		block->flags |= NOUVEAU_MEM_NOVM;
	}
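
	/*
	 * Added note: each 64KiB page has a two-word PTE, so a block at
	 * VRAM offset 0x200000 starts at PTE 32 (instance words 64/65).
	 * The low word holds the physical offset with bit 0 as the present
	 * bit; the high word carries the tile mode.
	 */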
	if (flags & NOUVEAU_MEM_MAPPED) {
		struct drm_map_list *entry;
		int ret = 0;

		block->flags |= NOUVEAU_MEM_MAPPED;

		if (type == NOUVEAU_MEM_AGP) {
			if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
				ret = drm_addmap(dev, block->start, block->size,
						 _DRM_AGP, 0, &block->map);
			else
				ret = drm_addmap(dev, block->start, block->size,
						 _DRM_SCATTER_GATHER, 0,
						 &block->map);
		} else
		if (type == NOUVEAU_MEM_FB)
			ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
					 block->size, _DRM_FRAME_BUFFER,
					 0, &block->map);
		else if (type == NOUVEAU_MEM_PCI)
			ret = drm_addmap(dev, block->start, block->size,
					 _DRM_SCATTER_GATHER, 0, &block->map);

		if (ret) {
			nouveau_mem_free_block(block);
			return NULL;
		}

		entry = drm_find_matching_map(dev, block->map);
		if (!entry) {
			nouveau_mem_free_block(block);
			return NULL;
		}
		block->map_handle = entry->user_token;
	}

	DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n",
		  block->size, block->start, block->flags);
	return block;
}
void nouveau_mem_free(struct drm_device *dev, struct mem_block *block)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);

	if (block->flags & NOUVEAU_MEM_MAPPED)
		drm_rmmap(dev, block->map);

	/* G8x: Remove pages from vm */
	if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
	    !(block->flags & NOUVEAU_MEM_NOVM)) {
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start;
		unsigned count = block->size / 65536;

		if (!pt) {
			DRM_ERROR("vm free without vm pt\n");
			goto out;
		}

		while (count--) {
			unsigned pte = offset / 65536;

			INSTANCE_WR(pt, (pte * 2) + 0, 0);
			INSTANCE_WR(pt, (pte * 2) + 1, 0);
			offset += 65536;
		}
	}

out:
	nouveau_mem_free_block(block);
}
/*
 * Ioctls
 */

int
nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_mem_alloc *alloc = data;
	struct mem_block *block;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (alloc->flags & NOUVEAU_MEM_INTERNAL)
		return -EINVAL;

	block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
				  alloc->flags | NOUVEAU_MEM_USER, file_priv);
	if (!block)
		return -ENOMEM;

	alloc->map_handle = block->map_handle;
	alloc->offset = block->start;
	alloc->flags = block->flags;

	if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
		alloc->offset += 512*1024*1024;

	return 0;
}
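
/*
 * Added note: user-visible VRAM offsets carry a 512MiB bias; the free and
 * tile ioctls below subtract the same constant before looking the block
 * up, presumably matching where VRAM sits in the G8x VM address space.
 */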
int
nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_mem_free *memfree = data;
	struct mem_block *block;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
		memfree->offset -= 512*1024*1024;

	block = NULL;
	if (memfree->flags & NOUVEAU_MEM_FB)
		block = find_block(dev_priv->fb_heap, memfree->offset);
	else if (memfree->flags & NOUVEAU_MEM_AGP)
		block = find_block(dev_priv->agp_heap, memfree->offset);
	else if (memfree->flags & NOUVEAU_MEM_PCI)
		block = find_block(dev_priv->pci_heap, memfree->offset);
	if (!block)
		return -EFAULT;
	if (block->file_priv != file_priv)
		return -EPERM;

	nouveau_mem_free(dev, block);
	return 0;
}
int
nouveau_ioctl_mem_tile(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_mem_tile *memtile = data;
	struct mem_block *block = NULL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->card_type < NV_50)
		return -EINVAL;

	if (memtile->flags & NOUVEAU_MEM_FB) {
		memtile->offset -= 512*1024*1024;
		block = find_block(dev_priv->fb_heap, memtile->offset);
	}
	if (!block)
		return -EINVAL;
	if (block->file_priv != file_priv)
		return -EPERM;

	{
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start + memtile->delta;
		unsigned count = memtile->size / 65536;
		unsigned tile = 0;

		if (memtile->flags & NOUVEAU_MEM_TILE) {
			if (memtile->flags & NOUVEAU_MEM_TILE_ZETA)
				tile = 0x00002800;
			else
				tile = 0x00007000;
		}

		while (count--) {
			unsigned pte = offset / 65536;

			INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
			INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
			offset += 65536;
		}
	}

	return 0;
}