drm/nouveau/fifo: remove all the "special" engine hooks
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / gpu / drm / nouveau / nouveau_mem.c
1 /*
2  * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
3  * Copyright 2005 Stephane Marchesin
4  *
5  * The Weather Channel (TM) funded Tungsten Graphics to develop the
6  * initial release of the Radeon 8500 driver under the XFree86 license.
7  * This notice must be preserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  *
28  * Authors:
29  *    Ben Skeggs <bskeggs@redhat.com>
30  *    Roy Spliet <r.spliet@student.tudelft.nl>
31  */
32
33
34 #include "drmP.h"
35 #include "drm.h"
36 #include "drm_sarea.h"
37
38 #include "nouveau_drv.h"
39 #include "nouveau_pm.h"
40 #include "nouveau_mm.h"
41 #include "nouveau_vm.h"
42 #include "nouveau_fence.h"
43
44 /*
45  * NV10-NV40 tiling helpers
46  */
47
/*
 * (Re)program one NV10-NV40 tiling region and notify every engine that
 * implements a set_tile_region hook.  PFIFO cache pulls are disabled
 * around the register writes so no channel observes a half-updated
 * region.  A pitch of 0 frees the region instead of programming it.
 */
static void
nv10_mem_update_tile_region(struct drm_device *dev,
                            struct nouveau_tile_reg *tile, uint32_t addr,
                            uint32_t size, uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        int i = tile - dev_priv->tile.reg, j;   /* region index in tile.reg[] */
        unsigned long save;

        /* Any fence guarding the old contents is no longer relevant. */
        nouveau_fence_unref(&tile->fence);

        if (tile->pitch)
                pfb->free_tile_region(dev, i);

        if (pitch)
                pfb->init_tile_region(dev, i, addr, size, pitch, flags);

        /* Freeze command fetch while the tile registers are rewritten. */
        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
        nv_wr32(dev, NV03_PFIFO_CACHES, 0);
        nv04_fifo_cache_pull(dev, false);

        nouveau_wait_for_idle(dev);

        pfb->set_tile_region(dev, i);
        /* Let every engine with tiling state mirror the new region. */
        for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
                if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
                        dev_priv->eng[j]->set_tile_region(dev, i);
        }

        nv04_fifo_cache_pull(dev, true);
        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
82
83 static struct nouveau_tile_reg *
84 nv10_mem_get_tile_region(struct drm_device *dev, int i)
85 {
86         struct drm_nouveau_private *dev_priv = dev->dev_private;
87         struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
88
89         spin_lock(&dev_priv->tile.lock);
90
91         if (!tile->used &&
92             (!tile->fence || nouveau_fence_done(tile->fence)))
93                 tile->used = true;
94         else
95                 tile = NULL;
96
97         spin_unlock(&dev_priv->tile.lock);
98         return tile;
99 }
100
101 void
102 nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
103                          struct nouveau_fence *fence)
104 {
105         struct drm_nouveau_private *dev_priv = dev->dev_private;
106
107         if (tile) {
108                 spin_lock(&dev_priv->tile.lock);
109                 if (fence) {
110                         /* Mark it as pending. */
111                         tile->fence = fence;
112                         nouveau_fence_ref(fence);
113                 }
114
115                 tile->used = false;
116                 spin_unlock(&dev_priv->tile.lock);
117         }
118 }
119
/*
 * Claim a free tiling region and program it to cover @addr..@addr+@size
 * with @pitch/@flags.  While scanning, any free region that was handed
 * back unclaimed but is still programmed (non-zero pitch) is torn down.
 * Returns the claimed region, or NULL when no region was free.
 */
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
                    uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_tile_reg *tile, *found = NULL;
        int i;

        for (i = 0; i < pfb->num_tiles; i++) {
                tile = nv10_mem_get_tile_region(dev, i);

                if (pitch && !found) {
                        /* Keep whatever get() returned; NULL here just
                         * means region i was busy, so keep scanning. */
                        found = tile;
                        continue;

                } else if (tile && tile->pitch) {
                        /* Kill an unused tile region. */
                        nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_mem_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_mem_update_tile_region(dev, found, addr, size,
                                            pitch, flags);
        return found;
}
149
150 /*
151  * Cleanup everything
152  */
153 void
154 nouveau_mem_vram_fini(struct drm_device *dev)
155 {
156         struct drm_nouveau_private *dev_priv = dev->dev_private;
157
158         ttm_bo_device_release(&dev_priv->ttm.bdev);
159
160         nouveau_ttm_global_release(dev_priv);
161
162         if (dev_priv->fb_mtrr >= 0) {
163                 drm_mtrr_del(dev_priv->fb_mtrr,
164                              pci_resource_start(dev->pdev, 1),
165                              pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
166                 dev_priv->fb_mtrr = -1;
167         }
168 }
169
/*
 * Tear down the GART: take down SGDMA state and, on AGP systems,
 * unbind and free all AGP memory.  dev->agp itself is intentionally
 * left allocated (see comment below), but is marked released and
 * disabled.
 */
void
nouveau_mem_gart_fini(struct drm_device *dev)
{
        nouveau_sgdma_takedown(dev);

        if (drm_core_has_AGP(dev) && dev->agp) {
                struct drm_agp_mem *entry, *tempe;

                /* Remove AGP resources, but leave dev->agp
                   intact until drv_cleanup is called. */
                list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
                        if (entry->bound)
                                drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        kfree(entry);
                }
                INIT_LIST_HEAD(&dev->agp->memory);

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
}
195
196 bool
197 nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
198 {
199         if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
200                 return true;
201
202         return false;
203 }
204
#if __OS_HAS_AGP
/*
 * Sanitise an AGP mode word: mask out fast writes on NV18 (they make
 * the card lock up randomly) and apply any data rate the user forced
 * through the nouveau_agpmode option.
 */
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        bool v3;
        int req_rate;

        /* FW is broken on nv18: strip it unconditionally there. */
        if (dev_priv->chipset == 0x18)
                mode &= ~PCI_AGP_COMMAND_FW;

        /* Command-line override of the AGP rate. */
        if (nouveau_agpmode > 0) {
                v3 = mode & 0x8;
                req_rate = v3 ? nouveau_agpmode / 4 : nouveau_agpmode;
                mode = (mode & ~0x7) | (req_rate & 0x7);
        }

        return mode;
}
#endif
231
/*
 * Reset the card's AGP controller by disabling and re-enabling it,
 * power-cycling PGRAPH along the way.  Fast writes are turned off in
 * the bridge first so we cannot lock ourselves out.  Returns 0, or a
 * negative errno from the DRM AGP helpers; a no-op without AGP support.
 */
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
        uint32_t saved_pci_nv_1, pmc_enable;
        int ret;

        /* First of all, disable fast writes, otherwise if it's
         * already enabled in the AGP bridge and we disable the card's
         * AGP controller we might be locking ourselves out of it. */
        if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
             dev->agp->mode) & PCI_AGP_COMMAND_FW) {
                struct drm_agp_info info;
                struct drm_agp_mode mode;

                ret = drm_agp_info(dev, &info);
                if (ret)
                        return ret;

                /* Re-enable AGP with FW stripped from the mode word. */
                mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
                ret = drm_agp_enable(dev, mode);
                if (ret)
                        return ret;
        }

        saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

        /* clear busmaster bit */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
        /* disable AGP */
        nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

        /* power cycle pgraph, if enabled */
        pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
        if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
                nv_wr32(dev, NV03_PMC_ENABLE,
                                pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
                nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
                                NV_PMC_ENABLE_PGRAPH);
        }

        /* and restore (gives effect of resetting AGP) */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

        return 0;
}
279
/*
 * Bring up the AGP GART: acquire the bridge if needed, reset the
 * card-side controller, then enable AGP at the sanitised mode and
 * record the aperture in gart_info.  Returns 0 or a negative errno;
 * a no-op (returning 0) without AGP support.
 */
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_agp_info info;
        struct drm_agp_mode mode;
        int ret;

        if (!dev->agp->acquired) {
                ret = drm_agp_acquire(dev);
                if (ret) {
                        NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
                        return ret;
                }
        }

        /* Best effort; failure here is not fatal for bring-up. */
        nouveau_mem_reset_agp(dev);

        ret = drm_agp_info(dev, &info);
        if (ret) {
                NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
                return ret;
        }

        /* see agp.h for the AGPSTAT_* modes available */
        mode.mode = get_agp_mode(dev, info.mode);
        ret = drm_agp_enable(dev, mode);
        if (ret) {
                NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
                return ret;
        }

        dev_priv->gart_info.type        = NOUVEAU_GART_AGP;
        dev_priv->gart_info.aper_base   = info.aperture_base;
        dev_priv->gart_info.aper_size   = info.aperture_size;
#endif
        return 0;
}
319
/* Human-readable names for the NV_MEM_TYPE_* VRAM types; the table is
 * terminated by the NV_MEM_TYPE_UNKNOWN entry. */
static const struct vram_types {
        int value;
        const char *name;
} vram_type_map[] = {
        { NV_MEM_TYPE_STOLEN , "stolen system memory" },
        { NV_MEM_TYPE_SGRAM  , "SGRAM" },
        { NV_MEM_TYPE_SDRAM  , "SDRAM" },
        { NV_MEM_TYPE_DDR1   , "DDR1" },
        { NV_MEM_TYPE_DDR2   , "DDR2" },
        { NV_MEM_TYPE_DDR3   , "DDR3" },
        { NV_MEM_TYPE_GDDR2  , "GDDR2" },
        { NV_MEM_TYPE_GDDR3  , "GDDR3" },
        { NV_MEM_TYPE_GDDR4  , "GDDR4" },
        { NV_MEM_TYPE_GDDR5  , "GDDR5" },
        { NV_MEM_TYPE_UNKNOWN, "unknown type" }
};
336
337 int
338 nouveau_mem_vram_init(struct drm_device *dev)
339 {
340         struct drm_nouveau_private *dev_priv = dev->dev_private;
341         struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
342         const struct vram_types *vram_type;
343         int ret, dma_bits;
344
345         dma_bits = 32;
346         if (dev_priv->card_type >= NV_50) {
347                 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
348                         dma_bits = 40;
349         } else
350         if (0 && pci_is_pcie(dev->pdev) &&
351             dev_priv->chipset  > 0x40 &&
352             dev_priv->chipset != 0x45) {
353                 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
354                         dma_bits = 39;
355         }
356
357         ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
358         if (ret)
359                 return ret;
360         ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
361         if (ret) {
362                 /* Reset to default value. */
363                 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
364         }
365
366
367         ret = nouveau_ttm_global_init(dev_priv);
368         if (ret)
369                 return ret;
370
371         ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
372                                  dev_priv->ttm.bo_global_ref.ref.object,
373                                  &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
374                                  dma_bits <= 32 ? true : false);
375         if (ret) {
376                 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
377                 return ret;
378         }
379
380         vram_type = vram_type_map;
381         while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
382                 if (nouveau_vram_type) {
383                         if (!strcasecmp(nouveau_vram_type, vram_type->name))
384                                 break;
385                         dev_priv->vram_type = vram_type->value;
386                 } else {
387                         if (vram_type->value == dev_priv->vram_type)
388                                 break;
389                 }
390                 vram_type++;
391         }
392
393         NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
394                 (int)(dev_priv->vram_size >> 20), vram_type->name);
395         if (dev_priv->vram_sys_base) {
396                 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
397                         dev_priv->vram_sys_base);
398         }
399
400         dev_priv->fb_available_size = dev_priv->vram_size;
401         dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
402         if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
403                 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
404         dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
405
406         dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
407         dev_priv->fb_aper_free = dev_priv->fb_available_size;
408
409         /* mappable vram */
410         ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
411                              dev_priv->fb_available_size >> PAGE_SHIFT);
412         if (ret) {
413                 NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
414                 return ret;
415         }
416
417         if (dev_priv->card_type < NV_50) {
418                 ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
419                                      0, 0, NULL, &dev_priv->vga_ram);
420                 if (ret == 0)
421                         ret = nouveau_bo_pin(dev_priv->vga_ram,
422                                              TTM_PL_FLAG_VRAM);
423
424                 if (ret) {
425                         NV_WARN(dev, "failed to reserve VGA memory\n");
426                         nouveau_bo_ref(NULL, &dev_priv->vga_ram);
427                 }
428         }
429
430         dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
431                                          pci_resource_len(dev->pdev, 1),
432                                          DRM_MTRR_WC);
433         return 0;
434 }
435
/*
 * Bring up the GART: try AGP first (where available and enabled via
 * the nouveau_agpmode option), fall back to SGDMA/PCI(E), then hand
 * the aperture to TTM as the TT memory pool.  Returns 0 or a negative
 * errno.
 */
int
nouveau_mem_gart_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret;

        dev_priv->gart_info.type = NOUVEAU_GART_NONE;

/* AGP is not used on these architectures. */
#if !defined(__powerpc__) && !defined(__ia64__)
        if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
                ret = nouveau_mem_init_agp(dev);
                if (ret)
                        NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
        }
#endif

        /* AGP unavailable or failed: fall back to SGDMA. */
        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
                ret = nouveau_sgdma_init(dev);
                if (ret) {
                        NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
                        return ret;
                }
        }

        NV_INFO(dev, "%d MiB GART (aperture)\n",
                (int)(dev_priv->gart_info.aper_size >> 20));
        dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

        ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
                             dev_priv->gart_info.aper_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
                return ret;
        }

        return 0;
}
474
/*
 * Pack NV40 memory timing register values (the 0x100220 block) from
 * BIOS perf-table entry @e into t->reg[0..2].  @t->tCWL must already
 * hold the CAS write latency.  @freq, @len and @boot are unused on
 * this generation.  Always returns 0.
 */
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        /* XXX: I don't trust the -1's and +1's... they must come
         *      from somewhere! */
        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    1 << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
                    (e->tCL + 2 - (t->tCWL - 1));

        t->reg[2] = 0x20200000 |
                    ((t->tCWL - 1) << 24 |
                     e->tRRD << 16 |
                     e->tRCDWR << 8 |
                     e->tRCDRD);

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2]);
        return 0;
}
500
/*
 * Pack NV50 memory timing register values (0x100220..0x100240 block)
 * from BIOS perf-table entry @e into t->reg[0..8].  The layout of
 * regs 3/5/6/7/8 differs with the BIOS 'P' table version; @boot
 * supplies the boot-time values for fields we must preserve.  @len is
 * the table entry size and gates which optional fields exist.
 * Returns 0, or -EINVAL when no 'P' BIT table is present.
 */
static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct bit_entry P;
        uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

        if (bit_table(dev, 'P', &P))
                return -EINVAL;

        /* Longer table entries carry extra fields; deliberately fall
         * through so each size picks up everything smaller sizes have. */
        switch (min(len, (u8) 22)) {
        case 22:
                unk21 = e->tUNK_21;
                /* fallthrough */
        case 21:
                unk20 = e->tUNK_20;
                /* fallthrough */
        case 20:
                if (e->tCWL > 0)
                        t->tCWL = e->tCWL;
                /* fallthrough */
        case 19:
                unk18 = e->tUNK_18;
                break;
        }

        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                                max(unk18, (u8) 1) << 16 |
                                (e->tWTR + 2 + (t->tCWL - 1)) << 8;

        t->reg[2] = ((t->tCWL - 1) << 24 |
                    e->tRRD << 16 |
                    e->tRCDWR << 8 |
                    e->tRCDRD);

        t->reg[4] = e->tUNK_13 << 8  | e->tUNK_13;

        t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

        t->reg[8] = boot->reg[8] & 0xffffff00;

        if (P.version == 1) {
                t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

                t->reg[3] = (0x14 + e->tCL) << 24 |
                            0x16 << 16 |
                            (e->tCL - 1) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= boot->reg[4] & 0xffff0000;

                t->reg[6] = (0x33 - t->tCWL) << 16 |
                            t->tCWL << 8 |
                            (0x2e + e->tCL - t->tCWL);

                t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

                /* XXX: P.version == 1 only has DDR2 and GDDR3? */
                if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
                        t->reg[5] |= (e->tCL + 3) << 8;
                        t->reg[6] |= (t->tCWL - 2) << 8;
                        t->reg[8] |= (e->tCL - 4);
                } else {
                        t->reg[5] |= (e->tCL + 2) << 8;
                        t->reg[6] |= t->tCWL << 8;
                        t->reg[8] |= (e->tCL - 2);
                }
        } else {
                t->reg[1] |= (5 + e->tCL - (t->tCWL));

                /* XXX: 0xb? 0x30? */
                t->reg[3] = (0x30 + e->tCL) << 24 |
                            (boot->reg[3] & 0x00ff0000)|
                            (0xb + e->tCL) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= (unk20 << 24 | unk21 << 16);

                /* XXX: +6? */
                t->reg[5] |= (t->tCWL + 6) << 8;

                t->reg[6] = (0x5a + e->tCL) << 16 |
                            (6 - e->tCL + t->tCWL) << 8 |
                            (0x50 + e->tCL - t->tCWL);

                tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
                t->reg[7] = (tmp7_3 << 24) |
                            ((tmp7_3 - 6 + e->tCL) << 16) |
                            0x202;
        }

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
                 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
        NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
        return 0;
}
601
/*
 * Pack NVC0/NVD0 memory timing register values (the 0x10f290 block)
 * from BIOS perf-table entry @e into t->reg[0..4], preserving the
 * boot-time bits the entry does not describe.  Always returns 0.
 */
static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        /* Entry may override the CAS write latency. */
        if (e->tCWL > 0)
                t->tCWL = e->tCWL;

        t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
                     e->tRFC << 8 | e->tRC);

        t->reg[1] = (boot->reg[1] & 0xff000000) |
                    (e->tRCDWR & 0x0f) << 20 |
                    (e->tRCDRD & 0x0f) << 14 |
                    (t->tCWL << 7) |
                    (e->tCL & 0x0f);

        t->reg[2] = (boot->reg[2] & 0xff0000ff) |
                    e->tWR << 16 | e->tWTR << 8;

        t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
                    (e->tUNK_21 & 0xf) << 5 |
                    (e->tUNK_13 & 0x1f);

        t->reg[4] = (boot->reg[4] & 0xfff00fff) |
                    (e->tRRD&0x1f) << 15;

        NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
        return 0;
}
635
/*
 * MR (memory mode register) generation methods
 */
639
/*
 * Generate DDR2 mode-register values (MR0/MR1) for timing entry @e,
 * preserving boot-time bits outside the CAS/WR/ODT fields.  Entries
 * shorter than 15 bytes have no RAM_FT1, so the boot ODT is kept.
 * Returns 0, or -ERANGE when tCL/tWR are out of DDR2 range.
 */
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        t->drive_strength = 0;
        if (len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        /* DDR2 MR1 only has two ODT bits. */
        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x100f) |
                   (e->tCL) << 4 |
                   (e->tWR - 1) << 9;
        t->mr[1] = (boot->mr[1] & 0x101fbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5;

        NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
        return 0;
}
679
/* DDR3 MR0 write-recovery field encoding, indexed by tWR in clocks. */
uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
        0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
682
/*
 * Generate DDR3 mode-register values (MR0/MR1/MR2) for timing entry
 * @e, preserving boot-time bits outside the fields we set.  Entries
 * shorter than 15 bytes have no RAM_FT1, so the boot ODT is kept.
 * Returns 0, or -ERANGE when tCL/tWR/tCWL are out of DDR3 range.
 */
static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        u8 cl = e->tCL - 4;     /* DDR3 MR0 encodes CAS latency as tCL-4 */

        t->drive_strength = 0;
        if (len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (e->tCWL < 5) {
                NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
                return -ERANGE;
        }

        t->mr[0] = (boot->mr[0] & 0x180b) |
                   /* CAS */
                   (cl & 0x7) << 4 |
                   (cl & 0x8) >> 1 |
                   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
        t->mr[1] = (boot->mr[1] & 0x101dbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5 |
                   (t->odt & 0x4) << 7;
        t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
        return 0;
}
727
/* GDDR3 MR0 CAS-latency field encoding, indexed by tCL in clocks. */
uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
        0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
/* GDDR3 MR1 write-recovery field encoding, indexed by tWR in clocks. */
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
        0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
732
/*
 * Generate GDDR3 mode-register values (MR0/MR1; MR2 kept from boot)
 * for timing entry @e.  Entries shorter than 15 bytes have no RAM_FT1,
 * so boot drive strength/ODT are kept.  Returns 0, or -ERANGE when
 * tCL/tWR are out of GDDR3 range.
 */
static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        /* Only two ODT bits are available; fall back to autocal. */
        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0xe0b) |
                   /* CAS */
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
        t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
                   (t->odt << 2) |
                   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
        t->mr[2] = boot->mr[2];

        NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
                      t->mr[0], t->mr[1], t->mr[2]);
        return 0;
}
776
/*
 * Generate GDDR5 mode-register values (MR0/MR1) for timing entry @e.
 * Entries shorter than 15 bytes have no RAM_FT1, so boot drive
 * strength/ODT are kept.  Returns 0, or -ERANGE when tCL/tWR are out
 * of GDDR5 range.
 */
static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x03;
        }

        if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        /* Only two ODT bits are available; fall back to autocal. */
        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        /* GDDR5 MR0 encodes CL as tCL-5 and WR as tWR-4. */
        t->mr[0] = (boot->mr[0] & 0x007) |
                   ((e->tCL - 5) << 3) |
                   ((e->tWR - 4) << 8);
        t->mr[1] = (boot->mr[1] & 0x1007f0) |
                   t->drive_strength |
                   (t->odt << 2);

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
        return 0;
}
817
/*
 * Compute the memory timing registers and mode-register values for
 * clock frequency @freq into @t.  Falls back to the boot-time timings
 * when the BIOS perf table has no usable entry for this frequency.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
                        struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
        struct nouveau_pm_memtiming *boot = &pm->boot.timing;
        struct nouveau_pm_tbl_entry *e;
        u8 ver, len, *ptr, *ramcfg;
        int ret;

        /* No (or empty) timing entry: just reuse the boot timings. */
        ptr = nouveau_perf_timing(dev, freq, &ver, &len);
        if (!ptr || ptr[0] == 0x00) {
                *t = *boot;
                return 0;
        }
        e = (struct nouveau_pm_tbl_entry *)ptr;

        t->tCWL = boot->tCWL;

        /* Per-generation timing register packing. */
        switch (dev_priv->card_type) {
        case NV_40:
                ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_50:
                ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_C0:
        case NV_D0:
                ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -ENODEV;
                break;
        }

        /* If the timing calc failed, '* !ret' forces the switch value
         * to 0, which matches no memory type and lands in default
         * (ret = -EINVAL), skipping MR generation entirely. */
        switch (dev_priv->vram_type * !ret) {
        case NV_MEM_TYPE_GDDR3:
                ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_GDDR5:
                ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR2:
                ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR3:
                ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        /* Apply the RAMCFG DLL-disable bit to the appropriate MR1 bit
         * for this memory type (bit 6 on GDDR3, bit 0 otherwise). */
        ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
        if (ramcfg) {
                int dll_off;

                if (ver == 0x00)
                        dll_off = !!(ramcfg[3] & 0x04);
                else
                        dll_off = !!(ramcfg[2] & 0x40);

                switch (dev_priv->vram_type) {
                case NV_MEM_TYPE_GDDR3:
                        t->mr[1] &= ~0x00000040;
                        t->mr[1] |=  0x00000040 * dll_off;
                        break;
                default:
                        t->mr[1] &= ~0x00000001;
                        t->mr[1] |=  0x00000001 * dll_off;
                        break;
                }
        }

        return ret;
}
895
896 void
897 nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
898 {
899         struct drm_nouveau_private *dev_priv = dev->dev_private;
900         u32 timing_base, timing_regs, mr_base;
901         int i;
902
903         if (dev_priv->card_type >= 0xC0) {
904                 timing_base = 0x10f290;
905                 mr_base = 0x10f300;
906         } else {
907                 timing_base = 0x100220;
908                 mr_base = 0x1002c0;
909         }
910
911         t->id = -1;
912
913         switch (dev_priv->card_type) {
914         case NV_50:
915                 timing_regs = 9;
916                 break;
917         case NV_C0:
918         case NV_D0:
919                 timing_regs = 5;
920                 break;
921         case NV_30:
922         case NV_40:
923                 timing_regs = 3;
924                 break;
925         default:
926                 timing_regs = 0;
927                 return;
928         }
929         for(i = 0; i < timing_regs; i++)
930                 t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));
931
932         t->tCWL = 0;
933         if (dev_priv->card_type < NV_C0) {
934                 t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
935         } else if (dev_priv->card_type <= NV_D0) {
936                 t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
937         }
938
939         t->mr[0] = nv_rd32(dev, mr_base);
940         t->mr[1] = nv_rd32(dev, mr_base + 0x04);
941         t->mr[2] = nv_rd32(dev, mr_base + 0x20);
942         t->mr[3] = nv_rd32(dev, mr_base + 0x24);
943
944         t->odt = 0;
945         t->drive_strength = 0;
946
947         switch (dev_priv->vram_type) {
948         case NV_MEM_TYPE_DDR3:
949                 t->odt |= (t->mr[1] & 0x200) >> 7;
950         case NV_MEM_TYPE_DDR2:
951                 t->odt |= (t->mr[1] & 0x04) >> 2 |
952                           (t->mr[1] & 0x40) >> 5;
953                 break;
954         case NV_MEM_TYPE_GDDR3:
955         case NV_MEM_TYPE_GDDR5:
956                 t->drive_strength = t->mr[1] & 0x03;
957                 t->odt = (t->mr[1] & 0x0c) >> 2;
958                 break;
959         default:
960                 break;
961         }
962 }
963
/**
 * nouveau_mem_exec - run a memory reclock sequence
 * @exec: chipset-specific callbacks (MR read/write, precharge, refresh
 *        control, clock switch, timing register update, ns-scale waits)
 * @perflvl: performance level whose memory timings / MRs are the target
 *
 * Drives the DRAM reclock procedure: optionally turn the DLL off, put
 * the DRAM into self-refresh, switch the memory clock, exit
 * self-refresh, then program the mode and timing registers.  The
 * statement order mirrors the DRAM command protocol and must not be
 * rearranged.
 *
 * Returns 0 on success, -ENODEV for unsupported memory types.
 */
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
		 struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nouveau_pm_memtiming *info = &perflvl->timing;
	/* delay parameters in nanoseconds; refined per memory type below */
	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
	u32 mr1_dlloff;

	/* per-memory-type delays and the MR1 bit that disables the DLL */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR2:
		tDLLK = 2000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_DDR3:
		tDLLK = 12000;
		tCKSRE = 2000;
		tXS = 1000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_GDDR3:
		tDLLK = 40000;
		mr1_dlloff = 0x00000040;
		break;
	default:
		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
		return -ENODEV;
	}

	/* fetch current MRs */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_DDR3:
		mr[2] = exec->mrg(exec, 2);
		/* fall through - MR0/MR1 exist on all supported types */
	default:
		mr[1] = exec->mrg(exec, 1);
		mr[0] = exec->mrg(exec, 0);
		break;
	}

	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh  */
	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
		exec->precharge(exec);
		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
		exec->wait(exec, tMRD);
	}

	/* enter self-refresh mode */
	exec->precharge(exec);
	exec->refresh(exec);
	exec->refresh(exec);
	exec->refresh_auto(exec, false);
	exec->refresh_self(exec, true);
	exec->wait(exec, tCKSRE);

	/* modify input clock frequency */
	exec->clock_set(exec);

	/* exit self-refresh mode */
	exec->wait(exec, tCKSRX);
	exec->precharge(exec);
	exec->refresh_self(exec, false);
	exec->refresh_auto(exec, true);
	exec->wait(exec, tXS);
	/* NOTE(review): tXS is waited twice in a row here -- looks like a
	 * duplicated line; confirm whether a single wait suffices */
	exec->wait(exec, tXS);

	/* update MRs */
	if (mr[2] != info->mr[2]) {
		exec->mrs (exec, 2, info->mr[2]);
		exec->wait(exec, tMRD);
	}

	if (mr[1] != info->mr[1]) {
		/* need to keep DLL off until later, at least on GDDR3 */
		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
		exec->wait(exec, tMRD);
	}

	if (mr[0] != info->mr[0]) {
		exec->mrs (exec, 0, info->mr[0]);
		exec->wait(exec, tMRD);
	}

	/* update PFB timing registers */
	exec->timing_set(exec);

	/* DLL (enable + ) reset */
	if (!(info->mr[1] & mr1_dlloff)) {
		if (mr[1] & mr1_dlloff) {
			/* re-enable the DLL first */
			exec->mrs (exec, 1, info->mr[1]);
			exec->wait(exec, tMRD);
		}
		/* pulse the DLL-reset bit (MR0 bit 8), then wait tDLLK for
		 * the DLL to relock */
		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
		exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
		exec->wait(exec, tMRD);
		exec->wait(exec, tDLLK);
		if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
			exec->precharge(exec);
	}

	return 0;
}
1068
1069 int
1070 nouveau_mem_vbios_type(struct drm_device *dev)
1071 {
1072         struct bit_entry M;
1073         u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
1074         if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) {
1075                 u8 *table = ROMPTR(dev, M.data[3]);
1076                 if (table && table[0] == 0x10 && ramcfg < table[3]) {
1077                         u8 *entry = table + table[1] + (ramcfg * table[2]);
1078                         switch (entry[0] & 0x0f) {
1079                         case 0: return NV_MEM_TYPE_DDR2;
1080                         case 1: return NV_MEM_TYPE_DDR3;
1081                         case 2: return NV_MEM_TYPE_GDDR3;
1082                         case 3: return NV_MEM_TYPE_GDDR5;
1083                         default:
1084                                 break;
1085                         }
1086
1087                 }
1088         }
1089         return NV_MEM_TYPE_UNKNOWN;
1090 }
1091
/* TTM init hook for the VRAM manager: the VRAM allocator is set up by
 * the chipset VRAM engine elsewhere, so there is nothing to do here. */
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}
1098
/* TTM takedown hook for the VRAM manager: teardown is handled by the
 * chipset VRAM engine, so there is nothing to do here. */
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}
1105
1106 static inline void
1107 nouveau_mem_node_cleanup(struct nouveau_mem *node)
1108 {
1109         if (node->vma[0].node) {
1110                 nouveau_vm_unmap(&node->vma[0]);
1111                 nouveau_vm_put(&node->vma[0]);
1112         }
1113
1114         if (node->vma[1].node) {
1115                 nouveau_vm_unmap(&node->vma[1]);
1116                 nouveau_vm_put(&node->vma[1]);
1117         }
1118 }
1119
1120 static void
1121 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
1122                          struct ttm_mem_reg *mem)
1123 {
1124         struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
1125         struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
1126         struct drm_device *dev = dev_priv->dev;
1127
1128         nouveau_mem_node_cleanup(mem->mm_node);
1129         vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
1130 }
1131
1132 static int
1133 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
1134                          struct ttm_buffer_object *bo,
1135                          struct ttm_placement *placement,
1136                          struct ttm_mem_reg *mem)
1137 {
1138         struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
1139         struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
1140         struct drm_device *dev = dev_priv->dev;
1141         struct nouveau_bo *nvbo = nouveau_bo(bo);
1142         struct nouveau_mem *node;
1143         u32 size_nc = 0;
1144         int ret;
1145
1146         if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
1147                 size_nc = 1 << nvbo->page_shift;
1148
1149         ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
1150                         mem->page_alignment << PAGE_SHIFT, size_nc,
1151                         (nvbo->tile_flags >> 8) & 0x3ff, &node);
1152         if (ret) {
1153                 mem->mm_node = NULL;
1154                 return (ret == -ENOSPC) ? 0 : ret;
1155         }
1156
1157         node->page_shift = nvbo->page_shift;
1158
1159         mem->mm_node = node;
1160         mem->start   = node->offset >> PAGE_SHIFT;
1161         return 0;
1162 }
1163
1164 void
1165 nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
1166 {
1167         struct nouveau_mm *mm = man->priv;
1168         struct nouveau_mm_node *r;
1169         u32 total = 0, free = 0;
1170
1171         mutex_lock(&mm->mutex);
1172         list_for_each_entry(r, &mm->nodes, nl_entry) {
1173                 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
1174                        prefix, r->type, ((u64)r->offset << 12),
1175                        (((u64)r->offset + r->length) << 12));
1176
1177                 total += r->length;
1178                 if (!r->type)
1179                         free += r->length;
1180         }
1181         mutex_unlock(&mm->mutex);
1182
1183         printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
1184                prefix, (u64)total << 12, (u64)free << 12);
1185         printk(KERN_DEBUG "%s  block: 0x%08x\n",
1186                prefix, mm->block_size << 12);
1187 }
1188
1189 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
1190         nouveau_vram_manager_init,
1191         nouveau_vram_manager_fini,
1192         nouveau_vram_manager_new,
1193         nouveau_vram_manager_del,
1194         nouveau_vram_manager_debug
1195 };
1196
/* TTM init hook for the GART manager: no per-manager state to set up. */
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}
1202
/* TTM takedown hook for the GART manager: no per-manager state to
 * tear down. */
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}
1208
1209 static void
1210 nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
1211                          struct ttm_mem_reg *mem)
1212 {
1213         nouveau_mem_node_cleanup(mem->mm_node);
1214         kfree(mem->mm_node);
1215         mem->mm_node = NULL;
1216 }
1217
1218 static int
1219 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
1220                          struct ttm_buffer_object *bo,
1221                          struct ttm_placement *placement,
1222                          struct ttm_mem_reg *mem)
1223 {
1224         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1225         struct nouveau_mem *node;
1226
1227         if (unlikely((mem->num_pages << PAGE_SHIFT) >=
1228                      dev_priv->gart_info.aper_size))
1229                 return -ENOMEM;
1230
1231         node = kzalloc(sizeof(*node), GFP_KERNEL);
1232         if (!node)
1233                 return -ENOMEM;
1234         node->page_shift = 12;
1235
1236         mem->mm_node = node;
1237         mem->start   = 0;
1238         return 0;
1239 }
1240
/* TTM debug hook: the GART manager keeps no allocation state to dump. */
void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
1245
1246 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
1247         nouveau_gart_manager_init,
1248         nouveau_gart_manager_fini,
1249         nouveau_gart_manager_new,
1250         nouveau_gart_manager_del,
1251         nouveau_gart_manager_debug
1252 };