2 * Copyright 2010 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
27 #include "nouveau_drv.h"
28 #include "nouveau_mm.h"
30 static void nvc0_fifo_isr(struct drm_device *);
/* Driver-private PFIFO engine state.
 * NOTE(review): this view looks truncated -- members used by the functions
 * below (cur_playlist, spoon_nr) are not visible here; confirm against the
 * full file.
 */
32 struct nvc0_fifo_priv {
33 struct nouveau_gpuobj *playlist[2]; /* double-buffered runlist objects */
35 struct nouveau_vma user_vma; /* BAR1 VM window for per-channel user regs */
/* Per-channel PFIFO state, hung off nouveau_channel::fifo_priv. */
39 struct nvc0_fifo_chan {
40 struct nouveau_gpuobj *user; /* control-register page, mapped into user_vma */
41 struct nouveau_gpuobj *ramfc; /* fifo context, faked on top of chan->ramin */
/* Rebuild the hardware runlist ("playlist") from all currently-enabled
 * channels and submit it to PFIFO.
 *
 * Alternates between the two playlist buffers so the one the hardware may
 * still be reading is never rewritten in place.
 * NOTE(review): several interior lines (local declarations for i/p, the
 * loop's continue/p += 8, and presumably a pinstmem->flush() before the
 * submit) are not visible in this view.
 */
45 nvc0_fifo_playlist_update(struct drm_device *dev)
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
50 struct nvc0_fifo_priv *priv = pfifo->priv;
51 struct nouveau_gpuobj *cur;
/* pick the inactive buffer, then flip for next time */
54 cur = priv->playlist[priv->cur_playlist];
55 priv->cur_playlist = !priv->cur_playlist;
/* scan all 128 channel slots; bit 0 of 0x003004+i*8 = channel enabled
 * (same register written by create_context/init below) */
57 for (i = 0, p = 0; i < 128; i++) {
58 if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
/* 8-byte playlist entry: channel id + flag word */
60 nv_wo32(cur, p + 0, i);
61 nv_wo32(cur, p + 4, 0x00000004);
/* submit: base address (vram, >>12) and entry count (p bytes / 8) */
66 nv_wr32(dev, 0x002270, cur->vinst >> 12);
67 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
/* wait for hardware to ack the new playlist */
68 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
69 NV_ERROR(dev, "PFIFO - playlist update failed\n");
/* Create the PFIFO context for a channel: allocate the user control page,
 * map it into the BAR1 polling window, build the RAMFC fifo context inside
 * the channel's ramin, then enable the channel and refresh the playlist.
 *
 * On failure the partially-built state is torn down via
 * pfifo->destroy_context() (visible at the bottom of this block).
 * NOTE(review): error-check lines after each allocation, and the final
 * "return 0", are not visible in this view.
 */
73 nvc0_fifo_create_context(struct nouveau_channel *chan)
75 struct drm_device *dev = chan->dev;
76 struct drm_nouveau_private *dev_priv = dev->dev_private;
77 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
78 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
79 struct nvc0_fifo_priv *priv = pfifo->priv;
80 struct nvc0_fifo_chan *fifoch;
/* GPU virtual address of the indirect buffer (pushbuf ring) */
81 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
84 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
87 fifoch = chan->fifo_priv;
89 /* allocate vram for control regs, map into polling area */
90 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
91 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
/* place the page at chan->id * 0x1000 inside the shared BAR1 window */
95 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
96 *(struct nouveau_mem **)fifoch->user->node);
/* CPU mapping (write-combined) over BAR1 for userspace doorbell access */
98 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
99 priv->user_vma.offset + (chan->id * 0x1000),
/* RAMFC lives at the start of the channel's ramin; fake a gpuobj on it */
107 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
108 chan->ramin->vinst, 0x100,
109 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
/* fill in the fifo context -- register meanings are hardware-defined;
 * 0x48/0x4c: IB base + log2(IB entries) in the high half */
113 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
114 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
115 nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
116 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
117 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
118 nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
119 upper_32_bits(ib_virt));
120 nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
121 nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
122 nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
123 nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
124 nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
125 nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
126 nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
127 nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
128 nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
129 nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
/* make the RAMFC writes visible to the GPU before enabling the channel */
130 pinstmem->flush(dev);
/* point the channel-table entry at ramin and mark the channel enabled */
132 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
133 (chan->ramin->vinst >> 12));
134 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
135 nvc0_fifo_playlist_update(dev);
/* error path: unwind everything allocated above */
139 pfifo->destroy_context(chan);
/* Tear down a channel's PFIFO context: disable and kick the channel off
 * the hardware, drop it from the playlist, then free the per-channel
 * objects allocated in create_context.
 * NOTE(review): the iounmap of chan->user and the kfree of fifoch are not
 * visible in this view but are presumably among the elided lines.
 */
144 nvc0_fifo_destroy_context(struct nouveau_channel *chan)
146 struct drm_device *dev = chan->dev;
147 struct nvc0_fifo_chan *fifoch;
/* clear the enable bit, then kick the channel via 0x2634 and wait for
 * the hardware to echo the chid back */
149 nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
150 nv_wr32(dev, 0x002634, chan->id);
151 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
152 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
154 nvc0_fifo_playlist_update(dev);
/* clear the channel-table entry */
156 nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
163 fifoch = chan->fifo_priv;
164 chan->fifo_priv = NULL;
/* drop references: gpuobjs are freed when the refcount hits zero */
168 nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
169 nouveau_gpuobj_ref(NULL, &fifoch->user);
/* Load a channel's fifo context onto the hardware.
 * NOTE(review): the body is not visible in this view -- on nvc0 this is
 * presumably a no-op since the playlist drives scheduling; confirm. */
174 nvc0_fifo_load_context(struct nouveau_channel *chan)
/* Quiesce PFIFO (e.g. for suspend): disable and kick every active channel,
 * waiting for each kick to be acknowledged via 0x2634.
 * NOTE(review): the loop's continue and the function's return are among
 * the lines not visible in this view.
 */
180 nvc0_fifo_unload_context(struct drm_device *dev)
184 for (i = 0; i < 128; i++) {
/* skip channels that are not enabled */
185 if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
188 nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
189 nv_wr32(dev, 0x002634, i);
190 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
191 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
192 i, nv_rd32(dev, 0x002634));
/* Free the global PFIFO state created by nvc0_fifo_create: the BAR1 user
 * window and both playlist buffers.
 * NOTE(review): the priv = pfifo->priv assignment, NULL check, irq
 * unregister and kfree are presumably among the elided lines.
 */
201 nvc0_fifo_destroy(struct drm_device *dev)
203 struct drm_nouveau_private *dev_priv = dev->dev_private;
204 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
205 struct nvc0_fifo_priv *priv;
211 nouveau_vm_put(&priv->user_vma);
212 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
213 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
/* Shut PFIFO down: mask all interrupts (0x002140 = 0), then free state. */
218 nvc0_fifo_takedown(struct drm_device *dev)
220 nv_wr32(dev, 0x002140, 0x00000000);
221 nvc0_fifo_destroy(dev);
/* Allocate the global PFIFO state: the two playlist buffers, a BAR1 VM
 * window large enough for one 4KiB user page per channel, and the PFIFO
 * interrupt handler (irq line 8).
 * NOTE(review): the destination arguments of the two gpuobj_new calls
 * (presumably &priv->playlist[0]/[1]) and the error-check lines are not
 * visible in this view.
 */
225 nvc0_fifo_create(struct drm_device *dev)
227 struct drm_nouveau_private *dev_priv = dev->dev_private;
228 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
229 struct nvc0_fifo_priv *priv;
232 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
237 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
242 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
/* one 4KiB page per channel, 4KiB (1<<12) aligned, read/write */
247 ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
248 12, NV_MEM_ACCESS_RW, &priv->user_vma);
252 nouveau_irq_register(dev, 8, nvc0_fifo_isr);
/* error path: unwind partial allocations */
256 nvc0_fifo_destroy(dev);
/* Bring PFIFO up (also used on resume): reset the engine, enable and
 * configure the PSUBFIFO units, program the BAR1 user-area base, unmask
 * interrupts, then restore the channel table from software state and
 * rebuild the playlist.
 */
261 nvc0_fifo_init(struct drm_device *dev)
263 struct drm_nouveau_private *dev_priv = dev->dev_private;
264 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
265 struct nouveau_channel *chan;
266 struct nvc0_fifo_priv *priv;
/* allocate global state on first init */
270 ret = nvc0_fifo_create(dev)
276 /* reset PFIFO, enable all available PSUBFIFO areas */
277 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
278 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
279 nv_wr32(dev, 0x000204, 0xffffffff);
280 nv_wr32(dev, 0x002204, 0xffffffff);
/* count the subfifos the hardware actually reports as present */
282 priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
283 NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
285 /* assign engines to subfifos */
286 if (priv->spoon_nr >= 3) {
287 nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
288 nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
289 nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
290 nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
291 nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
292 nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
/* per-subfifo setup: clear stray state, ack and enable interrupts */
296 for (i = 0; i < priv->spoon_nr; i++) {
297 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
298 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
299 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
302 nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
/* base of the BAR1 user control area (pages mapped in create_context) */
303 nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
305 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
306 nv_wr32(dev, 0x002100, 0xffffffff);
307 nv_wr32(dev, 0x002140, 0xbfffffff);
309 /* restore PFIFO context table */
310 for (i = 0; i < 128; i++) {
311 chan = dev_priv->channels.ptr[i];
/* only channels that still have software fifo state */
312 if (!chan || !chan->fifo_priv)
315 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
316 (chan->ramin->vinst >> 12));
317 nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
319 nvc0_fifo_playlist_update(dev);
/* Names for the fault-reporting units decoded in nvc0_fifo_isr_vm_fault.
 * NOTE(review): this view shows only two of the table's entries. */
324 struct nouveau_enum nvc0_fifo_fault_unit[] = {
326 { 0x03, "PEEPHOLE" },
332 { 0x13, "PCOUNTER" },
/* Names for the fault reason code (low nibble of the fault status reg). */
340 struct nouveau_enum nvc0_fifo_fault_reason[] = {
341 { 0x00, "PT_NOT_PRESENT" },
342 { 0x01, "PT_TOO_SHORT" },
343 { 0x02, "PAGE_NOT_PRESENT" },
344 { 0x03, "VM_LIMIT_EXCEEDED" },
345 { 0x04, "NO_CHANNEL" },
346 { 0x05, "PAGE_SYSTEM_ONLY" },
347 { 0x06, "PAGE_READ_ONLY" },
348 { 0x0a, "COMPRESSED_SYSRAM" },
349 { 0x0c, "INVALID_STORAGE_TYPE" },
/* Names for HUB-side fault clients (used when status bit 6 is set).
 * NOTE(review): this view shows only part of the table. */
353 struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
356 { 0x04, "DISPATCH" },
359 { 0x07, "BAR_READ" },
360 { 0x08, "BAR_WRITE" },
364 { 0x11, "PCOUNTER" },
367 { 0x15, "CCACHE_POST" },
/* Names for GPC-side fault clients (used when status bit 6 is clear).
 * NOTE(review): the table's entries are not visible in this view. */
371 struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
/* Bit names for the per-subfifo interrupt status register (0x040108). */
379 struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
380 /* { 0x00008000, "" } seen with null ib push */
381 { 0x00200000, "ILLEGAL_MTHD" },
382 { 0x00800000, "EMPTY_SUBC" },
/* Decode and log a VM fault reported by one fault unit: read the fault
 * info registers at 0x2800 + unit*0x10 and print the faulting address,
 * direction, reason, unit and client using the name tables above.
 * Logging only -- no recovery is attempted here.
 */
387 nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
389 u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10)); /* channel instance >>12 */
390 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10)); /* fault VA, low 32 bits */
391 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10)); /* fault VA, high 32 bits */
392 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10)); /* status/flags */
393 u32 client = (stat & 0x00001f00) >> 8;
/* bit 7 distinguishes write faults from read faults */
395 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
396 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
397 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
399 nouveau_enum_print(nvc0_fifo_fault_unit, unit);
/* bit 6 set: HUB client, otherwise a GPC client (GPC id in bits 24:28) */
400 if (stat & 0x00000040) {
402 nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
404 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
405 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
407 printk(" on channel 0x%010llx\n", (u64)inst << 12);
/* Complete a pending page flip on behalf of channel 'chid', called from
 * the subfifo interrupt handler when the flip-completion method fires.
 * Looks the channel up under channels.lock and hands it to
 * nouveau_finish_page_flip().
 * NOTE(review): chid is u32, so the "chid >= 0" half of the range check is
 * always true -- harmless but dead; verify against the full file.
 * NOTE(review): the ret declaration and return are not visible here.
 */
411 nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
413 struct drm_nouveau_private *dev_priv = dev->dev_private;
414 struct nouveau_channel *chan = NULL;
418 spin_lock_irqsave(&dev_priv->channels.lock, flags);
419 if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
420 chan = dev_priv->channels.ptr[chid];
422 ret = nouveau_finish_page_flip(chan, NULL);
424 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
/* Handle an interrupt from one PSUBFIFO unit: decode the offending
 * method/data/channel, special-case the page-flip completion method,
 * log anything unexplained, then ack the interrupt.
 */
429 nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
431 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000)); /* INTR status */
432 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000)); /* method addr/subc */
433 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000)); /* method data */
434 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
/* NOTE(review): subc is left un-shifted here -- expected "(addr &
 * 0x00070000) >> 16" to get the subchannel number; as written the value
 * printed below is the raw masked field. Verify against the full file. */
435 u32 subc = (addr & 0x00070000);
436 u32 mthd = (addr & 0x00003ffc);
/* ILLEGAL_MTHD: software methods, e.g. 0x0054 = page-flip completion */
439 if (stat & 0x00200000) {
440 if (mthd == 0x0054) {
441 if (!nvc0_fifo_page_flip(dev, chid))
/* anything not handled above gets logged for debugging */
447 NV_INFO(dev, "PFIFO%d:", unit);
448 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
449 NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
450 unit, chid, subc, mthd, data);
/* drop the bad method and ack the interrupt bits we saw */
453 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
454 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
/* Top-level PFIFO interrupt handler (registered on irq line 8 in
 * nvc0_fifo_create). Dispatches VM faults and subfifo interrupts to the
 * helpers above; unknown status bits are logged, acked, and -- if still
 * unhandled -- masked off entirely so they cannot storm.
 */
458 nvc0_fifo_isr(struct drm_device *dev)
460 u32 stat = nv_rd32(dev, 0x002100);
462 if (stat & 0x00000100) {
463 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
464 nv_wr32(dev, 0x002100, 0x00000100);
/* bit 28: VM fault -- one bit per reporting unit in 0x259c */
468 if (stat & 0x10000000) {
469 u32 units = nv_rd32(dev, 0x00259c);
474 nvc0_fifo_isr_vm_fault(dev, i);
478 nv_wr32(dev, 0x00259c, units); /* ack the units we serviced */
/* bit 29: subfifo interrupt -- one bit per subfifo in 0x25a0 */
482 if (stat & 0x20000000) {
483 u32 units = nv_rd32(dev, 0x0025a0);
488 nvc0_fifo_isr_subfifo_intr(dev, i);
492 nv_wr32(dev, 0x0025a0, units); /* ack the units we serviced */
496 if (stat & 0x40000000) {
497 NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
/* NOTE(review): masking with (0, 0) is a read-modify-write that changes
 * nothing -- possibly a deliberate ack-by-rewrite of 0x2a00; verify. */
498 nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
/* last resort: ack and mask everything still pending */
503 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
504 nv_wr32(dev, 0x002100, stat);
505 nv_wr32(dev, 0x002140, 0);