drm/nouveau/fifo: remove all the "special" engine hooks
drivers/gpu/drm/nouveau/nv50_graph.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
#include "nv50_evo.h"

struct nv50_graph_engine {
        struct nouveau_exec_engine base;
        u32 ctxprog[512];
        u32 ctxprog_size;
        u32 grctx_size;
};

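/* Enable or disable PGRAPH's access to the command FIFO by toggling both
 * enable bits in 0x400500; callers use this to quiesce the engine around
 * context load/unload.
 */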
static void
nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
{
        const uint32_t mask = 0x00010001;

        if (enabled)
                nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
        else
                nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
}

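/* Return the channel whose context is currently resident in PGRAPH, found
 * by matching CTXCTL_CUR against each channel's instance memory address.
 */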
static struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t inst;
        int i;

        /* Be sure we're not in the middle of a context switch or bad things
         * will happen, such as unloading the wrong pgraph context.
         */
        if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
                NV_ERROR(dev, "Ctxprog is still running\n");

        inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
                return NULL;
        inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;

        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                struct nouveau_channel *chan = dev_priv->channels.ptr[i];

                if (chan && chan->ramin && chan->ramin->vinst == inst)
                        return chan;
        }

        return NULL;
}

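/* Point the context switch logic at the context at instance address 'inst'
 * and kick a context load.
 */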
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
        uint32_t fifo = nv_rd32(dev, 0x400500);

        nv_wr32(dev, 0x400500, fifo & ~1);
        nv_wr32(dev, 0x400784, inst);
        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
        nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
        nv_wr32(dev, 0x400040, 0xffffffff);
        (void)nv_rd32(dev, 0x400040);
        nv_wr32(dev, 0x400040, 0x00000000);
        nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

        if (nouveau_wait_for_idle(dev))
                nv_wr32(dev, 0x40032c, inst | (1<<31));
        nv_wr32(dev, 0x400500, fifo);

        return 0;
}

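/* Save the currently resident context back to instance memory and mark
 * PGRAPH as having no context loaded.
 */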
static int
nv50_graph_unload_context(struct drm_device *dev)
{
        uint32_t inst;

        inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
                return 0;
        inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

        nouveau_wait_for_idle(dev);
        nv_wr32(dev, 0x400784, inst);
        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
        nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
        nouveau_wait_for_idle(dev);

        nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
        return 0;
}

static int
nv50_graph_init(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
        u32 units = nv_rd32(dev, 0x001540);
        int i;

        NV_DEBUG(dev, "\n");

        /* master reset */
        nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
        nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
        nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */

        /* reset/enable traps and interrupts */
        nv_wr32(dev, 0x400804, 0xc0000000);
        nv_wr32(dev, 0x406800, 0xc0000000);
        nv_wr32(dev, 0x400c04, 0xc0000000);
        nv_wr32(dev, 0x401800, 0xc0000000);
        nv_wr32(dev, 0x405018, 0xc0000000);
        nv_wr32(dev, 0x402000, 0xc0000000);
        for (i = 0; i < 16; i++) {
                if (!(units & (1 << i)))
                        continue;

                if (dev_priv->chipset < 0xa0) {
                        nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
                        nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
                        nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
                } else {
                        nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
                        nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
                        nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
                }
        }

        nv_wr32(dev, 0x400108, 0xffffffff);
        nv_wr32(dev, 0x400138, 0xffffffff);
        nv_wr32(dev, 0x400100, 0xffffffff);
        nv_wr32(dev, 0x40013c, 0xffffffff);
        nv_wr32(dev, 0x400500, 0x00010001);

        /* upload context program, initialise ctxctl defaults */
        nv_wr32(dev, 0x400324, 0x00000000);
        for (i = 0; i < pgraph->ctxprog_size; i++)
                nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
        nv_wr32(dev, 0x400824, 0x00000000);
        nv_wr32(dev, 0x400828, 0x00000000);
        nv_wr32(dev, 0x40082c, 0x00000000);
        nv_wr32(dev, 0x400830, 0x00000000);
        nv_wr32(dev, 0x400724, 0x00000000);
        nv_wr32(dev, 0x40032c, 0x00000000);
        nv_wr32(dev, 0x400320, 4);      /* CTXCTL_CMD = NEWCTXDMA */

        /* some unknown zcull magic */
        switch (dev_priv->chipset & 0xf0) {
        case 0x50:
        case 0x80:
        case 0x90:
                nv_wr32(dev, 0x402ca8, 0x00000800);
                break;
        case 0xa0:
        default:
                nv_wr32(dev, 0x402cc0, 0x00000000);
                if (dev_priv->chipset == 0xa0 ||
                    dev_priv->chipset == 0xaa ||
                    dev_priv->chipset == 0xac) {
                        nv_wr32(dev, 0x402ca8, 0x00000802);
                } else {
                        nv_wr32(dev, 0x402cc0, 0x00000000);
                        nv_wr32(dev, 0x402ca8, 0x00000002);
                }

                break;
        }

        /* zero out zcull regions */
        for (i = 0; i < 8; i++) {
                nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
                nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
                nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
                nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
        }

        return 0;
}

static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
        nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
        if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
                nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
                return -EBUSY;
        }
        nv50_graph_unload_context(dev);
        nv_wr32(dev, 0x40013c, 0x00000000);
        return 0;
}

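/* Allocate a per-channel PGRAPH context: create the grctx object, point the
 * channel's instance memory header at it and fill in the default context
 * values.
 */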
static int
nv50_graph_context_new(struct nouveau_channel *chan, int engine)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramin = chan->ramin;
        struct nouveau_gpuobj *grctx = NULL;
        struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
        int hdr, ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
                                 NVOBJ_FLAG_ZERO_ALLOC |
                                 NVOBJ_FLAG_ZERO_FREE, &grctx);
        if (ret)
                return ret;

        hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
        nv_wo32(ramin, hdr + 0x00, 0x00190002);
        nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
        nv_wo32(ramin, hdr + 0x08, grctx->vinst);
        nv_wo32(ramin, hdr + 0x0c, 0);
        nv_wo32(ramin, hdr + 0x10, 0);
        nv_wo32(ramin, hdr + 0x14, 0x00010000);

        nv50_grctx_fill(dev, grctx);
        nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);

        dev_priv->engine.instmem.flush(dev);

        atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
        chan->engctx[NVOBJ_ENGINE_GR] = grctx;
        return 0;
}

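/* Tear down a channel's PGRAPH context: quiesce the engine, unload the
 * context if it is currently resident, then clear the instance memory
 * header and drop the grctx object.
 */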
static void
nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
        struct nouveau_gpuobj *grctx = chan->engctx[engine];
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
        unsigned long flags;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        if (!chan->ramin)
                return;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_wr32(dev, NV03_PFIFO_CACHES, 0);
        nv50_graph_fifo_access(dev, false);

        if (nv50_graph_channel(dev) == chan)
                nv50_graph_unload_context(dev);

        for (i = hdr; i < hdr + 24; i += 4)
                nv_wo32(chan->ramin, i, 0);
        dev_priv->engine.instmem.flush(dev);

        nv50_graph_fifo_access(dev, true);
        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        nouveau_gpuobj_ref(NULL, &grctx);

        atomic_dec(&chan->vm->engref[engine]);
        chan->engctx[engine] = NULL;
}

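/* Create a small instance-memory object describing a graphics object of the
 * given class and hook it into the channel's RAMHT under 'handle'.
 */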
static int
nv50_graph_object_new(struct nouveau_channel *chan, int engine,
                      u32 handle, u16 class)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
        if (ret)
                return ret;
        obj->engine = 1;
        obj->class  = class;

        nv_wo32(obj, 0x00, class);
        nv_wo32(obj, 0x04, 0x00000000);
        nv_wo32(obj, 0x08, 0x00000000);
        nv_wo32(obj, 0x0c, 0x00000000);
        dev_priv->engine.instmem.flush(dev);

        ret = nouveau_ramht_insert(chan, handle, obj);
        nouveau_gpuobj_ref(NULL, &obj);
        return ret;
}

static void
nv50_graph_context_switch(struct drm_device *dev)
{
        uint32_t inst;

        nv50_graph_unload_context(dev);

        inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
        inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
        nv50_graph_do_load_context(dev, inst);

        nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
                NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}

static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
        nv50_vm_flush_engine(dev, 0);
}

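/* NV84+ variant: wait (up to ~2 seconds) for the PGRAPH status registers to
 * report idle before issuing the VM flush.
 */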
static void
nv84_graph_tlb_flush(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
        bool idle, timeout = false;
        unsigned long flags;
        u64 start;
        u32 tmp;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_mask(dev, 0x400500, 0x00000001, 0x00000000);

        start = ptimer->read(dev);
        do {
                idle = true;

                for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
                        if ((tmp & 7) == 1)
                                idle = false;
                }

                for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
                        if ((tmp & 7) == 1)
                                idle = false;
                }

                for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
                        if ((tmp & 7) == 1)
                                idle = false;
                }
        } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));

        if (timeout) {
                NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
                              "0x%08x 0x%08x 0x%08x 0x%08x\n",
                         nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
                         nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
        }

        nv50_vm_flush_engine(dev, 0);

        nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}

static struct nouveau_enum nv50_mp_exec_error_names[] = {
        { 3, "STACK_UNDERFLOW", NULL },
        { 4, "QUADON_ACTIVE", NULL },
        { 8, "TIMEOUT", NULL },
        { 0x10, "INVALID_OPCODE", NULL },
        { 0x40, "BREAKPOINT", NULL },
        {}
};

static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
        { 0x00000001, "NOTIFY" },
        { 0x00000002, "IN" },
        { 0x00000004, "OUT" },
        {}
};

static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
        { 0x00000001, "FAULT" },
        {}
};

static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
        { 0x00000001, "FAULT" },
        {}
};

static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
        { 0x00000001, "FAULT" },
        {}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
        { 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
        { 0x00000004, "INVALID_VALUE", NULL },
        { 0x00000005, "INVALID_ENUM", NULL },
        { 0x00000008, "INVALID_OBJECT", NULL },
        { 0x00000009, "READ_ONLY_OBJECT", NULL },
        { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
        { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
        { 0x0000000c, "INVALID_BITFIELD", NULL },
        { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
        { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
        { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
        { 0x00000010, "RT_DOUBLE_BIND", NULL },
        { 0x00000011, "RT_TYPES_MISMATCH", NULL },
        { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
        { 0x00000015, "FP_TOO_FEW_REGS", NULL },
        { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
        { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
        { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
        { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
        { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
        { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
        { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
        { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
        { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
        { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
        { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
        { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
        { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
        { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
        { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
        { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
        { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
        { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
        { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
        { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
        { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
        { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
        { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
        { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
        { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
        { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
        {}
};

static struct nouveau_bitfield nv50_graph_intr[] = {
        { 0x00000001, "NOTIFY" },
        { 0x00000002, "COMPUTE_QUERY" },
        { 0x00000010, "ILLEGAL_MTHD" },
        { 0x00000020, "ILLEGAL_CLASS" },
        { 0x00000040, "DOUBLE_NOTIFY" },
        { 0x00001000, "CONTEXT_SWITCH" },
        { 0x00010000, "BUFFER_NOTIFY" },
        { 0x00100000, "DATA_ERROR" },
        { 0x00200000, "TRAP" },
        { 0x01000000, "SINGLE_STEP" },
        {}
};

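/* Decode and clear the per-MP execution trap status for the given TP,
 * printing the error, PC and opcode when 'display' is set.
 */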
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t units = nv_rd32(dev, 0x1540);
        uint32_t addr, mp10, status, pc, oplow, ophigh;
        int i;
        int mps = 0;
        for (i = 0; i < 4; i++) {
                if (!(units & 1 << (i+24)))
                        continue;
                if (dev_priv->chipset < 0xa0)
                        addr = 0x408200 + (tpid << 12) + (i << 7);
                else
                        addr = 0x408100 + (tpid << 11) + (i << 7);
                mp10 = nv_rd32(dev, addr + 0x10);
                status = nv_rd32(dev, addr + 0x14);
                if (!status)
                        continue;
                if (display) {
                        nv_rd32(dev, addr + 0x20);
                        pc = nv_rd32(dev, addr + 0x24);
                        oplow = nv_rd32(dev, addr + 0x70);
                        ophigh = nv_rd32(dev, addr + 0x74);
                        NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
                                        "TP %d MP %d: ", tpid, i);
                        nouveau_enum_print(nv50_mp_exec_error_names, status);
                        printk(" at %06x warp %d, opcode %08x %08x\n",
                                        pc&0xffffff, pc >> 24,
                                        oplow, ophigh);
                }
                nv_wr32(dev, addr + 0x10, mp10);
                nv_wr32(dev, addr + 0x14, 0);
                mps++;
        }
        if (!mps && display)
                NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
                                "No MPs claiming errors?\n", tpid);
}

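/* Walk all enabled TPs and decode the trap status of the given type
 * (texture, MP or TPDMA), using the pre- or post-NVA0 register layout
 * as appropriate.
 */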
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
                uint32_t ustatus_new, int display, const char *name)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int tps = 0;
        uint32_t units = nv_rd32(dev, 0x1540);
        int i, r;
        uint32_t ustatus_addr, ustatus;
        for (i = 0; i < 16; i++) {
                if (!(units & (1 << i)))
                        continue;
                if (dev_priv->chipset < 0xa0)
                        ustatus_addr = ustatus_old + (i << 12);
                else
                        ustatus_addr = ustatus_new + (i << 11);
                ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
                if (!ustatus)
                        continue;
                tps++;
                switch (type) {
                case 6: /* texture error... unknown for now */
                        if (display) {
                                NV_ERROR(dev, "magic set %d:\n", i);
                                for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
                                        NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
                                                nv_rd32(dev, r));
                        }
                        break;
                case 7: /* MP error */
                        if (ustatus & 0x04030000) {
                                nv50_pgraph_mp_trap(dev, i, display);
                                ustatus &= ~0x04030000;
                        }
                        break;
                case 8: /* TPDMA error */
                        {
                        uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
                        uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
                        uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
                        uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
                        uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
                        uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
                        uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
                        /* 2d engine destination */
                        if (ustatus & 0x00000010) {
                                if (display) {
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
                                                        i, e14, e10);
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
                                                        i, e0c, e18, e1c, e20, e24);
                                }
                                ustatus &= ~0x00000010;
                        }
                        /* Render target */
                        if (ustatus & 0x00000040) {
                                if (display) {
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
                                                        i, e14, e10);
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
                                                        i, e0c, e18, e1c, e20, e24);
                                }
                                ustatus &= ~0x00000040;
                        }
                        /* CUDA memory: l[], g[] or stack. */
                        if (ustatus & 0x00000080) {
                                if (display) {
                                        if (e18 & 0x80000000) {
                                                /* g[] read fault? */
                                                NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
                                                                i, e14, e10 | ((e18 >> 24) & 0x1f));
                                                e18 &= ~0x1f000000;
                                        } else if (e18 & 0xc) {
                                                /* g[] write fault? */
                                                NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
                                                                i, e14, e10 | ((e18 >> 7) & 0x1f));
                                                e18 &= ~0x00000f80;
                                        } else {
                                                NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
                                                                i, e14, e10);
                                        }
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
                                                        i, e0c, e18, e1c, e20, e24);
                                }
                                ustatus &= ~0x00000080;
                        }
                        }
                        break;
                }
                if (ustatus) {
                        if (display)
                                NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
                }
                nv_wr32(dev, ustatus_addr, 0xc0000000);
        }

        if (!tps && display)
                NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}

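/* Decode the PGRAPH trap status register (0x400108) unit by unit, print what
 * is known about each trap and acknowledge it.
 */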
static int
nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
{
        u32 status = nv_rd32(dev, 0x400108);
        u32 ustatus;

        if (!status && display) {
                NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
                return 1;
        }

        /* DISPATCH: Relays commands to other units and handles NOTIFY,
         * COND, QUERY. If you get a trap from it, the command is still stuck
         * in DISPATCH and you need to do something about it. */
        if (status & 0x001) {
                ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
                if (!ustatus && display) {
                        NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
                }

                nv_wr32(dev, 0x400500, 0x00000000);

                /* Known to be triggered by screwed up NOTIFY and COND... */
                if (ustatus & 0x00000001) {
                        u32 addr = nv_rd32(dev, 0x400808);
                        u32 subc = (addr & 0x00070000) >> 16;
                        u32 mthd = (addr & 0x00001ffc);
                        u32 datal = nv_rd32(dev, 0x40080c);
                        u32 datah = nv_rd32(dev, 0x400810);
                        u32 class = nv_rd32(dev, 0x400814);
                        u32 r848 = nv_rd32(dev, 0x400848);

                        NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
                        if (display && (addr & 0x80000000)) {
                                NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
                                             "subc %d class 0x%04x mthd 0x%04x "
                                             "data 0x%08x%08x "
                                             "400808 0x%08x 400848 0x%08x\n",
                                        chid, inst, subc, class, mthd, datah,
                                        datal, addr, r848);
                        } else
                        if (display) {
                                NV_INFO(dev, "PGRAPH - no stuck command?\n");
                        }

                        nv_wr32(dev, 0x400808, 0);
                        nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
                        nv_wr32(dev, 0x400848, 0);
                        ustatus &= ~0x00000001;
                }

                if (ustatus & 0x00000002) {
                        u32 addr = nv_rd32(dev, 0x40084c);
                        u32 subc = (addr & 0x00070000) >> 16;
                        u32 mthd = (addr & 0x00001ffc);
                        u32 data = nv_rd32(dev, 0x40085c);
                        u32 class = nv_rd32(dev, 0x400814);

                        NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
                        if (display && (addr & 0x80000000)) {
                                NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
                                             "subc %d class 0x%04x mthd 0x%04x "
                                             "data 0x%08x 40084c 0x%08x\n",
                                        chid, inst, subc, class, mthd,
                                        data, addr);
                        } else
                        if (display) {
                                NV_INFO(dev, "PGRAPH - no stuck command?\n");
                        }

                        nv_wr32(dev, 0x40084c, 0);
                        ustatus &= ~0x00000002;
                }

                if (ustatus && display) {
                        NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
                                      "0x%08x)\n", ustatus);
                }

                nv_wr32(dev, 0x400804, 0xc0000000);
                nv_wr32(dev, 0x400108, 0x001);
                status &= ~0x001;
                if (!status)
                        return 0;
        }

        /* M2MF: Memory to memory copy engine. */
        if (status & 0x002) {
                u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
                if (display) {
                        NV_INFO(dev, "PGRAPH - TRAP_M2MF");
                        nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
                                nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
                                nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));

                }

                /* No sane way found yet -- just reset the bugger. */
                nv_wr32(dev, 0x400040, 2);
                nv_wr32(dev, 0x400040, 0);
                nv_wr32(dev, 0x406800, 0xc0000000);
                nv_wr32(dev, 0x400108, 0x002);
                status &= ~0x002;
        }

        /* VFETCH: Fetches data from vertex buffers. */
        if (status & 0x004) {
                u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
                if (display) {
                        NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
                        nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
                                nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
                                nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
                }

                nv_wr32(dev, 0x400c04, 0xc0000000);
                nv_wr32(dev, 0x400108, 0x004);
                status &= ~0x004;
        }

        /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
        if (status & 0x008) {
                ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
                if (display) {
                        NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
                        nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
                                nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
                                nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));

                }

                /* No sane way found yet -- just reset the bugger. */
                nv_wr32(dev, 0x400040, 0x80);
                nv_wr32(dev, 0x400040, 0);
                nv_wr32(dev, 0x401800, 0xc0000000);
                nv_wr32(dev, 0x400108, 0x008);
                status &= ~0x008;
        }

        /* CCACHE: Handles code and c[] caches and fills them. */
        if (status & 0x010) {
                ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
                if (display) {
                        NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
                        nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
                                     " %08x %08x %08x\n",
                                nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
                                nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
                                nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
                                nv_rd32(dev, 0x40501c));

                }

                nv_wr32(dev, 0x405018, 0xc0000000);
                nv_wr32(dev, 0x400108, 0x010);
                status &= ~0x010;
        }

        /* Unknown, not seen yet... 0x402000 is the only trap status reg
         * remaining, so try to handle it anyway. Perhaps related to that
         * unknown DMA slot on tesla? */
        if (status & 0x20) {
                ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
                if (display)
                        NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
                nv_wr32(dev, 0x402000, 0xc0000000);
                /* no status modification on purpose */
        }

        /* TEXTURE: CUDA texturing units */
        if (status & 0x040) {
                nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
                                    "PGRAPH - TRAP_TEXTURE");
                nv_wr32(dev, 0x400108, 0x040);
                status &= ~0x040;
        }

        /* MP: CUDA execution engines. */
        if (status & 0x080) {
                nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
                                    "PGRAPH - TRAP_MP");
                nv_wr32(dev, 0x400108, 0x080);
                status &= ~0x080;
        }

        /* TPDMA:  Handles TP-initiated uncached memory accesses:
         * l[], g[], stack, 2d surfaces, render targets. */
        if (status & 0x100) {
                nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
                                    "PGRAPH - TRAP_TPDMA");
                nv_wr32(dev, 0x400108, 0x100);
                status &= ~0x100;
        }

        if (status) {
                if (display)
                        NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
                nv_wr32(dev, 0x400108, status);
        }

        return 1;
}

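/* Translate a RAMIN instance address back to a channel id for error
 * reporting.
 */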
int
nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                chan = dev_priv->channels.ptr[i];
                if (!chan || !chan->ramin)
                        continue;

                if (inst == chan->ramin->vinst)
                        break;
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return i;
}

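/* Top-level PGRAPH interrupt handler: dispatches methods for ILLEGAL_MTHD,
 * performs context switches, and reports data errors and traps.
 */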
static void
nv50_graph_isr(struct drm_device *dev)
{
        u32 stat;

        while ((stat = nv_rd32(dev, 0x400100))) {
                u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
                u32 chid = nv50_graph_isr_chid(dev, inst);
                u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
                u32 subc = (addr & 0x00070000) >> 16;
                u32 mthd = (addr & 0x00001ffc);
                u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
                u32 class = nv_rd32(dev, 0x400814);
                u32 show = stat;

                if (stat & 0x00000010) {
                        if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
                                                       mthd, data))
                                show &= ~0x00000010;
                }

                if (stat & 0x00001000) {
                        nv_wr32(dev, 0x400500, 0x00000000);
                        nv_wr32(dev, 0x400100, 0x00001000);
                        nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
                        nv50_graph_context_switch(dev);
                        stat &= ~0x00001000;
                        show &= ~0x00001000;
                }

                show = (show && nouveau_ratelimit()) ? show : 0;

                if (show & 0x00100000) {
                        u32 ecode = nv_rd32(dev, 0x400110);
                        NV_INFO(dev, "PGRAPH - DATA_ERROR ");
                        nouveau_enum_print(nv50_data_error_names, ecode);
                        printk("\n");
                }

                if (stat & 0x00200000) {
                        if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
                                show &= ~0x00200000;
                }

                nv_wr32(dev, 0x400100, stat);
                nv_wr32(dev, 0x400500, 0x00010001);

                if (show) {
                        NV_INFO(dev, "PGRAPH -");
                        nouveau_bitfield_print(nv50_graph_intr, show);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
                                     "class 0x%04x mthd 0x%04x data 0x%08x\n",
                                chid, inst, subc, class, mthd, data);
                        nv50_fb_vm_trap(dev, 1);
                }
        }

        if (nv_rd32(dev, 0x400824) & (1 << 31))
                nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

static void
nv50_graph_destroy(struct drm_device *dev, int engine)
{
        struct nv50_graph_engine *pgraph = nv_engine(dev, engine);

        NVOBJ_ENGINE_DEL(dev, GR);

        nouveau_irq_unregister(dev, 12);
        kfree(pgraph);
}

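/* Construct the PGRAPH engine object: build the ctxprog, register the engine
 * hooks and interrupt handler, and declare the object classes supported by
 * this chipset.
 */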
int
nv50_graph_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_graph_engine *pgraph;
        int ret;

        pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
        if (!pgraph)
                return -ENOMEM;

        ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
                                  &pgraph->ctxprog_size,
                                  &pgraph->grctx_size);
        if (ret) {
                NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
                kfree(pgraph);
                return 0;
        }

        pgraph->base.destroy = nv50_graph_destroy;
        pgraph->base.init = nv50_graph_init;
        pgraph->base.fini = nv50_graph_fini;
        pgraph->base.context_new = nv50_graph_context_new;
        pgraph->base.context_del = nv50_graph_context_del;
        pgraph->base.object_new = nv50_graph_object_new;
        if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
                pgraph->base.tlb_flush = nv50_graph_tlb_flush;
        else
                pgraph->base.tlb_flush = nv84_graph_tlb_flush;

        nouveau_irq_register(dev, 12, nv50_graph_isr);

        NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
        NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
        NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */

        /* tesla */
        if (dev_priv->chipset == 0x50)
                NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
        else
        if (dev_priv->chipset < 0xa0)
                NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
        else {
                switch (dev_priv->chipset) {
                case 0xa0:
                case 0xaa:
                case 0xac:
                        NVOBJ_CLASS(dev, 0x8397, GR);
                        break;
                case 0xa3:
                case 0xa5:
                case 0xa8:
                        NVOBJ_CLASS(dev, 0x8597, GR);
                        break;
                case 0xaf:
                        NVOBJ_CLASS(dev, 0x8697, GR);
                        break;
                }
        }

        /* compute */
        NVOBJ_CLASS(dev, 0x50c0, GR);
        if (dev_priv->chipset  > 0xa0 &&
            dev_priv->chipset != 0xaa &&
            dev_priv->chipset != 0xac)
                NVOBJ_CLASS(dev, 0x85c0, GR);

        return 0;
}