drm/nouveau/fifo: remove all the "special" engine hooks
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / gpu / drm / nouveau / nv04_fifo.c
1 /*
2  * Copyright (C) 2007 Ben Skeggs.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining
6  * a copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sublicense, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial
15  * portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  *
25  */
26
27 #include "drmP.h"
28 #include "drm.h"
29 #include "nouveau_drv.h"
30 #include "nouveau_ramht.h"
31 #include "nouveau_util.h"
32
33 #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
34 #define NV04_RAMFC__SIZE 32
35 #define NV04_RAMFC_DMA_PUT                                       0x00
36 #define NV04_RAMFC_DMA_GET                                       0x04
37 #define NV04_RAMFC_DMA_INSTANCE                                  0x08
38 #define NV04_RAMFC_DMA_STATE                                     0x0C
39 #define NV04_RAMFC_DMA_FETCH                                     0x10
40 #define NV04_RAMFC_ENGINE                                        0x14
41 #define NV04_RAMFC_PULL1_ENGINE                                  0x18
42
43 #define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
44 #define RAMFC_RD(offset)      nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
45
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
{
	/* Enable or disable the PFIFO CACHE1 puller.  Returns the previous
	 * enable state (bit 0 of PULL0) so callers can restore it later.
	 */
	int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);

	if (!enable) {
		/* In some cases the PFIFO puller may be left in an
		 * inconsistent state if you try to stop it when it's
		 * busy translating handles. Sometimes you get a
		 * PFIFO_CACHE_ERROR, sometimes it just fails silently
		 * sending incorrect instance offsets to PGRAPH after
		 * it's started up again. To avoid the latter we
		 * invalidate the most recently calculated instance.
		 */
		if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
			     NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
			NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");

		/* A failed hash lookup may have been latched while the
		 * puller idled; raise CACHE_ERROR so it gets handled.
		 */
		if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
		    NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_CACHE_ERROR);

		/* Invalidate the most recently resolved hash entry. */
		nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
	}

	return pull & 1;
}
74
75 #ifdef __BIG_ENDIAN
76 #define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
77 #else
78 #define DMA_FETCH_ENDIANNESS 0
79 #endif
80
int
nv04_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	int ret;

	/* Allocate this channel's RAMFC slot in instance memory; it holds
	 * the saved FIFO state that is swapped in/out on channel switches.
	 */
	ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
						NV04_RAMFC__SIZE,
						NVOBJ_FLAG_ZERO_ALLOC |
						NVOBJ_FLAG_ZERO_FREE,
						&chan->ramfc);
	if (ret)
		return ret;

	/* Map the per-channel USER area out of BAR0.
	 * NOTE(review): chan->ramfc is not released on this error path;
	 * presumably the caller's failure handling ends up in
	 * destroy_context, which drops the ref — verify against the
	 * channel-creation code.
	 */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV03_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* Setup initial state: both pushbuf pointers start at the base
	 * (empty pushbuf), instance points at the pushbuf DMA object.
	 */
	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
	RAMFC_WR(DMA_GET, chan->pushbuf_base);
	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
	RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
			     DMA_FETCH_ENDIANNESS));

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
120
void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;

	/* Disable channel switching while the context is torn down. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wr32(dev, NV03_PFIFO_CACHES, 0);

	/* Unload the context if it's the currently active one */
	if ((nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0xf) == chan->id) {
		/* Halt the pusher and puller before saving state, then
		 * restart them once CACHE1 no longer points at us.
		 */
		nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
		nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
		pfifo->unload_context(dev);
		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* Keep it from being rescheduled */
	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the channel resources */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}
	nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
154
static void
nv04_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV04_RAMFC(chid), tmp;

	/* Copy channel <chid>'s saved FIFO state out of its RAMFC slot
	 * into the live CACHE1 registers.  The offsets match the
	 * NV04_RAMFC_* layout defined at the top of this file.
	 */
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	/* DMA_INSTANCE word packs instance (low 16) and dcount (high 16). */
	tmp = nv_ri32(dev, fc + 8);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));

	/* Start with an empty CACHE1. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
174
175 int
176 nv04_fifo_load_context(struct nouveau_channel *chan)
177 {
178         uint32_t tmp;
179
180         nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
181                            NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
182         nv04_fifo_do_load_context(chan->dev, chan->id);
183         nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
184
185         /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
186         tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
187         nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
188
189         return 0;
190 }
191
int
nv04_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan = NULL;
	uint32_t tmp;
	int chid;

	/* Which channel currently owns CACHE1?  chid is masked to 4 bits,
	 * so the "chid < 0" test below is dead code (kept as-is).
	 */
	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0xf;
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;

	chan = dev_priv->channels.ptr[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}

	/* Save the live CACHE1 state back into the channel's RAMFC slot
	 * (RAMFC_WR writes chan->ramfc at the named offset).
	 */
	RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	/* Re-pack dcount (high 16) and instance (low 16) into one word,
	 * mirroring nv04_fifo_do_load_context()'s unpacking.
	 */
	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
	tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
	RAMFC_WR(DMA_INSTANCE, tmp);
	RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
	RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));

	/* Park the hardware on the last (reserved/null) channel. */
	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
	return 0;
}
225
static void
nv04_fifo_init_reset(struct drm_device *dev)
{
	/* Pulse the PFIFO enable bit in PMC to reset the unit. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

	/* Bring the DMA pusher/puller registers to a known state. */
	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x002044, 0x0101ffff);
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003000, 0x00000000);
	nv_wr32(dev, 0x003050, 0x00000000);
	nv_wr32(dev, 0x003200, 0x00000000);
	nv_wr32(dev, 0x003250, 0x00000000);
	nv_wr32(dev, 0x003220, 0x00000000);

	/* NOTE(review): 0x003250 is cleared a second time here; looks
	 * redundant, but the write sequence is preserved as-is since
	 * register writes may have hardware side effects.
	 */
	nv_wr32(dev, 0x003250, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
	nv_wr32(dev, 0x003210, 0x00000000);
}
248
249 static void
250 nv04_fifo_init_ramxx(struct drm_device *dev)
251 {
252         struct drm_nouveau_private *dev_priv = dev->dev_private;
253
254         nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
255                                        ((dev_priv->ramht->bits - 9) << 16) |
256                                        (dev_priv->ramht->gpuobj->pinst >> 8));
257         nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
258         nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
259 }
260
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
	/* Hook PMC interrupt line 8 up to the PFIFO handler, then ack
	 * any stale interrupts (0x2100) and unmask them all (0x2140).
	 */
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}
268
269 int
270 nv04_fifo_init(struct drm_device *dev)
271 {
272         struct drm_nouveau_private *dev_priv = dev->dev_private;
273         struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
274         int i;
275
276         nv04_fifo_init_reset(dev);
277         nv04_fifo_init_ramxx(dev);
278
279         nv04_fifo_do_load_context(dev, pfifo->channels - 1);
280         nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
281
282         nv04_fifo_init_intr(dev);
283         nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
284         nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
285         nv_wr32(dev, NV03_PFIFO_CACHES, 1);
286
287         for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
288                 if (dev_priv->channels.ptr[i]) {
289                         uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
290                         nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
291                 }
292         }
293
294         return 0;
295 }
296
void
nv04_fifo_fini(struct drm_device *dev)
{
	/* Mask all PFIFO interrupts (0x2140 is the same enable register
	 * nv04_fifo_init_intr() sets to 0xffffffff) before dropping the
	 * handler for PMC interrupt line 8.
	 */
	nv_wr32(dev, 0x2140, 0x00000000);
	nouveau_irq_unregister(dev, 8);
}
303
304 static bool
305 nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
306 {
307         struct drm_nouveau_private *dev_priv = dev->dev_private;
308         struct nouveau_channel *chan = NULL;
309         struct nouveau_gpuobj *obj;
310         unsigned long flags;
311         const int subc = (addr >> 13) & 0x7;
312         const int mthd = addr & 0x1ffc;
313         bool handled = false;
314         u32 engine;
315
316         spin_lock_irqsave(&dev_priv->channels.lock, flags);
317         if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
318                 chan = dev_priv->channels.ptr[chid];
319         if (unlikely(!chan))
320                 goto out;
321
322         switch (mthd) {
323         case 0x0000: /* bind object to subchannel */
324                 obj = nouveau_ramht_find(chan, data);
325                 if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
326                         break;
327
328                 chan->sw_subchannel[subc] = obj->class;
329                 engine = 0x0000000f << (subc * 4);
330
331                 nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
332                 handled = true;
333                 break;
334         default:
335                 engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
336                 if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
337                         break;
338
339                 if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
340                                               mthd, data))
341                         handled = true;
342                 break;
343         }
344
345 out:
346         spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
347         return handled;
348 }
349
350 static const char *nv_dma_state_err(u32 state)
351 {
352         static const char * const desc[] = {
353                 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
354                 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
355         };
356         return desc[(state >> 29) & 0x7];
357 }
358
void
nv04_fifo_isr(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status, reassign;
	int cnt = 0;

	/* Remember whether channel reassignment (PFIFO_CACHES bit 0) was
	 * enabled so it can be restored after each handled interrupt.
	 */
	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	/* Loop until no interrupt bits remain, giving up after 100 spins
	 * to avoid wedging in interrupt context (see bottom of function).
	 */
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		/* Freeze channel switching while CACHE1 is poked. */
		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid  = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1);
		chid &= dev_priv->engine.fifo.channels - 1;
		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			/* CACHE1 method/data slots moved on NV40+. */
			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			/* Give the software-method handler first crack at
			 * the faulting method; log it if nobody claims it.
			 */
			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			/* Ack the interrupt and skip the offending entry:
			 * stop the pusher, advance GET past it with PUSH0
			 * disabled, invalidate the hash, then restart
			 * pusher and puller.
			 */
			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				/* NV50 keeps the high-order GET/PUT bits and
				 * the indirect-buffer pointers in separate
				 * registers.
				 */
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x (err: %s) Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						nv_dma_state_err(state),
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				/* Resynchronize GET with PUT to skip past
				 * the faulting command.
				 */
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
					chid, dma_get, dma_put, state,
					nv_dma_state_err(state), push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			/* Clear the DMA state, re-enable the pusher and
			 * ack the interrupt.
			 */
			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			/* Ack the semaphore, step past the method and
			 * restart the puller.
			 */
			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			/* Bit 4 reports VM faults through PFIFO on NV50. */
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, nouveau_ratelimit());
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			/* Unknown bits: log (ratelimited) and ack them so
			 * the loop doesn't spin on them forever.
			 */
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		/* Interrupt storm: mask all PFIFO (0x2140) and all PMC
		 * (0x140) interrupts to keep the machine alive.
		 */
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}