drm/nouveau/fifo: remove all the "special" engine hooks
drivers/gpu/drm/nouveau/nv50_fifo.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"

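/*
 * Rebuild the list of runnable channels in the currently inactive playlist
 * buffer, flip the double-buffered playlists and hand the new one to PFIFO.
 * Channels 0 and 127 are reserved dummy channels and are never scheduled.
 */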
static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_gpuobj *cur;
        int i, nr;

        NV_DEBUG(dev, "\n");

        cur = pfifo->playlist[pfifo->cur_playlist];
        pfifo->cur_playlist = !pfifo->cur_playlist;

        /* We never schedule channel 0 or 127 */
        for (i = 1, nr = 0; i < 127; i++) {
                if (dev_priv->channels.ptr[i] &&
                    dev_priv->channels.ptr[i]->ramfc) {
                        nv_wo32(cur, (nr * 4), i);
                        nr++;
                }
        }
        dev_priv->engine.instmem.flush(dev);

        nv_wr32(dev, 0x32f4, cur->vinst >> 12);
        nv_wr32(dev, 0x32ec, nr);
        nv_wr32(dev, 0x2500, 0x101);
}

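/*
 * nv50_fifo_channel_enable()/_disable() program a channel's entry in the
 * PFIFO context table.  Enabling points the entry at the channel's RAMFC in
 * instance memory (the address is shifted differently on the original NV50
 * than on G84 and later); disabling writes back the chipset-specific
 * instance mask, leaving the entry without a valid context.
 */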
static void
nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
        uint32_t inst;

        NV_DEBUG(dev, "ch%d\n", channel);

        if (dev_priv->chipset == 0x50)
                inst = chan->ramfc->vinst >> 12;
        else
                inst = chan->ramfc->vinst >> 8;

        nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
                     NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
}

static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t inst;

        NV_DEBUG(dev, "ch%d\n", channel);

        if (dev_priv->chipset == 0x50)
                inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
        else
                inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
        nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
}

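/* Pulse PFIFO's bit in PMC_ENABLE to reset the unit. */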
static void
nv50_fifo_init_reset(struct drm_device *dev)
{
        uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;

        NV_DEBUG(dev, "\n");

        nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
        nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
}

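/* Install the PFIFO interrupt handler, then ack and unmask all PFIFO interrupts. */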
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
        NV_DEBUG(dev, "\n");

        nouveau_irq_register(dev, 8, nv04_fifo_isr);
        nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
        nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}

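/*
 * Reprogram every context table entry from software state: enable the slots
 * that have a channel, disable the rest, then rebuild the playlist.
 */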
static void
nv50_fifo_init_context_table(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int i;

        NV_DEBUG(dev, "\n");

        for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
                if (dev_priv->channels.ptr[i])
                        nv50_fifo_channel_enable(dev, i);
                else
                        nv50_fifo_channel_disable(dev, i);
        }

        nv50_fifo_playlist_update(dev);
}

static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
        NV_DEBUG(dev, "\n");

        nv_wr32(dev, 0x250c, 0x6f3cfc34);
}

static void
nv50_fifo_init_regs(struct drm_device *dev)
{
        NV_DEBUG(dev, "\n");

        nv_wr32(dev, 0x2500, 0);
        nv_wr32(dev, 0x3250, 0);
        nv_wr32(dev, 0x3220, 0);
        nv_wr32(dev, 0x3204, 0);
        nv_wr32(dev, 0x3210, 0);
        nv_wr32(dev, 0x3270, 0);
        nv_wr32(dev, 0x2044, 0x01003fff);

        /* Enable dummy channels set up by nv50_instmem.c */
        nv50_fifo_channel_enable(dev, 0);
        nv50_fifo_channel_enable(dev, 127);
}

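/*
 * Bring up PFIFO.  The two playlist buffers are allocated only on first
 * initialisation; on subsequent calls (e.g. resume) they are reused and
 * only the hardware state is reprogrammed.
 */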
int
nv50_fifo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        int ret;

        NV_DEBUG(dev, "\n");

        if (pfifo->playlist[0]) {
                pfifo->cur_playlist = !pfifo->cur_playlist;
                goto just_reset;
        }

        ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC,
                                 &pfifo->playlist[0]);
        if (ret) {
                NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
                return ret;
        }

        ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC,
                                 &pfifo->playlist[1]);
        if (ret) {
                nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
                NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
                return ret;
        }

just_reset:
        nv50_fifo_init_reset(dev);
        nv50_fifo_init_intr(dev);
        nv50_fifo_init_context_table(dev);
        nv50_fifo_init_regs__nv(dev);
        nv50_fifo_init_regs(dev);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
        nv_wr32(dev, NV03_PFIFO_CACHES, 1);

        return 0;
}

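/*
 * Tear down PFIFO: mask its interrupts, unhook the handler and drop the
 * playlist buffers.  A no-op if initialisation never completed.
 */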
void
nv50_fifo_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;

        NV_DEBUG(dev, "\n");

        if (!pfifo->playlist[0])
                return;

        nv_wr32(dev, 0x2140, 0x00000000);
        nouveau_irq_unregister(dev, 8);

        nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
        nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}

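/*
 * Create a channel's fifo state: the RAMFC context block and the CACHE1
 * spill buffer (carved out of the channel's RAMIN on the original NV50,
 * separate objects on G84+), a mapping of the channel's USER control
 * window, and the initial RAMFC contents.  Finally the channel is enabled
 * and added to the playlist.
 */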
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc = NULL;
        uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
        unsigned long flags;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        if (dev_priv->chipset == 0x50) {
                ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
                                              chan->ramin->vinst, 0x100,
                                              NVOBJ_FLAG_ZERO_ALLOC |
                                              NVOBJ_FLAG_ZERO_FREE,
                                              &chan->ramfc);
                if (ret)
                        return ret;

                ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
                                              chan->ramin->vinst + 0x0400,
                                              4096, 0, &chan->cache);
                if (ret)
                        return ret;
        } else {
                ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
                if (ret)
                        return ret;

                ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
                                         0, &chan->cache);
                if (ret)
                        return ret;
        }
        ramfc = chan->ramfc;

        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV50_USER(chan->id), PAGE_SIZE);
        if (!chan->user)
                return -ENOMEM;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

        nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
        nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
                             (4 << 24) /* SEARCH_FULL */ |
                             (chan->ramht->gpuobj->cinst >> 4));
        nv_wo32(ramfc, 0x44, 0x01003fff);
        nv_wo32(ramfc, 0x60, 0x7fffffff);
        nv_wo32(ramfc, 0x40, 0x00000000);
        nv_wo32(ramfc, 0x7c, 0x30000001);
        nv_wo32(ramfc, 0x78, 0x00000000);
        nv_wo32(ramfc, 0x3c, 0x403f6078);
        nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
        nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
                drm_order(chan->dma.ib_max + 1) << 16);

        if (dev_priv->chipset != 0x50) {
                nv_wo32(chan->ramin, 0, chan->id);
                nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);

                nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
                nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
        }

        dev_priv->engine.instmem.flush(dev);

        nv50_fifo_channel_enable(dev, chan->id);
        nv50_fifo_playlist_update(dev);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        return 0;
}

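/*
 * Destroy a channel's fifo state.  If the channel is currently resident in
 * CACHE1 it is unloaded first.  A temporary reference keeps the RAMFC alive
 * until the channel has been disabled and the playlist rebuilt, so PFIFO is
 * never left pointing at freed memory.
 */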
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_gpuobj *ramfc = NULL;
        unsigned long flags;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_wr32(dev, NV03_PFIFO_CACHES, 0);

        /* Unload the context if it's the currently active one */
        if ((nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0x7f) == chan->id) {
                nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
                nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
                nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
                pfifo->unload_context(dev);
                nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
                nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
        }

        /* This will ensure the channel is seen as disabled. */
        nouveau_gpuobj_ref(chan->ramfc, &ramfc);
        nouveau_gpuobj_ref(NULL, &chan->ramfc);
        nv50_fifo_channel_disable(dev, chan->id);

        /* Dummy channel, also used on ch 127 */
        if (chan->id == 0)
                nv50_fifo_channel_disable(dev, 127);
        nv50_fifo_playlist_update(dev);

        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Free the channel resources */
        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }
        nouveau_gpuobj_ref(NULL, &ramfc);
        nouveau_gpuobj_ref(NULL, &chan->cache);
}

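/*
 * Load a channel's saved state from its RAMFC into the live PFIFO CACHE1
 * registers, replay any methods spilled into the channel's cache object,
 * and make the channel current in CACHE1_PUSH1.
 */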
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc = chan->ramfc;
        struct nouveau_gpuobj *cache = chan->cache;
        int ptr, cnt;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
        nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
        nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
        nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
        nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
        nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
        nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
        nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
        nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
        nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
        nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
        nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
        nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
        nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
        nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
        nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
        nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
        nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
        nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
        nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
        nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
        nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
        nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
        nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
        nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
        nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
        nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
        nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
        nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
        nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
        nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
        nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
        nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));

        cnt = nv_ro32(ramfc, 0x84);
        for (ptr = 0; ptr < cnt; ptr++) {
                nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
                        nv_ro32(cache, (ptr * 8) + 0));
                nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
                        nv_ro32(cache, (ptr * 8) + 4));
        }
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);

        /* guessing that all the 0x34xx regs aren't on NV50 */
        if (dev_priv->chipset != 0x50) {
                nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
                nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
                nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
                nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
                nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
        }

        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
        return 0;
}

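/*
 * The reverse of nv50_fifo_load_context(): save the currently resident
 * channel's CACHE1 state back into its RAMFC, spill pending methods into
 * its cache object, and park CACHE1 on the dummy channel 127.
 */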
int
nv50_fifo_unload_context(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc, *cache;
        struct nouveau_channel *chan = NULL;
        int chid, get, put, ptr;

        NV_DEBUG(dev, "\n");

        chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0x7f;
        if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
                return 0;

        chan = dev_priv->channels.ptr[chid];
        if (!chan) {
                NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
                return -EINVAL;
        }
        NV_DEBUG(dev, "ch%d\n", chan->id);
        ramfc = chan->ramfc;
        cache = chan->cache;

        nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
        nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
        nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
        nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
        nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
        nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
        nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
        nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
        nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
        nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
        nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
        nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
        nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
        nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
        nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
        nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
        nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
        nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
        nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
        nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
        nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
        nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
        nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
        nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
        nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
        nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
        nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
        nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
        nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
        nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
        nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
        nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
        nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));

        put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
        get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
        ptr = 0;
        while (put != get) {
                nv_wo32(cache, ptr + 0,
                        nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
                nv_wo32(cache, ptr + 4,
                        nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
                get = (get + 1) & 0x1ff;
                ptr += 8;
        }

        /* guessing that all the 0x34xx regs aren't on NV50 */
        if (dev_priv->chipset != 0x50) {
                nv_wo32(ramfc, 0x84, ptr >> 3);
                nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
                nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
                nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
                nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
                nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
        }

        dev_priv->engine.instmem.flush(dev);

        /*XXX: probably reload ch127 (NULL) state back too */
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
        return 0;
}

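/* Flush the VM TLBs for PFIFO (VM engine 5). */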
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
        nv50_vm_flush_engine(dev, 5);
}