via DRM: Tightened the security for some functions of the VIA DRM driver.
[profile/ivi/libdrm.git] / shared-core / via_dma.c
1 /* via_dma.c -- DMA support for the VIA Unichrome/Pro
2  */
3 /**************************************************************************
4  * 
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
9  * All Rights Reserved.
10  * 
11  * Copyright 2004 The Unichrome project.
12  * All Rights Reserved.
13  *
14  **************************************************************************/
15
16 #include "drmP.h"
17 #include "drm.h"
18 #include "via_drm.h"
19 #include "via_drv.h"
20 #include "via_3d_reg.h"
21
#define via_flush_write_combine() DRM_MEMORYBARRIER()

/*
 * Emit one quadword (two 32-bit words) into the AGP ring and advance the
 * software tail pointer.  Expects the local variables "vb" (ring write
 * pointer) and "dev_priv" to be in scope at the expansion site.
 * Wrapped in do { } while (0) so the multi-statement macro expands to a
 * single statement and is safe inside un-braced if/else bodies.
 */
#define VIA_OUT_RING_QW(w1, w2)                 \
        do {                                    \
                *vb++ = (w1);                   \
                *vb++ = (w2);                   \
                dev_priv->dma_low += 8;         \
        } while (0)

#define PCI_BUF_SIZE 512000

/*
 * Cacheable staging buffer used to verify/assemble user command streams
 * before copying them to (slow, write-combined) AGP memory.
 * NOTE(review): this is global state shared across all clients and
 * devices; it appears to rely on the DRM lock held by ioctl callers for
 * serialization -- confirm no unlocked path can reach it.
 */
static char pci_buf[PCI_BUF_SIZE];
static unsigned long pci_bufsiz = PCI_BUF_SIZE;
33
34
35 static void via_cmdbuf_start(drm_via_private_t * dev_priv);
36 static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
37 static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
38 static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
39
40 /*
41  * Free space in command buffer.
42  */
43
44 static uint32_t
45 via_cmdbuf_space(drm_via_private_t *dev_priv)
46 {
47         uint32_t agp_base = dev_priv->dma_offset + 
48                 (uint32_t) dev_priv->agpAddr;
49         uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
50         
51         return ((hw_addr <= dev_priv->dma_low) ? 
52                 (dev_priv->dma_high + hw_addr - dev_priv->dma_low) : 
53                 (hw_addr - dev_priv->dma_low));
54 }
55
56 /*
57  * How much does the command regulator lag behind?
58  */
59
60 static uint32_t
61 via_cmdbuf_lag(drm_via_private_t *dev_priv)
62 {
63         uint32_t agp_base = dev_priv->dma_offset + 
64                 (uint32_t) dev_priv->agpAddr;
65         uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
66         
67         return ((hw_addr <= dev_priv->dma_low) ? 
68                 (dev_priv->dma_low - hw_addr) : 
69                 (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
70 }
71
72 /*
73  * Check that the given size fits in the buffer, otherwise wait.
74  */
75
/*
 * Busy-wait until writing "size" more bytes (plus 512 KiB of slack) at the
 * current software position would no longer overtake the hardware fetch
 * pointer.  Polls the pause address the chip writes back through
 * hw_addr_ptr.
 *
 * Returns 0 on success, -1 after ~1e6 polls without progress.
 */
static inline int
via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
        uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        uint32_t cur_addr, hw_addr, next_addr;
        volatile uint32_t *hw_addr_ptr;
        uint32_t count;
        hw_addr_ptr = dev_priv->hw_addr_ptr;
        cur_addr = dev_priv->dma_low;
        /* 512 KiB headroom on top of the requested size. */
        next_addr = cur_addr + size + 512*1024;
        count = 1000000;
        do {
                /* Convert the bus address written by the chip into a ring
                 * offset. */
                hw_addr = *hw_addr_ptr - agp_base;
                if (count-- == 0) {
                        DRM_ERROR("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
                                  hw_addr, cur_addr, next_addr);
                        return -1;
                }
        /* Keep spinning while the hardware pointer lies inside the region
         * (cur_addr, next_addr] we are about to overwrite. */
        } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
        return 0;
}
97
98
99 /*
100  * Checks whether buffer head has reach the end. Rewind the ring buffer
101  * when necessary.
102  *
103  * Returns virtual pointer to ring buffer.
104  */
105
106 static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
107                                       unsigned int size)
108 {
109         if ((dev_priv->dma_low + size + 0x400) > dev_priv->dma_high) {
110                 via_cmdbuf_rewind(dev_priv);
111         }
112         if (via_cmdbuf_wait(dev_priv, size) != 0) {
113                 return NULL;
114         }
115
116         return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
117 }
118
119 int via_dma_cleanup(drm_device_t * dev)
120 {
121         if (dev->dev_private) {
122                 drm_via_private_t *dev_priv =
123                         (drm_via_private_t *) dev->dev_private;
124
125                 if (dev_priv->ring.virtual_start) {
126                         via_cmdbuf_reset(dev_priv);
127
128                         drm_core_ioremapfree(&dev_priv->ring.map, dev);
129                         dev_priv->ring.virtual_start = NULL;
130                 }
131
132         }
133
134         return 0;
135 }
136
/*
 * Map the AGP ring buffer described by "init" and initialize all DMA
 * bookkeeping in dev_priv, then program the hardware via
 * via_cmdbuf_start().  Requires via_map_init to have run (mmio mapped)
 * and must be paired with via_dma_cleanup().
 *
 * Returns 0 on success, EFAULT on bad call order, ENOMEM if the ring
 * cannot be ioremapped.
 */
static int via_initialize(drm_device_t * dev,
                          drm_via_private_t * dev_priv,
                          drm_via_dma_init_t * init)
{
        if (!dev_priv || !dev_priv->mmio) {
                DRM_ERROR("via_dma_init called before via_map_init\n");
                return DRM_ERR(EFAULT);
        }

        /* Refuse double initialization. */
        if (dev_priv->ring.virtual_start != NULL) {
                DRM_ERROR("%s called again without calling cleanup\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        /* Describe the ring as an offset into the AGP aperture, then map it. */
        dev_priv->ring.map.offset = dev->agp->base + init->offset;
        dev_priv->ring.map.size = init->size;
        dev_priv->ring.map.type = 0;
        dev_priv->ring.map.flags = 0;
        dev_priv->ring.map.mtrr = 0;

        drm_core_ioremap(&dev_priv->ring.map, dev);

        if (dev_priv->ring.map.handle == NULL) {
                via_dma_cleanup(dev);
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return DRM_ERR(ENOMEM);
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

        /* Software ring state: empty buffer, wrap point at the ring size. */
        dev_priv->dma_ptr = dev_priv->ring.virtual_start;
        dev_priv->dma_low = 0;
        dev_priv->dma_high = init->size;
        dev_priv->dma_wrap = init->size;
        dev_priv->dma_offset = init->offset;
        dev_priv->last_pause_ptr = NULL;
        /* Location the chip writes its fetch position back to. */
        dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;

        via_cmdbuf_start(dev_priv);

        return 0;
}
181
182 int via_dma_init(DRM_IOCTL_ARGS)
183 {
184         DRM_DEVICE;
185         drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
186         drm_via_dma_init_t init;
187         int retcode = 0;
188
189         DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *) data,
190                                  sizeof(init));
191
192         switch (init.func) {
193         case VIA_INIT_DMA:
194                 if (!capable(CAP_SYS_ADMIN))
195                         retcode = DRM_ERR(EPERM);
196                 else
197                         retcode = via_initialize(dev, dev_priv, &init);
198                 break;
199         case VIA_CLEANUP_DMA:
200                 if (!capable(CAP_SYS_ADMIN))
201                         retcode = DRM_ERR(EPERM);
202                 else
203                         retcode = via_dma_cleanup(dev);
204                 break;
205         case VIA_DMA_INITIALIZED:
206                 retcode = (dev_priv->ring.virtual_start != NULL) ? 
207                         0: DRM_ERR( EFAULT );
208                 break;
209         default:
210                 retcode = DRM_ERR(EINVAL);
211                 break;
212         }
213
214         return retcode;
215 }
216
/*
 * Copy a user command buffer into the static staging buffer, verify it,
 * and append the verified stream to the AGP ring.
 *
 * Returns 0 on success, EFAULT if the ring is uninitialized or the user
 * copy fails, ENOMEM if the buffer exceeds the staging buffer, EAGAIN if
 * ring space cannot be obtained, or the verifier's error code.
 */
static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
{
        drm_via_private_t *dev_priv;
        uint32_t *vb;
        int ret;

        dev_priv = (drm_via_private_t *) dev->dev_private;

        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("%s called without initializing AGP ring buffer.\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        /* Reject streams larger than the staging buffer. */
        if (cmd->size > pci_bufsiz && pci_bufsiz > 0) {
                return DRM_ERR(ENOMEM);
        } 


        if (DRM_COPY_FROM_USER(pci_buf, cmd->buf, cmd->size))
                return DRM_ERR(EFAULT);

        /*
         * Running this function on AGP memory is dead slow. Therefore
         * we run it on a temporary cacheable system memory buffer and
         * copy it to AGP memory when ready.
         */

        /* Security: verify the stream BEFORE it becomes visible to the
         * hardware; pci_buf is kernel memory, so the user cannot modify
         * it between verification and the copy below. */
        if ((ret = via_verify_command_stream((uint32_t *)pci_buf, cmd->size, dev))) {
                return ret;
        }
        
        vb = via_check_dma(dev_priv, cmd->size);
        if (vb == NULL) {
                return DRM_ERR(EAGAIN);
        }

        memcpy(vb, pci_buf, cmd->size);
        
        dev_priv->dma_low += cmd->size;
        /* Hook the new segment into the ring tail and (re)start the
         * regulator if it had paused. */
        via_cmdbuf_pause(dev_priv);

        return 0;
}
262
263 static int via_quiescent(drm_device_t * dev)
264 {
265         drm_via_private_t *dev_priv = dev->dev_private;
266
267         if (!via_wait_idle(dev_priv)) {
268                 return DRM_ERR(EAGAIN);
269         }
270         return 0;
271 }
272
273 int via_flush_ioctl(DRM_IOCTL_ARGS)
274 {
275         DRM_DEVICE;
276
277         LOCK_TEST_WITH_RETURN( dev, filp );
278
279         return via_quiescent(dev);
280 }
281
282 int via_cmdbuffer(DRM_IOCTL_ARGS)
283 {
284         DRM_DEVICE;
285         drm_via_cmdbuffer_t cmdbuf;
286         int ret;
287
288         LOCK_TEST_WITH_RETURN( dev, filp );
289
290         DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
291                                  sizeof(cmdbuf));
292
293         DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
294
295         ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
296         if (ret) {
297                 return ret;
298         }
299
300         return 0;
301 }
302
303 static int via_parse_pci_cmdbuffer(drm_device_t * dev, const char *buf,
304                                    unsigned int size)
305 {
306         drm_via_private_t *dev_priv = dev->dev_private;
307         const uint32_t *regbuf = (const uint32_t *) buf;
308         const uint32_t *regend = regbuf + (size >> 2);
309         int ret;
310         int check_2d_cmd = 1;
311
312         if ((ret = via_verify_command_stream(regbuf, size, dev)))
313                 return ret;
314
315         while (regbuf != regend) {      
316                 if ( *regbuf == HALCYON_HEADER2 ) {
317                   
318                         regbuf++;
319                         check_2d_cmd = ( *regbuf != HALCYON_SUB_ADDR0 );
320                         VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *regbuf++);
321                         
322                 } else if ( check_2d_cmd && ((*regbuf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 )) {
323
324                         register uint32_t addr = ( (*regbuf++ ) & ~HALCYON_HEADER1MASK) << 2;
325                         VIA_WRITE( addr, *regbuf++ );
326
327                 } else if ( ( *regbuf & HALCYON_FIREMASK ) == HALCYON_FIRECMD ) {
328
329                         VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE, *regbuf++);
330                         if ( ( regbuf != regend ) && 
331                              ((*regbuf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
332                                 regbuf++;
333                         if (( *regbuf & HALCYON_CMDBMASK ) != HC_ACMD_HCmdB )
334                                 check_2d_cmd = 1;
335                 } else {
336
337                         VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE , *regbuf++);
338
339                 }
340         } 
341         return 0;
342
343 }
344
345 static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
346                                       drm_via_cmdbuffer_t * cmd)
347 {
348         int ret;
349
350         if (cmd->size > pci_bufsiz && pci_bufsiz > 0) {
351                 return DRM_ERR(ENOMEM);
352         } 
353         if (DRM_COPY_FROM_USER(pci_buf, cmd->buf, cmd->size))
354                 return DRM_ERR(EFAULT);
355         ret = via_parse_pci_cmdbuffer(dev, pci_buf, cmd->size);
356         return ret;
357 }
358
359 int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
360 {
361         DRM_DEVICE;
362         drm_via_cmdbuffer_t cmdbuf;
363         int ret;
364
365         LOCK_TEST_WITH_RETURN( dev, filp );
366
367         DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
368                                  sizeof(cmdbuf));
369
370         DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
371                   cmdbuf.size);
372
373         ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
374         if (ret) {
375                 return ret;
376         }
377
378         return 0;
379 }
380
381 /************************************************************************/
382
#define CMDBUF_ALIGNMENT_SIZE   (0x100)
#define CMDBUF_ALIGNMENT_MASK   (0xff)

/* defines for VIA 3D registers */
#define VIA_REG_STATUS          0x400
#define VIA_REG_TRANSET         0x43C
#define VIA_REG_TRANSPACE       0x440

/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY       0x00000080      /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY         0x00000001      /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY         0x00000002      /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY       0x00020000      /* Virtual Queue is busy */

/*
 * Emit a HEADER1-style 2D register write (register + data quadword) into
 * the AGP ring.  Expects "vb" and "dev_priv" in scope at the expansion
 * site.  Wrapped in do { } while (0) so the macro is a single statement
 * and safe inside un-braced if/else bodies.
 */
#define SetReg2DAGP(nReg, nData) do {                           \
        *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;  \
        *((uint32_t *)(vb) + 1) = (nData);                      \
        vb = ((uint32_t *)vb) + 2;                              \
        dev_priv->dma_low += 8;                                 \
} while (0)
403
404 static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
405                                          uint32_t * vb, int qw_count)
406 {
407         for (; qw_count > 0; --qw_count) {
408                 VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
409         }
410         return vb;
411 }
412
413
414 /*
415  * This function is used internally by ring buffer mangement code.
416  *
417  * Returns virtual pointer to ring buffer.
418  */
/* Virtual address of the current software write position in the ring. */
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
        return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
423
424 /*
425  * Hooks a segment of data into the tail of the ring-buffer by
426  * modifying the pause address stored in the buffer itself. If
427  * the regulator has already paused, restart it.
428  */
/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 *
 * Patches the low pause-address word recorded by the previous segment so
 * the regulator runs on into the new one, then checks 0x41c for the
 * pause status bit.  If the regulator genuinely paused at the old pause
 * address (and !no_pci_fire), it is restarted by re-issuing the pause
 * address over PCI.  Returns the (nonzero) pause status.
 *
 * NOTE(review): the statement ordering and the spin loops below are part
 * of the write-combining flush protocol -- do not reorder.
 */
static int via_hook_segment(drm_via_private_t *dev_priv,
                            uint32_t pause_addr_hi, uint32_t pause_addr_lo,
                            int no_pci_fire)
{
        int paused, count;
        volatile uint32_t *paused_at = dev_priv->last_pause_ptr;

        via_flush_write_combine();
        /* Spin until the most recently written pause word is visible. */
        while(! *(via_get_dma(dev_priv)-1));
        /* Redirect the old pause to the new segment's pause address. */
        *dev_priv->last_pause_ptr = pause_addr_lo;
        via_flush_write_combine();

        /*
         * The below statement is inserted to really force the flush.
         * Not sure it is needed.
         */

        while(! *dev_priv->last_pause_ptr);
        /* The new segment's last word becomes the next patch point. */
        dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
        while(! *dev_priv->last_pause_ptr);

        paused = 0;
        count = 20; 

        /* Poll the pause status bit a bounded number of times. */
        while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
        if ((count <= 8) && (count >= 0)) {
                uint32_t rgtr, ptr;
                rgtr = *(dev_priv->hw_addr_ptr);
                ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) + 
                        dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 - 0x100;
                if (rgtr <= ptr) {
                        DRM_ERROR("Command regulator\npaused at count %d, address %x, "
                                  "while current pause address is %x.\n"
                                  "Please mail this message to "
                                  "<unichrome-devel@lists.sourceforge.net>\n",
                                  count, rgtr, ptr);
                }
        }
                
        if (paused && !no_pci_fire) {
                uint32_t rgtr,ptr;

                rgtr = *(dev_priv->hw_addr_ptr);
                /* Bus address of the word just past the old pause slot. */
                ptr = ((char *)paused_at - dev_priv->dma_ptr) + 
                        dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

                /* Only restart if the regulator stopped at (or just before)
                 * the old pause point, i.e. before seeing our patch. */
                if (rgtr <= ptr && rgtr >= ptr - 0x100) {
                        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
                }
        }
        return paused;
}
483
484
485
486 int via_wait_idle(drm_via_private_t * dev_priv)
487 {
488         int count = 10000000;
489         while (count-- && (VIA_READ(VIA_REG_STATUS) &
490                            (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
491                             VIA_3D_ENG_BUSY))) ;
492         return count;
493 }
494
/*
 * Emit a command of "cmd_type" (pause, jump, stop...) whose target address
 * falls on a CMDBUF_ALIGNMENT_SIZE boundary, padding the ring with dummy
 * quadwords as needed.  If "addr" is 0 the target is computed from the
 * padded ring position; otherwise "addr" is used as-is.
 *
 * The encoded high/low address command words are returned through
 * *cmd_addr_hi / *cmd_addr_lo, and the advanced write pointer is the
 * return value.  With skip_wait == 0 the function first waits for
 * 2*CMDBUF_ALIGNMENT_SIZE bytes of ring space.
 */
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
                               uint32_t addr, uint32_t *cmd_addr_hi, 
                               uint32_t *cmd_addr_lo,
                               int skip_wait)
{
        uint32_t agp_base;
        uint32_t cmd_addr, addr_lo, addr_hi;
        uint32_t *vb;
        uint32_t qw_pad_count;

        if (!skip_wait)
                via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE);

        vb = via_get_dma(dev_priv);
        /* Select the PreCR transfer space before emitting the command. */
        VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
                         (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16); 
        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        /* Quadwords needed to reach the next alignment boundary. */
        qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
                ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

        
        /* Default target: the bus address of the command's own slot. */
        cmd_addr = (addr) ? addr : 
                agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
        addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
                   (cmd_addr & HC_HAGPBpL_MASK));
        addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

        /* Pad so the final quadword lands exactly on the boundary. */
        vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
        VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, 
                        *cmd_addr_lo = addr_lo);
        return vb;
}
527
528
529
530
/*
 * Program the chip with the ring-buffer start, end, and initial pause
 * addresses, then start the command regulator.  Called once from
 * via_initialize().
 */
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t start_addr, start_addr_lo;
        uint32_t end_addr, end_addr_lo;
        uint32_t command;
        uint32_t agp_base;


        dev_priv->dma_low = 0;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        start_addr = agp_base;
        end_addr = agp_base + dev_priv->dma_high;

        /* Low 24 bits of start/end go in their own command words; the high
         * bytes of both are packed into "command". */
        start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
        end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
        command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
                   ((end_addr & 0xff000000) >> 16));

        /* Emit the initial pause and remember the ring slot holding its low
         * address word; via_hook_segment() patches that slot later. */
        dev_priv->last_pause_ptr = 
                via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, 
                              &pause_addr_hi, & pause_addr_lo, 1) - 1;

        via_flush_write_combine();
        /* Spin until the pause word is actually visible in memory. */
        while(! *dev_priv->last_pause_ptr);

        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
        VIA_WRITE(VIA_REG_TRANSPACE, command);
        VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
        VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);

        /* NOTE(review): re-writing "command" with HC_HAGPCMNT_MASK set
         * appears to be what kicks off AGP command fetch -- confirm
         * against the VIA register documentation. */
        VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
}
568
569 static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
570 {
571         uint32_t *vb = via_get_dma(dev_priv);
572         SetReg2DAGP(0x0C, (0 | (0 << 16)));
573         SetReg2DAGP(0x10, 0 | (0 << 16));
574         SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000); 
575 }
576
577
/*
 * Wrap the ring: emit a jump command at the current tail, reset the
 * software write position to the buffer start, emit filler blits and two
 * pause traps, then hook first the jump and then the new pause into the
 * regulator's path via via_hook_segment().
 *
 * NOTE(review): the save/restore dance with dma_low_save1/2 makes the two
 * via_hook_segment() calls each see the ring state matching their
 * segment; the ordering here is deliberate -- do not restructure.
 */
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
        uint32_t agp_base;
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t jump_addr_lo, jump_addr_hi;
        volatile uint32_t *last_pause_ptr;
        uint32_t dma_low_save1, dma_low_save2;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        via_align_cmd(dev_priv,  HC_HAGPBpID_JUMP, 0, &jump_addr_hi, 
                      &jump_addr_lo, 0);
        
        /* The jump marks where this wrap segment ends. */
        dev_priv->dma_wrap = dev_priv->dma_low;


        /*
         * Wrap command buffer to the beginning.
         */

        dev_priv->dma_low = 0;
        if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
                DRM_ERROR("via_cmdbuf_jump failed\n");
        }

        via_dummy_bitblt(dev_priv);
        via_dummy_bitblt(dev_priv); 

        /* First pause pair: the first pause's low word is patched to point
         * at the second, chaining them. */
        last_pause_ptr = via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, 
                                       &pause_addr_lo, 0) -1;
        via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, 
                      &pause_addr_lo, 0);

        *last_pause_ptr = pause_addr_lo;
        dma_low_save1 = dev_priv->dma_low;

        /*
         * Now, set a trap that will pause the regulator if it tries to rerun the old
         * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
         * and reissues the jump command over PCI, while the regulator has already taken the jump
         * and actually paused at the current buffer end).
         * There appears to be no other way to detect this condition, since the hw_addr_pointer
         * does not seem to get updated immediately when a jump occurs.
         */

        last_pause_ptr = via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, 
                                       &pause_addr_lo, 0) -1;
        via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, 
                      &pause_addr_lo, 0);
        *last_pause_ptr = pause_addr_lo;

        dma_low_save2 = dev_priv->dma_low;
        /* Hook the jump with the pre-trap ring state, then restore the full
         * state and hook the final pause. */
        dev_priv->dma_low = dma_low_save1;      
        via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0);
        dev_priv->dma_low = dma_low_save2;
        via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
634
635
/* Wrap the ring back to its start by emitting a jump (see
 * via_cmdbuf_jump()). */
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
        via_cmdbuf_jump(dev_priv); 
}
640
641 static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
642 {
643         uint32_t pause_addr_lo, pause_addr_hi;
644
645         via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
646         via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0);
647 }
648
649
/* Flush the ring segment, ending it with a PAUSE command. */
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
654
/* Flush the ring with a STOP command and wait for the engines to idle.
 * Used during teardown (via_dma_cleanup()). */
static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
        via_wait_idle(dev_priv);
}
660
661 /*
662  * User interface to the space and lag function.
663  */
664
/*
 * Ioctl entry point: report ring-buffer free space (VIA_CMDBUF_SPACE) or
 * regulator lag (VIA_CMDBUF_LAG).  With d_siz.wait set, busy-polls (up to
 * ~1e6 iterations) until the requested amount is available; without it,
 * returns the current value immediately.  The result is written back into
 * d_siz.size.  Requires the caller to hold the DRM lock.
 */
int 
via_cmdbuf_size(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_cmdbuf_size_t d_siz;
        int ret = 0;
        uint32_t tmp_size, count;
        drm_via_private_t *dev_priv;

        DRM_DEBUG("via cmdbuf_size\n");
        LOCK_TEST_WITH_RETURN( dev, filp );

        dev_priv = (drm_via_private_t *) dev->dev_private;

        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("%s called without initializing AGP ring buffer.\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t *) data,
                                 sizeof(d_siz));


        count = 1000000;
        tmp_size = d_siz.size;
        switch(d_siz.func) {
        case VIA_CMDBUF_SPACE:
                /* Poll until at least d_siz.size bytes are free (or give up
                 * after "count" polls; bail after one read if !wait). */
                while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) && count--) {
                        if (!d_siz.wait) {
                                break;
                        }
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
                        ret = DRM_ERR(EAGAIN);
                }
                break;
        case VIA_CMDBUF_LAG:
                /* Poll until the regulator lag drops to d_siz.size bytes. */
                while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) && count--) {
                        if (!d_siz.wait) {
                                break;
                        }
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
                        ret = DRM_ERR(EAGAIN);
                }
                break;
        default:
                ret = DRM_ERR(EFAULT);
        }
        /* Return the last measured value even on timeout. */
        d_siz.size = tmp_size;

        DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t *) data, d_siz,
                               sizeof(d_siz));
        return ret;
}