/* via_dma.c -- DMA support for the VIA Unichrome/Pro
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
 * Copyright 2004 The Unichrome project.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_3d_reg.h"

#define PCI_BUF_SIZE 512000
#define CMDBUF_ALIGNMENT_SIZE   (0x100)
#define CMDBUF_ALIGNMENT_MASK   (0xff)

/* defines for VIA 3D registers */
#define VIA_REG_STATUS          0x400
#define VIA_REG_TRANSET         0x43C
#define VIA_REG_TRANSPACE       0x440

/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY       0x00000080      /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY         0x00000001      /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY         0x00000002      /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY       0x00020000      /* Virtual Queue is busy */

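/*
 * Emit a HEADER1 register write (register offset plus one data word) into
 * the command buffer at 'vb', advancing 'vb' and dev_priv->dma_low by one
 * quadword.
 */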
#define SetReg2DAGP(nReg, nData) {                              \
        *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;  \
        *((uint32_t *)(vb) + 1) = (nData);                      \
        vb = ((uint32_t *)vb) + 2;                              \
        dev_priv->dma_low += 8;                                 \
}

#define via_flush_write_combine() DRM_MEMORYBARRIER()

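/*
 * Emit one quadword (two 32-bit words) into the command buffer at 'vb'
 * and account for it in dev_priv->dma_low.
 */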
#define VIA_OUT_RING_QW(w1,w2)                  \
        *vb++ = (w1);                           \
        *vb++ = (w2);                           \
        dev_priv->dma_low += 8;

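/*
 * Temporary cacheable system-memory buffer used to stage and verify
 * user-space command streams before they are copied to AGP memory.
 */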
static char pci_buf[PCI_BUF_SIZE];
static unsigned long pci_bufsiz = PCI_BUF_SIZE;

static void via_cmdbuf_start(drm_via_private_t * dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
int via_wait_idle(drm_via_private_t * dev_priv);

/*
 * Free space in command buffer.
 */

static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
{
        uint32_t agp_base = dev_priv->dma_offset +
                (uint32_t) dev_priv->agpAddr;
        uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

        return ((hw_addr <= dev_priv->dma_low) ?
                (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
                (hw_addr - dev_priv->dma_low));
}

/*
 * How much does the command regulator lag behind?
 */

static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
        uint32_t agp_base = dev_priv->dma_offset +
                (uint32_t) dev_priv->agpAddr;
        uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

        return ((hw_addr <= dev_priv->dma_low) ?
                (dev_priv->dma_low - hw_addr) :
                (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}

/*
 * Check that the given size fits in the buffer, otherwise wait.
 */

static inline int via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
        uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        uint32_t cur_addr, hw_addr, next_addr;
        volatile uint32_t *hw_addr_ptr;
        uint32_t count;

        hw_addr_ptr = dev_priv->hw_addr_ptr;
        cur_addr = dev_priv->dma_low;
        next_addr = cur_addr + size + 512*1024;
        count = 1000000;
        do {
                hw_addr = *hw_addr_ptr - agp_base;
                if (count-- == 0) {
                        DRM_ERROR("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
                                  hw_addr, cur_addr, next_addr);
                        return -1;
                }
        } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
        return 0;
}

/*
 * Checks whether the buffer head has reached the end and rewinds the ring
 * buffer when necessary. Returns a virtual pointer into the ring buffer.
 */

static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
                                      unsigned int size)
{
        if ((dev_priv->dma_low + size + 4*CMDBUF_ALIGNMENT_SIZE) > dev_priv->dma_high) {
                via_cmdbuf_rewind(dev_priv);
        }
        if (via_cmdbuf_wait(dev_priv, size) != 0) {
                return NULL;
        }
        return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

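/*
 * Stop the command regulator and release the ring-buffer mapping.
 * Called on cleanup and when initialization fails half-way.
 */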
int via_dma_cleanup(drm_device_t * dev)
{
        if (dev->dev_private) {
                drm_via_private_t *dev_priv =
                    (drm_via_private_t *) dev->dev_private;

                if (dev_priv->ring.virtual_start) {
                        via_cmdbuf_reset(dev_priv);

                        drm_core_ioremapfree(&dev_priv->ring.map, dev);
                        dev_priv->ring.virtual_start = NULL;
                }
        }

        return 0;
}

static int via_initialize(drm_device_t * dev,
                          drm_via_private_t * dev_priv,
                          drm_via_dma_init_t * init)
{
        if (!dev_priv || !dev_priv->mmio) {
                DRM_ERROR("via_dma_init called before via_map_init\n");
                return DRM_ERR(EFAULT);
        }

        if (dev_priv->ring.virtual_start != NULL) {
                DRM_ERROR("%s called again without calling cleanup\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        dev_priv->ring.map.offset = dev->agp->base + init->offset;
        dev_priv->ring.map.size = init->size;
        dev_priv->ring.map.type = 0;
        dev_priv->ring.map.flags = 0;
        dev_priv->ring.map.mtrr = 0;

        drm_core_ioremap(&dev_priv->ring.map, dev);

        if (dev_priv->ring.map.handle == NULL) {
                via_dma_cleanup(dev);
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return DRM_ERR(ENOMEM);
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

        dev_priv->dma_ptr = dev_priv->ring.virtual_start;
        dev_priv->dma_low = 0;
        dev_priv->dma_high = init->size;
        dev_priv->dma_wrap = init->size;
        dev_priv->dma_offset = init->offset;
        dev_priv->last_pause_ptr = NULL;
        dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;

        via_cmdbuf_start(dev_priv);

        return 0;
}

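/*
 * Ioctl entry point: initialize, clean up or query the state of the AGP
 * DMA command buffer, depending on init.func.
 */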
int via_dma_init(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        drm_via_dma_init_t init;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *) data,
                                 sizeof(init));

        switch (init.func) {
        case VIA_INIT_DMA:
                if (!capable(CAP_SYS_ADMIN))
                        retcode = DRM_ERR(EPERM);
                else
                        retcode = via_initialize(dev, dev_priv, &init);
                break;
        case VIA_CLEANUP_DMA:
                if (!capable(CAP_SYS_ADMIN))
                        retcode = DRM_ERR(EPERM);
                else
                        retcode = via_dma_cleanup(dev);
                break;
        case VIA_DMA_INITIALIZED:
                retcode = (dev_priv->ring.virtual_start != NULL) ?
                        0 : DRM_ERR(EFAULT);
                break;
        default:
                retcode = DRM_ERR(EINVAL);
                break;
        }
        return retcode;
}

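/*
 * Copy a user-space command buffer into the staging buffer, verify it and
 * queue it at the tail of the AGP ring buffer.
 */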
static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
{
        drm_via_private_t *dev_priv;
        uint32_t *vb;
        int ret;

        dev_priv = (drm_via_private_t *) dev->dev_private;
        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("%s called without initializing AGP ring buffer.\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }
        if (cmd->size > pci_bufsiz && pci_bufsiz > 0) {
                return DRM_ERR(ENOMEM);
        }
        if (DRM_COPY_FROM_USER(pci_buf, cmd->buf, cmd->size))
                return DRM_ERR(EFAULT);

        /*
         * Running this function on AGP memory is dead slow. Therefore
         * we run it on a temporary cacheable system memory buffer and
         * copy it to AGP memory when ready.
         */

        if ((ret = via_verify_command_stream((uint32_t *)pci_buf, cmd->size, dev))) {
                return ret;
        }

        vb = via_check_dma(dev_priv, cmd->size);
        if (vb == NULL) {
                return DRM_ERR(EAGAIN);
        }

        memcpy(vb, pci_buf, cmd->size);
        dev_priv->dma_low += cmd->size;
        via_cmdbuf_pause(dev_priv);
        return 0;
}

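/*
 * Wait for all engines to go idle; return EAGAIN if they are still busy.
 */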
static int via_quiescent(drm_device_t * dev)
{
        drm_via_private_t *dev_priv = dev->dev_private;

        if (!via_wait_idle(dev_priv)) {
                return DRM_ERR(EAGAIN);
        }
        return 0;
}

int via_flush_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        LOCK_TEST_WITH_RETURN(dev, filp);
        return via_quiescent(dev);
}

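/*
 * Ioctl entry point for submitting a command buffer to the AGP ring.
 */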
int via_cmdbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_cmdbuffer_t cmdbuf;
        int ret;

        LOCK_TEST_WITH_RETURN(dev, filp);
        DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
                                 sizeof(cmdbuf));
        DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);

        ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
        if (ret)
                return ret;
        return 0;
}

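/*
 * Parse a verified command stream and replay it to the hardware through
 * MMIO register writes instead of the AGP ring buffer.
 */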
static int via_parse_pci_cmdbuffer(drm_device_t * dev, const char *buf,
                                   unsigned int size)
{
        drm_via_private_t *dev_priv = dev->dev_private;
        const uint32_t *regbuf = (const uint32_t *) buf;
        const uint32_t *regend = regbuf + (size >> 2);
        int ret;
        int check_2d_cmd = 1;

        if ((ret = via_verify_command_stream(regbuf, size, dev)))
                return ret;

        while (regbuf != regend) {
                if ( *regbuf == HALCYON_HEADER2 ) {

                        regbuf++;
                        check_2d_cmd = ( *regbuf != HALCYON_SUB_ADDR0 );
                        VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *regbuf++);

                } else if ( check_2d_cmd && ((*regbuf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 )) {

                        register uint32_t addr = ( (*regbuf++ ) & ~HALCYON_HEADER1MASK) << 2;
                        VIA_WRITE( addr, *regbuf++ );

                } else if ( ( *regbuf & HALCYON_FIREMASK ) == HALCYON_FIRECMD ) {

                        VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE, *regbuf++);
                        if ( ( regbuf != regend ) &&
                             ((*regbuf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
                                regbuf++;
                        if (( *regbuf & HALCYON_CMDBMASK ) != HC_ACMD_HCmdB )
                                check_2d_cmd = 1;
                } else {

                        VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE , *regbuf++);
                }
        }
        return 0;
}

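/*
 * Stage a user-space command buffer in system memory and hand it to the
 * PCI (MMIO) parser above.
 */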
static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
                                      drm_via_cmdbuffer_t * cmd)
{
        int ret;

        if (cmd->size > pci_bufsiz && pci_bufsiz > 0) {
                return DRM_ERR(ENOMEM);
        }
        if (DRM_COPY_FROM_USER(pci_buf, cmd->buf, cmd->size))
                return DRM_ERR(EFAULT);
        ret = via_parse_pci_cmdbuffer(dev, pci_buf, cmd->size);
        return ret;
}

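/*
 * Ioctl entry point for submitting a command buffer over PCI / MMIO.
 */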
int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_cmdbuffer_t cmdbuf;
        int ret;

        LOCK_TEST_WITH_RETURN(dev, filp);
        DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
                                 sizeof(cmdbuf));
        DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
                  cmdbuf.size);

        ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
        if (ret)
                return ret;
        return 0;
}

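/*
 * Emit 'qw_count' dummy quadwords into the ring buffer; used by
 * via_align_cmd to pad commands to the required alignment.
 */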
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
                                         uint32_t * vb, int qw_count)
{
        for (; qw_count > 0; --qw_count) {
                VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
        }
        return vb;
}

/*
 * This function is used internally by ring buffer management code.
 *
 * Returns virtual pointer to ring buffer.
 */
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
        return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 */
static int via_hook_segment(drm_via_private_t *dev_priv,
                            uint32_t pause_addr_hi, uint32_t pause_addr_lo,
                            int no_pci_fire)
{
        int paused, count;
        volatile uint32_t *paused_at = dev_priv->last_pause_ptr;

        via_flush_write_combine();
        while (!*(via_get_dma(dev_priv) - 1));
        *dev_priv->last_pause_ptr = pause_addr_lo;
        via_flush_write_combine();

        /*
         * The below statement is inserted to really force the flush.
         * Not sure it is needed.
         */

        while (!*dev_priv->last_pause_ptr);
        dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
        while (!*dev_priv->last_pause_ptr);

        paused = 0;
        count = 20;

        while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
        if ((count <= 8) && (count >= 0)) {
                uint32_t rgtr, ptr;

                rgtr = *(dev_priv->hw_addr_ptr);
                ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
                        dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 -
                        CMDBUF_ALIGNMENT_SIZE;
                if (rgtr <= ptr && rgtr >= ptr - CMDBUF_ALIGNMENT_SIZE) {
                        DRM_ERROR("Command regulator\npaused at count %d, address %x, "
                                  "while current pause address is %x.\n"
                                  "Please mail this message to "
                                  "<unichrome-devel@lists.sourceforge.net>\n",
                                  count, rgtr, ptr);
                }
        }

        if (paused && !no_pci_fire) {
                uint32_t rgtr, ptr;
                uint32_t ptr_low;

                count = 1000000;
                while ((VIA_READ(VIA_REG_STATUS) & VIA_CMD_RGTR_BUSY) && count--);

                rgtr = *(dev_priv->hw_addr_ptr);
                ptr = ((char *)paused_at - dev_priv->dma_ptr) +
                        dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

                ptr_low = (ptr > 3*CMDBUF_ALIGNMENT_SIZE) ?
                        ptr - 3*CMDBUF_ALIGNMENT_SIZE : 0;
                if (rgtr <= ptr && rgtr >= ptr_low) {
                        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
                }
        }
        return paused;
}

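/*
 * Spin until the command regulator and the 2D / 3D engines report idle,
 * or the spin count runs out.
 */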
int via_wait_idle(drm_via_private_t * dev_priv)
{
        int count = 10000000;

        while (count-- && (VIA_READ(VIA_REG_STATUS) &
                           (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
                            VIA_3D_ENG_BUSY)));
        return count;
}

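/*
 * Pad the ring buffer to the next CMDBUF_ALIGNMENT_SIZE boundary and emit
 * a pause / jump / stop command of the given type at the end of the padding.
 * The address words written are returned through *cmd_addr_hi / *cmd_addr_lo.
 */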
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
                               uint32_t addr, uint32_t *cmd_addr_hi,
                               uint32_t *cmd_addr_lo,
                               int skip_wait)
{
        uint32_t agp_base;
        uint32_t cmd_addr, addr_lo, addr_hi;
        uint32_t *vb;
        uint32_t qw_pad_count;

        if (!skip_wait)
                via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE);

        vb = via_get_dma(dev_priv);
        VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
                         (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
            ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

        cmd_addr = (addr) ? addr :
                agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
        addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
                   (cmd_addr & HC_HAGPBpL_MASK));
        addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

        vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
        VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi,
                        *cmd_addr_lo = addr_lo);
        return vb;
}

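/*
 * Program the AGP command buffer start, end and initial pause addresses
 * into the hardware and kick off the command regulator.
 */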
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t start_addr, start_addr_lo;
        uint32_t end_addr, end_addr_lo;
        uint32_t command;
        uint32_t agp_base;

        dev_priv->dma_low = 0;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        start_addr = agp_base;
        end_addr = agp_base + dev_priv->dma_high;

        start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
        end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
        command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
                   ((end_addr & 0xff000000) >> 16));

        dev_priv->last_pause_ptr =
                via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
                              &pause_addr_hi, &pause_addr_lo, 1) - 1;

        via_flush_write_combine();
        while (!*dev_priv->last_pause_ptr);

        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
        VIA_WRITE(VIA_REG_TRANSPACE, command);
        VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
        VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);

        VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
}

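/*
 * Queue a zero-sized dummy bitblt in the ring buffer; used by
 * via_cmdbuf_jump when wrapping to the start of the buffer.
 */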
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
        uint32_t *vb = via_get_dma(dev_priv);

        SetReg2DAGP(0x0C, (0 | (0 << 16)));
        SetReg2DAGP(0x10, 0 | (0 << 16));
        SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}

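/*
 * Terminate the current segment with a jump command and restart command
 * submission at the beginning of the ring buffer.
 */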
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
        uint32_t agp_base;
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t jump_addr_lo, jump_addr_hi;
        volatile uint32_t *last_pause_ptr;
        uint32_t dma_low_save1, dma_low_save2;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
                      &jump_addr_lo, 0);

        dev_priv->dma_wrap = dev_priv->dma_low;

        /*
         * Wrap command buffer to the beginning.
         */

        dev_priv->dma_low = 0;
        if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
                DRM_ERROR("via_cmdbuf_jump failed\n");
        }

        via_dummy_bitblt(dev_priv);
        via_dummy_bitblt(dev_priv);

        last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                                       &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);

        *last_pause_ptr = pause_addr_lo;
        dma_low_save1 = dev_priv->dma_low;

        /*
         * Now, set a trap that will pause the regulator if it tries to rerun the old
         * command buffer. (Which may happen if via_hook_segment detects a command
         * regulator pause and reissues the jump command over PCI, while the regulator
         * has already taken the jump and actually paused at the current buffer end).
         * There appears to be no other way to detect this condition, since the
         * hw_addr_ptr does not seem to get updated immediately when a jump occurs.
         */

        last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                                       &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);
        *last_pause_ptr = pause_addr_lo;

        dma_low_save2 = dev_priv->dma_low;
        dev_priv->dma_low = dma_low_save1;
        via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
        dev_priv->dma_low = dma_low_save2;
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
        via_cmdbuf_jump(dev_priv);
}

static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
        uint32_t pause_addr_lo, pause_addr_hi;

        via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

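/*
 * Pause the regulator at the current buffer tail. Submission resumes when
 * via_hook_segment rewrites the stored pause address.
 */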
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}

static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
        via_wait_idle(dev_priv);
}

/*
 * User interface to the space and lag functions.
 */

int via_cmdbuf_size(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_cmdbuf_size_t d_siz;
        int ret = 0;
        uint32_t tmp_size, count;
        drm_via_private_t *dev_priv;

        DRM_DEBUG("via cmdbuf_size\n");
        LOCK_TEST_WITH_RETURN(dev, filp);

        dev_priv = (drm_via_private_t *) dev->dev_private;
        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("%s called without initializing AGP ring buffer.\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t *) data,
                                 sizeof(d_siz));

        count = 1000000;
        tmp_size = d_siz.size;
        switch (d_siz.func) {
        case VIA_CMDBUF_SPACE:
                while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) && count--) {
                        if (!d_siz.wait)
                                break;
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
                        ret = DRM_ERR(EAGAIN);
                }
                break;
        case VIA_CMDBUF_LAG:
                while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) && count--) {
                        if (!d_siz.wait)
                                break;
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
                        ret = DRM_ERR(EAGAIN);
                }
                break;
        default:
                ret = DRM_ERR(EFAULT);
        }
        d_siz.size = tmp_size;

        DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t *) data, d_siz,
                               sizeof(d_siz));
        return ret;
}