/* via_dma.c -- DMA support for the VIA Unichrome/Pro
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
 * All Rights Reserved.
 *
 * Copyright 2004 The Unichrome project.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Tungsten Graphics,
 *    Erdi Chen,
 *    Thomas Hellstrom.
 */

#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_3d_reg.h"
#define SetReg2DAGP(nReg, nData) {				\
	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
	*((uint32_t *)(vb) + 1) = (nData);			\
	vb = ((uint32_t *)vb) + 2;				\
	dev_priv->dma_low += 8;					\
}
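/*
 * Illustrative note (editorial): SetReg2DAGP() emits a HALCYON_HEADER1
 * packet, i.e. a (register, data) pair for the 2D engine with the
 * register offset pre-shifted by two. For example, SetReg2DAGP(0x0C, 0)
 * writes zero to the 2D register at offset 0x0C and advances the local
 * vb cursor by one quadword.
 */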
#define via_flush_write_combine() DRM_MEMORYBARRIER()
#define VIA_OUT_RING_QW(w1,w2)			\
	*vb++ = (w1);				\
	*vb++ = (w2);				\
	dev_priv->dma_low += 8;
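/*
 * Usage sketch (editorial, not from the original source): both macros
 * assume a local uint32_t *vb cursor and a dev_priv pointer in scope,
 * e.g.
 *
 *	uint32_t *vb = via_get_dma(dev_priv);
 *	VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
 *
 * Each invocation emits one quadword and advances dma_low by 8 bytes.
 */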
static void via_cmdbuf_start(drm_via_private_t * dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
/*
 * Free space in command buffer.
 */

static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
		(hw_addr - dev_priv->dma_low));
}
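/*
 * Worked example (editorial, illustrative numbers): with
 * dma_high = 0x100000, dma_low = 0x40000 and the hardware head at
 * hw_addr = 0x10000, the head trails the tail and the free space is
 * 0x100000 + 0x10000 - 0x40000 = 0xd0000 bytes; with hw_addr = 0x80000
 * the head is ahead, leaving 0x80000 - 0x40000 = 0x40000 bytes.
 */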
/*
 * How much does the command regulator lag behind?
 */

static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_low - hw_addr) :
		(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}
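/*
 * Worked example (editorial, illustrative numbers): with
 * dma_wrap = 0x100000, dma_low = 0x40000 and hw_addr = 0x10000 the
 * regulator lags by 0x40000 - 0x10000 = 0x30000 bytes; with
 * hw_addr = 0x80000 it is still reading the segment before the last
 * wrap, giving 0x100000 + 0x40000 - 0x80000 = 0xc0000 bytes.
 */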
/*
 * Check that the given size fits in the buffer, otherwise wait.
 */

static inline int
via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t cur_addr, hw_addr, next_addr;
	volatile uint32_t *hw_addr_ptr;
	uint32_t count;
	hw_addr_ptr = dev_priv->hw_addr_ptr;
	cur_addr = dev_priv->dma_low;
	next_addr = cur_addr + size + 512 * 1024;
	count = 1000000;
	do {
		hw_addr = *hw_addr_ptr - agp_base;
		if (count-- == 0) {
			DRM_ERROR
			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
			     hw_addr, cur_addr, next_addr);
			return -1;
		}
	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
	return 0;
}
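/*
 * Note (editorial): next_addr includes a 512 KiB guard band beyond the
 * requested size, so the busy-wait above only ends once the hardware
 * head is safely outside the region about to be overwritten, not just
 * at its edge.
 */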
/*
 * Checks whether the buffer head has reached the end. Rewinds the
 * ring buffer when necessary.
 *
 * Returns virtual pointer to ring buffer.
 */

static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
				      unsigned int size)
{
	if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
	    dev_priv->dma_high) {
		via_cmdbuf_rewind(dev_priv);
	}
	if (via_cmdbuf_wait(dev_priv, size) != 0) {
		return NULL;
	}

	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
int via_dma_cleanup(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_via_private_t *dev_priv =
		    (drm_via_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			via_cmdbuf_reset(dev_priv);

			drm_core_ioremapfree(&dev_priv->ring.map, dev);
			dev_priv->ring.virtual_start = NULL;
		}
	}

	return 0;
}
static int via_initialize(drm_device_t * dev,
			  drm_via_private_t * dev_priv,
			  drm_via_dma_init_t * init)
{
	if (!dev_priv || !dev_priv->mmio) {
		DRM_ERROR("via_dma_init called before via_map_init\n");
		return DRM_ERR(EFAULT);
	}

	if (dev_priv->ring.virtual_start != NULL) {
		DRM_ERROR("%s called again without calling cleanup\n",
			  __FUNCTION__);
		return DRM_ERR(EFAULT);
	}

	if (!dev->agp || !dev->agp->base) {
		DRM_ERROR("%s called with no agp memory available\n",
			  __FUNCTION__);
		return DRM_ERR(EFAULT);
	}

	if (dev_priv->chipset == VIA_DX9_0) {
		DRM_ERROR("AGP DMA is not supported on this chip\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->ring.map.offset = dev->agp->base + init->offset;
	dev_priv->ring.map.size = init->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		via_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
	dev_priv->dma_low = 0;
	dev_priv->dma_high = init->size;
	dev_priv->dma_wrap = init->size;
	dev_priv->dma_offset = init->offset;
	dev_priv->last_pause_ptr = NULL;
	dev_priv->hw_addr_ptr =
	    (volatile uint32_t *)((char *)dev_priv->mmio->handle +
				  init->reg_pause_addr);

	via_cmdbuf_start(dev_priv);

	return 0;
}
static int via_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_dma_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case VIA_INIT_DMA:
		if (!DRM_SUSER(DRM_CURPROC))
			retcode = DRM_ERR(EPERM);
		else
			retcode = via_initialize(dev, dev_priv, &init);
		break;
	case VIA_CLEANUP_DMA:
		if (!DRM_SUSER(DRM_CURPROC))
			retcode = DRM_ERR(EPERM);
		else
			retcode = via_dma_cleanup(dev);
		break;
	case VIA_DMA_INITIALIZED:
		retcode = (dev_priv->ring.virtual_start != NULL) ?
			0 : DRM_ERR(EFAULT);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}
static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
{
	drm_via_private_t *dev_priv;
	uint32_t *vb;
	int ret;

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("%s called without initializing AGP ring buffer.\n",
			  __FUNCTION__);
		return DRM_ERR(EFAULT);
	}

	if (cmd->size > VIA_PCI_BUF_SIZE) {
		return DRM_ERR(ENOMEM);
	}

	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
		return DRM_ERR(EFAULT);

	/*
	 * Running this function on AGP memory is dead slow. Therefore
	 * we run it on a temporary cacheable system memory buffer and
	 * copy it to AGP memory when ready.
	 */

	if ((ret =
	     via_verify_command_stream((uint32_t *)dev_priv->pci_buf,
				       cmd->size, dev, 1))) {
		return ret;
	}

	vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
	if (vb == NULL) {
		return DRM_ERR(EAGAIN);
	}

	memcpy(vb, dev_priv->pci_buf, cmd->size);

	dev_priv->dma_low += cmd->size;

	/*
	 * Small submissions somehow stall the CPU. (AGP cache effects?)
	 * Pad to a greater size.
	 */

	if (cmd->size < 0x100)
		via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
	via_cmdbuf_pause(dev_priv);

	return 0;
}
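/*
 * Worked example (editorial) for the padding in
 * via_dispatch_cmdbuffer() above: a 0x40-byte submission is padded
 * with (0x100 - 0x40) >> 3 = 24 dummy quadwords, so at least 0x100
 * bytes always reach the ring.
 */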
int via_driver_dma_quiescent(drm_device_t * dev)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	if (!via_wait_idle(dev_priv)) {
		return DRM_ERR(EBUSY);
	}
	return 0;
}
static int via_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return via_driver_dma_quiescent(dev);
}
static int via_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_via_cmdbuffer_t cmdbuf;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);

	ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		return ret;
	}

	return 0;
}
static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
				      drm_via_cmdbuffer_t * cmd)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	int ret;

	if (cmd->size > VIA_PCI_BUF_SIZE) {
		return DRM_ERR(ENOMEM);
	}
	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
		return DRM_ERR(EFAULT);

	if ((ret =
	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
				       cmd->size, dev, 0))) {
		return ret;
	}

	ret =
	    via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
				     cmd->size);
	return ret;
}
static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_via_cmdbuffer_t cmdbuf;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
		  cmdbuf.size);

	ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		return ret;
	}

	return 0;
}
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
					 uint32_t * vb, int qw_count)
{
	for (; qw_count > 0; --qw_count) {
		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
	}
	return vb;
}
/*
 * This function is used internally by ring buffer management code.
 *
 * Returns virtual pointer to ring buffer.
 */
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 */
static int via_hook_segment(drm_via_private_t * dev_priv,
			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
			    int no_pci_fire)
{
	int paused, count;
	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
	uint32_t reader, ptr;

	paused = 0;
	via_flush_write_combine();
	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
	*paused_at = pause_addr_lo;
	via_flush_write_combine();
	(void) *paused_at;
	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

	if ((ptr - reader) <= dev_priv->dma_diff) {
		count = 10000000;
		while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
	}

	if (paused && !no_pci_fire) {
		reader = *(dev_priv->hw_addr_ptr);
		if ((ptr - reader) == dev_priv->dma_diff) {

			/*
			 * There is a concern that these writes may stall the PCI bus
			 * if the GPU is not idle. However, idling the GPU first
			 * doesn't make a difference.
			 */

			VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
			VIA_READ(VIA_REG_TRANSPACE);
		}
	}
	return paused;
}
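/*
 * Note (editorial): in via_hook_segment() above, ptr is the AGP address
 * just past the pause word we patched, and reader is the address the
 * regulator reports. Both comparisons use unsigned wrap-around
 * arithmetic; the engine is only kicked with a new pause address once
 * it has verifiably stopped at the old one, i.e. when (ptr - reader)
 * equals the calibrated dma_diff.
 */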
static int via_wait_idle(drm_via_private_t * dev_priv)
{
	int count = 10000000;

	while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);

	while (count-- && (VIA_READ(VIA_REG_STATUS) &
			   (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
			    VIA_3D_ENG_BUSY)));
	return count;
}
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
			       uint32_t addr, uint32_t *cmd_addr_hi,
			       uint32_t *cmd_addr_lo, int skip_wait)
{
	uint32_t agp_base;
	uint32_t cmd_addr, addr_lo, addr_hi;
	uint32_t *vb;
	uint32_t qw_pad_count;

	if (!skip_wait)
		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
	    ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

	cmd_addr = (addr) ? addr :
	    agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
		   (cmd_addr & HC_HAGPBpL_MASK));
	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
	return vb;
}
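/*
 * Worked example (editorial), assuming CMDBUF_ALIGNMENT_SIZE is 0x100
 * and CMDBUF_ALIGNMENT_MASK is 0xff (defined elsewhere in the driver):
 * with dma_low = 0x58 after the header quadword, qw_pad_count =
 * (0x100 >> 3) - ((0x58 & 0xff) >> 3) = 32 - 11 = 21, so 20 dummy
 * quadwords plus the branch quadword advance dma_low to the next
 * 0x100-byte boundary.
 */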
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t start_addr, start_addr_lo;
	uint32_t end_addr, end_addr_lo;
	uint32_t command;
	uint32_t agp_base;
	uint32_t ptr;
	uint32_t reader;
	int count;

	dev_priv->dma_low = 0;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	start_addr = agp_base;
	end_addr = agp_base + dev_priv->dma_high;

	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
		   ((end_addr & 0xff000000) >> 16));

	dev_priv->last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
			  &pause_addr_hi, &pause_addr_lo, 1) - 1;

	via_flush_write_combine();
	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

	VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
	VIA_WRITE(VIA_REG_TRANSPACE, command);
	VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
	VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
	VIA_READ(VIA_REG_TRANSPACE);

	dev_priv->dma_diff = 0;

	count = 10000000;
	while (!(VIA_READ(0x41c) & 0x80000000) && count--);

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	/*
	 * This is the difference between where we tell the
	 * command reader to pause and where it actually pauses.
	 * This differs between hw implementations, so we need to
	 * detect it.
	 */

	dev_priv->dma_diff = ptr - reader;
}
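/*
 * Note (editorial): dma_diff is calibrated once here at start-up as the
 * difference between the pause address we programmed (ptr) and the
 * address the hardware actually reports (reader); via_hook_segment()
 * then reuses this constant for every later submission.
 */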
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
	uint32_t *vb;

	via_cmdbuf_wait(dev_priv, qwords + 2);
	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
	via_align_buffer(dev_priv, vb, qwords);
}
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
	uint32_t *vb = via_get_dma(dev_priv);
	SetReg2DAGP(0x0C, (0 | (0 << 16)));
	SetReg2DAGP(0x10, 0 | (0 << 16));
	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
	uint32_t agp_base;
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t jump_addr_lo, jump_addr_hi;
	volatile uint32_t *last_pause_ptr;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
		      &jump_addr_lo, 0);

	dev_priv->dma_wrap = dev_priv->dma_low;

	/*
	 * Wrap command buffer to the beginning.
	 */

	dev_priv->dma_low = 0;
	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
		DRM_ERROR("via_cmdbuf_jump failed\n");
	}

	via_dummy_bitblt(dev_priv);
	via_dummy_bitblt(dev_priv);

	last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			  &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);
	*last_pause_ptr = pause_addr_lo;

	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
}
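/*
 * Note (editorial): two PAUSE commands are emitted after the wrap; the
 * low word of the first is then patched to point at the second, so a
 * regulator following the jump lands on an up-to-date pause point
 * before via_hook_segment() links the jump into the old buffer tail.
 */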
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
	via_cmdbuf_jump(dev_priv);
}
static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
	uint32_t pause_addr_lo, pause_addr_hi;

	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
	via_wait_idle(dev_priv);
}
/*
 * User interface to the space and lag functions.
 */

static int via_cmdbuf_size(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_via_cmdbuf_size_t d_siz;
	int ret = 0;
	uint32_t tmp_size, count;
	drm_via_private_t *dev_priv;

	DRM_DEBUG("via cmdbuf_size\n");
	LOCK_TEST_WITH_RETURN(dev, filp);

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("%s called without initializing AGP ring buffer.\n",
			  __FUNCTION__);
		return DRM_ERR(EFAULT);
	}

	DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data,
				 sizeof(d_siz));

	count = 1000000;
	tmp_size = d_siz.size;
	switch (d_siz.func) {
	case VIA_CMDBUF_SPACE:
		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size)
		       && count--) {
			if (!d_siz.wait) {
				break;
			}
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
			ret = DRM_ERR(EAGAIN);
		}
		break;
	case VIA_CMDBUF_LAG:
		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size)
		       && count--) {
			if (!d_siz.wait) {
				break;
			}
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
			ret = DRM_ERR(EAGAIN);
		}
		break;
	default:
		ret = DRM_ERR(EFAULT);
	}
	d_siz.size = tmp_size;

	DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz,
			       sizeof(d_siz));
	return ret;
}
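/*
 * Usage sketch from userspace (editorial, not from this source): a
 * client would typically fill a drm_via_cmdbuf_size_t with
 * func = VIA_CMDBUF_SPACE, wait = 1 and size = bytes needed, then call
 * drmCommandWriteRead() with DRM_VIA_CMDBUF_SIZE; on return, size
 * holds the space actually available in the ring.
 */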
#ifndef VIA_HAVE_DMABLIT
int
via_dma_blit_sync( DRM_IOCTL_ARGS ) {
	DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
	return DRM_ERR(EINVAL);
}
int
via_dma_blit( DRM_IOCTL_ARGS ) {
	DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
	return DRM_ERR(EINVAL);
}
#endif
drm_ioctl_desc_t via_ioctls[] = {
	[DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER},
	[DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER},
	[DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER},
	[DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
};
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);