OSDN Git Service

drm: convert drawable handling to use Linux idr
[android-x86/external-libdrm.git] / shared-core / via_dma.c
1 /* via_dma.c -- DMA support for the VIA Unichrome/Pro
2  * 
3  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4  * All Rights Reserved.
5  *
6  * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
7  * All Rights Reserved.
8  * 
9  * Copyright 2004 The Unichrome project.
10  * All Rights Reserved.
11  *
12  * Permission is hereby granted, free of charge, to any person obtaining a
13  * copy of this software and associated documentation files (the "Software"),
14  * to deal in the Software without restriction, including without limitation
15  * the rights to use, copy, modify, merge, publish, distribute, sub license,
16  * and/or sell copies of the Software, and to permit persons to whom the
17  * Software is furnished to do so, subject to the following conditions:
18  *
19  * The above copyright notice and this permission notice (including the
20  * next paragraph) shall be included in all copies or substantial portions
21  * of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 
27  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
28  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
29  * USE OR OTHER DEALINGS IN THE SOFTWARE.
30  *
31  * Authors: 
32  *    Tungsten Graphics, 
33  *    Erdi Chen, 
34  *    Thomas Hellstrom.
35  */
36
37 #include "drmP.h"
38 #include "drm.h"
39 #include "via_drm.h"
40 #include "via_drv.h"
41 #include "via_3d_reg.h"
42
/*
 * Emit a HEADER1-style 2D register write into the AGP ring.
 * Expands in terms of a local uint32_t *vb cursor and dev_priv,
 * advancing both by one quadword (8 bytes).
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement (safe under an unbraced if/else); the original bare
 * { } block would break such callers.
 */
#define SetReg2DAGP(nReg, nData) do {                           \
        *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;  \
        *((uint32_t *)(vb) + 1) = (nData);                      \
        vb = ((uint32_t *)vb) + 2;                              \
        dev_priv->dma_low += 8;                                 \
} while (0)
49
/* Flush CPU write-combining buffers so ring writes become visible to the GPU. */
#define via_flush_write_combine() DRM_MEMORYBARRIER()
51
/*
 * Emit one quadword (two 32-bit words) into the AGP ring.  Expands in
 * terms of a local uint32_t *vb cursor and dev_priv, advancing both.
 *
 * Wrapped in do { } while (0) so all three statements stay together
 * under an unbraced if/else — the original unbraced expansion would
 * leave only the first store guarded.
 */
#define VIA_OUT_RING_QW(w1, w2) do {            \
        *vb++ = (w1);                           \
        *vb++ = (w2);                           \
        dev_priv->dma_low += 8;                 \
} while (0)
56
/* Forward declarations for the ring-buffer management helpers defined below. */
static void via_cmdbuf_start(drm_via_private_t * dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
63
64
65 /*
66  * Free space in command buffer.
67  */
68
69 static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
70 {
71         uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
72         uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
73         
74         return ((hw_addr <= dev_priv->dma_low) ? 
75                 (dev_priv->dma_high + hw_addr - dev_priv->dma_low) : 
76                 (hw_addr - dev_priv->dma_low));
77 }
78
79 /*
80  * How much does the command regulator lag behind?
81  */
82
83 static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
84 {
85         uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
86         uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
87
88         return ((hw_addr <= dev_priv->dma_low) ?
89                 (dev_priv->dma_low - hw_addr) :
90                 (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
91 }
92
93 /*
94  * Check that the given size fits in the buffer, otherwise wait.
95  */
96
97 static inline int
98 via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
99 {
100         uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
101         uint32_t cur_addr, hw_addr, next_addr;
102         volatile uint32_t *hw_addr_ptr;
103         uint32_t count;
104         hw_addr_ptr = dev_priv->hw_addr_ptr;
105         cur_addr = dev_priv->dma_low;
106         next_addr = cur_addr + size + 512 * 1024;
107         count = 1000000;
108         do {
109                 hw_addr = *hw_addr_ptr - agp_base;
110                 if (count-- == 0) {
111                         DRM_ERROR
112                             ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
113                             hw_addr, cur_addr, next_addr);
114                         return -1;
115                 }
116         } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
117         return 0;
118 }
119
120
121 /*
122  * Checks whether buffer head has reach the end. Rewind the ring buffer
123  * when necessary.
124  *
125  * Returns virtual pointer to ring buffer.
126  */
127
128 static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
129                                       unsigned int size)
130 {
131         if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
132             dev_priv->dma_high) {
133                 via_cmdbuf_rewind(dev_priv);
134         }
135         if (via_cmdbuf_wait(dev_priv, size) != 0) {
136                 return NULL;
137         }
138
139         return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
140 }
141
142 int via_dma_cleanup(drm_device_t * dev)
143 {
144         if (dev->dev_private) {
145                 drm_via_private_t *dev_priv =
146                         (drm_via_private_t *) dev->dev_private;
147
148                 if (dev_priv->ring.virtual_start) {
149                         via_cmdbuf_reset(dev_priv);
150
151                         drm_core_ioremapfree(&dev_priv->ring.map, dev);
152                         dev_priv->ring.virtual_start = NULL;
153                 }
154
155         }
156
157         return 0;
158 }
159
/*
 * Map the AGP command ring into kernel space, initialize the DMA
 * bookkeeping in dev_priv and start the command regulator.
 * Reached only from the VIA_INIT_DMA ioctl path.
 */
static int via_initialize(drm_device_t * dev,
			  drm_via_private_t * dev_priv,
			  drm_via_dma_init_t * init)
{
	/* MMIO must already be mapped (via_map_init) before DMA init. */
	if (!dev_priv || !dev_priv->mmio) {
		DRM_ERROR("via_dma_init called before via_map_init\n");
		return DRM_ERR(EFAULT);
	}

	/* Refuse double init; cleanup must run first. */
	if (dev_priv->ring.virtual_start != NULL) {
		DRM_ERROR("%s called again without calling cleanup\n",
			  __FUNCTION__);
		return DRM_ERR(EFAULT);
	}

	/* The ring lives in AGP aperture space. */
	if (!dev->agp || !dev->agp->base) {
		DRM_ERROR("%s called with no agp memory available\n",
			  __FUNCTION__);
		return DRM_ERR(EFAULT);
	}

	if (dev_priv->chipset == VIA_DX9_0) {
		DRM_ERROR("AGP DMA is not supported on this chip\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->ring.map.offset = dev->agp->base + init->offset;
	dev_priv->ring.map.size = init->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		via_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Ring bookkeeping: dma_low is the write offset, dma_high the
	 * ring size, dma_wrap the current wrap point. */
	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
	dev_priv->dma_low = 0;
	dev_priv->dma_high = init->size;
	dev_priv->dma_wrap = init->size;
	dev_priv->dma_offset = init->offset;
	dev_priv->last_pause_ptr = NULL;
	/* MMIO location where the hardware reports its read pointer. */
	dev_priv->hw_addr_ptr =
		(volatile uint32_t *)((char *)dev_priv->mmio->handle +
		init->reg_pause_addr);

	via_cmdbuf_start(dev_priv);

	return 0;
}
217
218 static int via_dma_init(DRM_IOCTL_ARGS)
219 {
220         DRM_DEVICE;
221         drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
222         drm_via_dma_init_t init;
223         int retcode = 0;
224
225         DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data,
226                                  sizeof(init));
227
228         switch (init.func) {
229         case VIA_INIT_DMA:
230                 if (!DRM_SUSER(DRM_CURPROC))
231                         retcode = DRM_ERR(EPERM);
232                 else
233                         retcode = via_initialize(dev, dev_priv, &init);
234                 break;
235         case VIA_CLEANUP_DMA:
236                 if (!DRM_SUSER(DRM_CURPROC))
237                         retcode = DRM_ERR(EPERM);
238                 else
239                         retcode = via_dma_cleanup(dev);
240                 break;
241         case VIA_DMA_INITIALIZED:
242                 retcode = (dev_priv->ring.virtual_start != NULL) ?
243                         0 : DRM_ERR(EFAULT);
244                 break;
245         default:
246                 retcode = DRM_ERR(EINVAL);
247                 break;
248         }
249
250         return retcode;
251 }
252
253
254
255 static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
256 {
257         drm_via_private_t *dev_priv;
258         uint32_t *vb;
259         int ret;
260
261         dev_priv = (drm_via_private_t *) dev->dev_private;
262
263         if (dev_priv->ring.virtual_start == NULL) {
264                 DRM_ERROR("%s called without initializing AGP ring buffer.\n",
265                           __FUNCTION__);
266                 return DRM_ERR(EFAULT);
267         }
268
269         if (cmd->size > VIA_PCI_BUF_SIZE) {
270                 return DRM_ERR(ENOMEM);
271         }
272
273         if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
274                 return DRM_ERR(EFAULT);
275
276         /*
277          * Running this function on AGP memory is dead slow. Therefore
278          * we run it on a temporary cacheable system memory buffer and
279          * copy it to AGP memory when ready.
280          */
281
282         if ((ret =
283              via_verify_command_stream((uint32_t *)dev_priv->pci_buf,
284                                        cmd->size, dev, 1))) {
285                 return ret;
286         }
287
288         vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
289         if (vb == NULL) {
290                 return DRM_ERR(EAGAIN);
291         }
292
293         memcpy(vb, dev_priv->pci_buf, cmd->size);
294
295         dev_priv->dma_low += cmd->size;
296
297         /*
298          * Small submissions somehow stalls the CPU. (AGP cache effects?)
299          * pad to greater size.
300          */
301
302         if (cmd->size < 0x100)
303                 via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
304         via_cmdbuf_pause(dev_priv);
305
306         return 0;
307 }
308
309 int via_driver_dma_quiescent(drm_device_t * dev)
310 {
311         drm_via_private_t *dev_priv = dev->dev_private;
312
313         if (!via_wait_idle(dev_priv)) {
314                 return DRM_ERR(EBUSY);
315         }
316         return 0;
317 }
318
/*
 * DRM_VIA_FLUSH ioctl: wait for full engine idle.  The caller must
 * hold the hardware lock (enforced by LOCK_TEST_WITH_RETURN).
 */
static int via_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return via_driver_dma_quiescent(dev);
}
327
328 static int via_cmdbuffer(DRM_IOCTL_ARGS)
329 {
330         DRM_DEVICE;
331         drm_via_cmdbuffer_t cmdbuf;
332         int ret;
333
334         LOCK_TEST_WITH_RETURN(dev, filp);
335
336         DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
337                                  sizeof(cmdbuf));
338
339         DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
340
341         ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
342         if (ret) {
343                 return ret;
344         }
345
346         return 0;
347 }
348
349 static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
350                                       drm_via_cmdbuffer_t * cmd)
351 {
352         drm_via_private_t *dev_priv = dev->dev_private;
353         int ret;
354
355         if (cmd->size > VIA_PCI_BUF_SIZE) {
356                 return DRM_ERR(ENOMEM);
357         }
358         if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
359                 return DRM_ERR(EFAULT);
360
361         if ((ret =
362              via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
363                                        cmd->size, dev, 0))) {
364                 return ret;
365         }
366
367         ret =
368             via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
369                                      cmd->size);
370         return ret;
371 }
372
373 static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
374 {
375         DRM_DEVICE;
376         drm_via_cmdbuffer_t cmdbuf;
377         int ret;
378
379         LOCK_TEST_WITH_RETURN(dev, filp);
380
381         DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
382                                  sizeof(cmdbuf));
383
384         DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
385                   cmdbuf.size);
386
387         ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
388         if (ret) {
389                 return ret;
390         }
391
392         return 0;
393 }
394
395 static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
396                                          uint32_t * vb, int qw_count)
397 {
398         for (; qw_count > 0; --qw_count) {
399                 VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
400         }
401         return vb;
402 }
403
/*
 * This function is used internally by ring buffer management code.
 *
 * Returns virtual pointer to the current ring buffer tail (dma_low).
 */
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
413
/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 *
 * The flush / dummy-read / write sequence below is deliberate: it
 * forces write-combined ring writes out before the hardware can see
 * the updated pause address.  Do not reorder these statements.
 */
static int via_hook_segment(drm_via_private_t * dev_priv,
			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
			    int no_pci_fire)
{
	int paused, count;
	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
	uint32_t reader,ptr;

	paused = 0;
	/* Flush WC buffers, then read back the last ring word to make
	 * sure earlier command writes have actually landed. */
	via_flush_write_combine();
	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
	/* Patch the previously emitted pause command to point past the
	 * new segment, and flush that write too. */
	*paused_at = pause_addr_lo;
	via_flush_write_combine();
	(void) *paused_at;
	reader = *(dev_priv->hw_addr_ptr);
	/* AGP bus address of the word just patched (+4 skips to the
	 * low command word — matches the dma_diff calibration). */
	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

	/* If the reader is close to the patched pause, poll until the
	 * regulator reports paused (0x41c bit 31 — NOTE(review):
	 * undocumented status bit, presumed "regulator paused"). */
	if ((ptr - reader) <= dev_priv->dma_diff ) {
		count = 10000000;
		while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
	}

	if (paused && !no_pci_fire) {
		reader = *(dev_priv->hw_addr_ptr);
		if ((ptr - reader) == dev_priv->dma_diff) {

			/*
			 * There is a concern that these writes may stall the PCI bus
			 * if the GPU is not idle. However, idling the GPU first
			 * doesn't make a difference.
			 */

			VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
			VIA_READ(VIA_REG_TRANSPACE);
		}
	}

	/* Nonzero if the regulator was seen paused. */
	return paused;
}
462
463
464
465 static int via_wait_idle(drm_via_private_t * dev_priv)
466 {
467         int count = 10000000;
468
469         while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
470
471         while (count-- && (VIA_READ(VIA_REG_STATUS) &
472                            (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
473                             VIA_3D_ENG_BUSY))) ;
474         return count;
475 }
476
/*
 * Pad the ring up to the next CMDBUF_ALIGNMENT_SIZE boundary and emit
 * a TRANSET/TRANSPACE branch command there.  cmd_type selects the
 * branch kind (pause/jump/stop); addr == 0 means "branch to just past
 * the pad".  The resulting hi/lo command words are stored through
 * *cmd_addr_hi / *cmd_addr_lo and are also written into the ring.
 *
 * Returns the ring pointer just past the emitted command.  Note that
 * VIA_OUT_RING_QW expands in terms of the local "vb".
 */
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
			       uint32_t addr, uint32_t *cmd_addr_hi,
			       uint32_t *cmd_addr_lo, int skip_wait)
{
	uint32_t agp_base;
	uint32_t cmd_addr, addr_lo, addr_hi;
	uint32_t *vb;
	uint32_t qw_pad_count;

	/* Reserve room for the pad plus the command itself. */
	if (!skip_wait)
		via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE);

	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
			 (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	/* Quadwords needed to reach the next alignment boundary. */
	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
		((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

	/* Default target: the AGP address right after the pad. */
	cmd_addr = (addr) ? addr :
		agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
		   (cmd_addr & HC_HAGPBpL_MASK));
	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

	/* Pad, then emit the branch command as the aligned quadword.
	 * The assignments inside the macro arguments also publish the
	 * command words to the caller. */
	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
	return vb;
}
506
/*
 * Program the command regulator with the ring start/end addresses,
 * emit an initial pause command, kick the regulator, and calibrate
 * dma_diff (the offset between the pause address we program and where
 * the hardware actually reports stopping).
 */
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t start_addr, start_addr_lo;
	uint32_t end_addr, end_addr_lo;
	uint32_t command;
	uint32_t agp_base;
	uint32_t ptr;
	uint32_t reader;
	int count;

	dev_priv->dma_low = 0;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	start_addr = agp_base;
	end_addr = agp_base + dev_priv->dma_high;

	/* Split the 32-bit start/end addresses into the regulator's
	 * low-24-bit and combined high-byte register formats. */
	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
		   ((end_addr & 0xff000000) >> 16));

	/* Emit the initial pause; remember where its low word lives so
	 * via_hook_segment can patch it later. */
	dev_priv->last_pause_ptr =
		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
			      &pause_addr_hi, & pause_addr_lo, 1) - 1;

	via_flush_write_combine();
	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

	VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
	VIA_WRITE(VIA_REG_TRANSPACE, command);
	VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
	VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
	DRM_WRITEMEMORYBARRIER();
	/* Setting HC_HAGPCMNT_MASK starts the regulator. */
	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
	VIA_READ(VIA_REG_TRANSPACE);

	dev_priv->dma_diff = 0;

	/* Wait for the regulator to pause (0x41c bit 31 — NOTE(review):
	 * undocumented status bit, presumed "regulator paused"). */
	count = 10000000;
	while (!(VIA_READ(0x41c) & 0x80000000) && count--);

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	/*
	 * This is the difference between where we tell the
	 * command reader to pause and where it actually pauses.
	 * This differs between hw implementation so we need to
	 * detect it.
	 */

	dev_priv->dma_diff = ptr - reader;
}
565
/*
 * Pad the ring with "qwords" dummy quadwords (preceded by a NotTex
 * header) to work around stalls on small submissions.  The local must
 * be named "vb" because VIA_OUT_RING_QW expands in terms of it.
 */
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
	uint32_t *vb;

	via_cmdbuf_wait(dev_priv, qwords + 2);
	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW( HC_HEADER2, HC_ParaType_NotTex << 16);
	via_align_buffer(dev_priv,vb,qwords);
}
575
/*
 * Emit a dummy (zero-sized) 2D blit into the ring.  Used after a ring
 * wrap to give the command reader harmless work.  SetReg2DAGP expands
 * in terms of the local "vb" and the "dev_priv" parameter.
 */
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
	uint32_t *vb = via_get_dma(dev_priv);
	/* NOTE(review): register meanings (0x0C/0x10/0x0, ROP 0xAA)
	 * presumed from Unichrome 2D usage — verify against hw docs. */
	SetReg2DAGP(0x0C, (0 | (0 << 16)));
	SetReg2DAGP(0x10, 0 | (0 << 16));
	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
583
/*
 * Wrap the ring: emit a jump command at the current tail, reset the
 * write pointer to the start, emit dummy blits plus a double pause,
 * and hook the new segment so the regulator follows the jump.
 */
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
	uint32_t agp_base;
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t jump_addr_lo, jump_addr_hi;
	volatile uint32_t *last_pause_ptr;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	/* Emit the jump at the tail; its target is patched in later by
	 * via_hook_segment (the addr argument is 0 here). */
	via_align_cmd(dev_priv,  HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
		      &jump_addr_lo, 0);

	/* Remember where this lap of the ring ends for lag accounting. */
	dev_priv->dma_wrap = dev_priv->dma_low;


	/*
	 * Wrap command buffer to the beginning.
	 */

	dev_priv->dma_low = 0;
	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
		DRM_ERROR("via_cmdbuf_jump failed\n");
	}

	via_dummy_bitblt(dev_priv);
	via_dummy_bitblt(dev_priv);
	/* Chain two pauses: the first is patched to point at the second,
	 * which becomes the new last_pause_ptr for future hooks. */
	last_pause_ptr = via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
				       &pause_addr_lo, 0) -1;
	via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);
	*last_pause_ptr = pause_addr_lo;

	via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0);
}
617
618
/* Rewind the ring to its start by emitting a jump (see via_cmdbuf_jump). */
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
	via_cmdbuf_jump(dev_priv);
}
623
/*
 * Terminate the current segment with a branch command of the given
 * type (pause/stop) and hook it into the ring tail.
 */
static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
	uint32_t pause_addr_lo, pause_addr_hi;

	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
	via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
631
632
/* Pause the command regulator at the current ring tail. */
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
637
/* Stop the command regulator and wait for the engines to go idle. */
static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
	via_wait_idle(dev_priv);
}
643
644 /*
645  * User interface to the space and lag functions.
646  */
647
648 static int via_cmdbuf_size(DRM_IOCTL_ARGS)
649 {
650         DRM_DEVICE;
651         drm_via_cmdbuf_size_t d_siz;
652         int ret = 0;
653         uint32_t tmp_size, count;
654         drm_via_private_t *dev_priv;
655
656         DRM_DEBUG("via cmdbuf_size\n");
657         LOCK_TEST_WITH_RETURN( dev, filp );
658
659         dev_priv = (drm_via_private_t *) dev->dev_private;
660
661         if (dev_priv->ring.virtual_start == NULL) {
662                 DRM_ERROR("%s called without initializing AGP ring buffer.\n",
663                           __FUNCTION__);
664                 return DRM_ERR(EFAULT);
665         }
666
667         DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data,
668                                  sizeof(d_siz));
669
670
671         count = 1000000;
672         tmp_size = d_siz.size;
673         switch(d_siz.func) {
674         case VIA_CMDBUF_SPACE:
675                 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size)
676                        && count--) {
677                         if (!d_siz.wait) {
678                                 break;
679                         }
680                 }
681                 if (!count) {
682                         DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
683                         ret = DRM_ERR(EAGAIN);
684                 }
685                 break;
686         case VIA_CMDBUF_LAG:
687                 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size)
688                        && count--) {
689                         if (!d_siz.wait) {
690                                 break;
691                         }
692                 }
693                 if (!count) {
694                         DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
695                         ret = DRM_ERR(EAGAIN);
696                 }
697                 break;
698         default:
699                 ret = DRM_ERR(EFAULT);
700         }
701         d_siz.size = tmp_size;
702
703         DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz,
704                                sizeof(d_siz));
705         return ret;
706 }
707
#ifndef VIA_HAVE_DMABLIT
/*
 * Stubs for platforms without the PCI DMA blit implementation; they
 * keep the ioctl table entries valid but always fail with EINVAL.
 */
int 
via_dma_blit_sync( DRM_IOCTL_ARGS ) {
	DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
	return DRM_ERR(EINVAL);
}
int 
via_dma_blit( DRM_IOCTL_ARGS ) {
	DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
	return DRM_ERR(EINVAL);
}
#endif
720
/*
 * Driver ioctl dispatch table, indexed by device-specific ioctl
 * number.  All entries require authentication; init-type ioctls
 * additionally require the DRM master.
 */
drm_ioctl_desc_t via_ioctls[] = {
	[DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER},
	[DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER},
	[DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER},
	[DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
};

/* Number of entries in via_ioctls, exported to the DRM core. */
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);