OSDN Git Service

Merge branch 'modesetting-101' into modesetting-gem
[android-x86/external-libdrm.git] / shared-core / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "i915_drm.h"
32 #include "i915_drv.h"
33
34 /* Really want an OS-independent resettable timer.  Would like to have
35  * this loop run for (eg) 3 sec, but have the timer reset every time
36  * the head pointer changes, so that EBUSY only happens if the ring
37  * actually stalls for (eg) 3 seconds.
38  */
/* Busy-wait until at least 'n' bytes of space are free in the ring.
 *
 * Polls the hardware head pointer (and ACTHD, the active-head register)
 * up to 100000 times, sleeping ~10ms between polls.  The loop counter is
 * reset whenever either pointer moves, so -EBUSY is only returned when
 * the ring has made no progress at all for the whole timeout window.
 *
 * Returns 0 once enough space is available, -EBUSY on a stall.
 * 'caller' is accepted for diagnostics but is currently unused.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;	/* register offset differs on 965 */
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		/* Free space is the head-to-tail gap minus an 8-byte guard. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Any movement of either pointer restarts the timeout. */
		if (ring->head != last_head)
			i = 0;

		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible (10);	/* NOTE(review): interrupted-sleep return value ignored */
	}

	return -EBUSY;
}
71
#if I915_RING_VALIDATE
/**
 * Validate the cached ring tail value
 *
 * If the X server writes to the ring and DRM doesn't
 * reload the head and tail pointers, it will end up writing
 * data to the wrong place in the ring, causing havoc.
 */
void i915_ring_validate(struct drm_device *dev, const char *func, int line)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	/* NOTE(review): both reads mask with HEAD_ADDR; the tail read
	 * presumably wants TAIL_ADDR -- confirm the two masks cover the
	 * same bits before changing anything here. */
	u32	tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
	u32	head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;

	/* Only the tail is actually compared; the head values are
	 * printed purely as diagnostics. */
	if (tail != ring->tail) {
		DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
			  func, line,
			  ring->head, head, ring->tail, tail);
		BUG_ON(1);
	}
}
#endif
95
/* Resynchronize the driver's cached head/tail/space values with the
 * hardware ring registers.  Needed whenever another agent (e.g. the X
 * server) may have advanced the ring behind the kernel's back. */
void i915_kernel_lost_context(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_ring_buffer *ring = &(dev_priv->ring);

	/* we should never lose context on the ring with modesetting
	 * as we don't expose it to userspace */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	/* Free space, keeping an 8-byte guard so a full ring never has
	 * head == tail. */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}
112
/* Tear down user-initiated DMA state: disable interrupts and release
 * the ring-buffer mapping.  A no-op under kernel modesetting, where
 * the kernel owns the ring for the device's lifetime.  Returns 0. */
int i915_dma_cleanup(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		/* Zero all ring bookkeeping so a later init starts clean. */
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
		dev_priv->ring.Size = 0;
	}

	return 0;
}
137
#if defined(I915_HAVE_BUFFER) && defined(DRI2)
/* The DRI2 SAREA is a sequence of variable-size blocks.  Each block
 * starts with a 32-bit header whose high 16 bits are the block type
 * and whose low 16 bits are the block size in bytes (header included). */
#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
#define DRI2_SAREA_BLOCK_NEXT(p)				\
	((void *) ((unsigned char *) (p) +			\
		   DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))

#define DRI2_SAREA_BLOCK_END		0x0000
#define DRI2_SAREA_BLOCK_LOCK		0x0001
#define DRI2_SAREA_BLOCK_EVENT_BUFFER	0x0002

/**
 * Look up and kmap the DRI2 SAREA buffer object, then walk its block
 * list, wiring the hardware lock up to the lock block when found.
 *
 * On success dev_priv->sarea_bo holds a reference to the buffer object
 * and dev_priv->sarea_kmap the kernel mapping; both persist until DMA
 * cleanup.  On failure both are released again (the original code
 * leaked the kmap and the bo reference on the malformed-sarea path).
 *
 * Returns 0 on success or a negative errno.
 */
static int
setup_dri2_sarea(struct drm_device * dev,
		 struct drm_file *file_priv,
		 drm_i915_init_t * init)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned int *p, *end, *next;

	mutex_lock(&dev->struct_mutex);
	dev_priv->sarea_bo =
		drm_lookup_buffer_object(file_priv,
					 init->sarea_handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!dev_priv->sarea_bo) {
		DRM_ERROR("did not find sarea bo\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
			  dev_priv->sarea_bo->num_pages,
			  &dev_priv->sarea_kmap);
	if (ret) {
		DRM_ERROR("could not map sarea bo\n");
		goto err_unref;
	}

	p = dev_priv->sarea_kmap.virtual;
	end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
	while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
		switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
		case DRI2_SAREA_BLOCK_LOCK:
			/* The hardware lock lives inside the sarea,
			 * immediately after the block header word. */
			dev->primary->master->lock.hw_lock = (void *) (p + 1);
			dev->sigdata.lock = dev->primary->master->lock.hw_lock;
			break;
		}
		next = DRI2_SAREA_BLOCK_NEXT(p);
		/* Guard against corrupt or zero-size blocks so a bad
		 * sarea cannot send us into an endless or overrunning
		 * walk of the mapping. */
		if (next <= p || end < next) {
			DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
				  next, p, end);
			ret = -EINVAL;
			goto err_unmap;
		}
		p = next;
	}

	return 0;

err_unmap:
	drm_bo_kunmap(&dev_priv->sarea_kmap);
err_unref:
	/* Drop the reference taken by drm_lookup_buffer_object(). */
	drm_bo_usage_deref_unlocked(&dev_priv->sarea_bo);
	return ret;
}
#endif
198
/* One-time DMA setup for the I915_INIT_DMA{,2} ioctls: locate the MMIO
 * map (non-modesetting only), ioremap the ring buffer described by
 * 'init', and seed driver defaults (cpp, batchbuffer permission,
 * vblank pipe, DRI2 sarea).  Partially-built state is torn down with
 * i915_dma_cleanup() on failure.  Returns 0 or a negative errno. */
static int i915_initialize(struct drm_device * dev,
			   struct drm_file *file_priv,
			   drm_i915_init_t * init)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* A zero mmio_offset keeps whatever map was found earlier;
		 * with no map at all we cannot touch the hardware. */
		if (init->mmio_offset != 0)
			dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
		if (!dev_priv->mmio_map) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not find mmio map!\n");
			return -EINVAL;
		}
	}

#ifdef I915_HAVE_BUFFER
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
	}
#endif

	if (init->ring_size != 0) {
		dev_priv->ring.Size = init->ring_size;
		/* NOTE(review): tail_mask assumes ring_size is a power of
		 * two -- confirm callers guarantee that. */
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;
		drm_core_ioremap(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
		dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
	}

	dev_priv->cpp = init->cpp;
	master_priv->sarea_priv->pf_current_page = 0;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Enable vblank on pipe A for older X servers
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

#ifdef I915_HAVE_BUFFER
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_init(&dev_priv->cmdbuf_mutex);
	}
#ifdef DRI2
	if (init->func == I915_INIT_DMA2) {
		int ret = setup_dri2_sarea(dev, file_priv, init);
		if (ret) {
			i915_dma_cleanup(dev);
			DRM_ERROR("could not set up dri2 sarea\n");
			return ret;
		}
	}
#endif /* DRI2 */
#endif /* I915_HAVE_BUFFER */

	return 0;
}
274
/* Re-enable DMA after suspend/resume: verify the ring mapping and the
 * hardware status page are still present, then re-point the hardware
 * at the status page via HWS_PGA.  Returns 0 or a negative errno. */
static int i915_dma_resume(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;

	DRM_DEBUG("\n");

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* A non-zero status_gfx_addr means the page lives in graphics
	 * memory; otherwise use the DMA-allocated page's bus address. */
	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
305
306 static int i915_dma_init(struct drm_device *dev, void *data,
307                          struct drm_file *file_priv)
308 {
309         struct drm_i915_init *init = data;
310         int retcode = 0;
311
312         switch (init->func) {
313         case I915_INIT_DMA:
314         case I915_INIT_DMA2:
315                 retcode = i915_initialize(dev, file_priv, init);
316                 break;
317         case I915_CLEANUP_DMA:
318                 retcode = i915_dma_cleanup(dev);
319                 break;
320         case I915_RESUME_DMA:
321                 retcode = i915_dma_resume(dev);
322                 break;
323         default:
324                 retcode = -EINVAL;
325                 break;
326         }
327
328         return retcode;
329 }
330
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	/* Work on an unsigned copy: command words use bit 31, and
	 * right-shifting a negative signed int is implementation-
	 * defined in C (C99 6.5.7). */
	unsigned int c = (unsigned int) cmd;

	switch ((c >> 29) & 0x7) {
	case 0x0:
		switch ((c >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (c & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* 3d state commands with opcode <= 0x18 are single-dword. */
		if (((c >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((c >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((c >> 16) & 0xff) {
			case 0x3:
				return (c & 0x1f) + 2;
			case 0x4:
				return (c & 0xf) + 2;
			default:
				return (c & 0xffff) + 2;
			}
		case 0x1e:
			if (c & (1 << 23))
				return (c & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((c & (1 << 23)) == 0)	/* inline vertices */
				return (c & 0x1ffff) + 2;
			else if (c & (1 << 17)) {	/* indirect random */
				if ((c & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((c & 0xffff) + 1) / 2) + 1;
			} else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
397
/**
 * Thin wrapper around do_validate_cmd(), kept as a single hook point
 * for tracing command validation while debugging.
 */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
406
/* Copy and validate a user-supplied stream of 'dwords' command dwords
 * into the ring.  Each command header is checked with validate_cmd(),
 * which also yields the command's total dword length; a zero length or
 * a command overrunning the buffer aborts with -EINVAL.  The ring
 * reservation is rounded up to an even dword count and padded with a
 * trailing zero when 'dwords' is odd.
 *
 * NOTE(review): the -EINVAL returns happen between BEGIN_LP_RING() and
 * ADVANCE_LP_RING(); confirm bailing out without advancing the tail
 * leaves the ring bookkeeping consistent. */
static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
			  int dwords)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* The whole (rounded-up) stream must fit inside the ring,
	 * leaving the usual 8-byte guard. */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Copy the remaining sz-1 operand dwords of this command. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
446
/* Copy cliprect 'i' from the user array 'boxes' and emit a
 * DRAWRECT_INFO command restricting rendering to that rectangle.
 * DR1/DR4 are the draw-rectangle control dwords passed through from
 * userspace; the 965 packet is two dwords shorter and carries no DR1.
 * Returns -EFAULT on a bad user pointer, -EINVAL for an empty or
 * inverted box, 0 on success. */
int i915_emit_box(struct drm_device * dev,
		  struct drm_clip_rect __user * boxes,
		  int i, int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	/* Reject degenerate rectangles. */
	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		/* -1: hardware presumably takes inclusive bottom-right
		 * coordinates -- matches the exclusive x2/y2 here. */
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
485
486 /* XXX: Emitting the counter should really be moved to part of the IRQ
487  * emit. For now, do it in both places:
488  */
489
/* Increment the software frame counter and emit an MI_STORE_DWORD_INDEX
 * that writes it into slot 5 of the hardware status page, where the CPU
 * can later read it back (READ_BREADCRUMB) to see how far the GPU has
 * progressed.  The counter wraps back to 1 when it would exceed
 * BREADCRUMB_MASK; the new value is also mirrored into the sarea as
 * last_enqueue for userspace. */
void i915_emit_breadcrumb(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	if (++dev_priv->counter > BREADCRUMB_MASK) {
		 dev_priv->counter = 1;
		 DRM_DEBUG("Breadcrumb counter wrapped around\n");
	}

	master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);	/* status page dword 5 */
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
510
511
/* Emit an MI_FLUSH with the extra 'flush' control bits OR'd in,
 * padded with three zero dwords.  Resynchronizes the cached ring
 * pointers first via i915_kernel_lost_context().  Always returns 0. */
int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t flush_cmd = MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
531
532
/* Execute a user command buffer once per cliprect (or exactly once
 * when no cliprects were supplied), emitting the corresponding draw
 * rectangle before each pass, then append a breadcrumb.  cmd->sz must
 * be a multiple of 4 bytes.  Returns 0 or a negative errno from box
 * or command emission. */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   struct drm_i915_cmdbuffer * cmd)
{
#ifdef I915_HAVE_FENCE
	struct drm_i915_private *dev_priv = dev->dev_private;
#endif
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects, still run the buffer once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	/* Flush old fences every 256 breadcrumbs. */
	if (unlikely((dev_priv->counter & 0xFF) == 0))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
571
/* Execute a user batchbuffer once per cliprect (or once when no
 * cliprects were given), emitting the draw rectangle before each pass,
 * then append a breadcrumb.  batch->start and batch->used must be
 * 8-byte aligned.  On 830/845 the older MI_BATCH_BUFFER packet with
 * explicit start and end addresses is emitted; all other chips chain
 * the buffer with MI_BATCH_BUFFER_START (965 uses its own variant of
 * the non-secure bit).  Returns 0 or a negative errno. */
int i915_dispatch_batchbuffer(struct drm_device * dev,
			      drm_i915_batchbuffer_t * batch)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects, still run the batch once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);	/* address of last dword */
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	/* Flush old fences every 256 breadcrumbs. */
	if (unlikely((dev_priv->counter & 0xFF) == 0))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
625
/* Emit the ring commands flipping one display plane to its next page.
 * The current page index of each plane lives in a 2-bit field of
 * sarea_priv->pf_current_page (plane A in bits 1:0, plane B in bits
 * 3:2); cycling covers 2 or 3 pages depending on whether a third
 * buffer handle exists.  When 'sync' is clear the flip is asynchronous
 * and preceded by an MI_WAIT_FOR_EVENT on the plane's flip-pending
 * event.  The cached pf_current_page field is updated at the end. */
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	u32 num_pages, current_page, next_page, dspbase;
	int shift = 2 * plane, x, y;
	RING_LOCALS;

	/* Calculate display base offset */
	num_pages = master_priv->sarea_priv->third_handle ? 3 : 2;
	current_page = (master_priv->sarea_priv->pf_current_page >> shift) & 0x3;
	next_page = (current_page + 1) % num_pages;

	switch (next_page) {
	default:
	case 0:
		dspbase = master_priv->sarea_priv->front_offset;
		break;
	case 1:
		dspbase = master_priv->sarea_priv->back_offset;
		break;
	case 2:
		dspbase = master_priv->sarea_priv->third_offset;
		break;
	}

	if (plane == 0) {
		x = master_priv->sarea_priv->planeA_x;
		y = master_priv->sarea_priv->planeA_y;
	} else {
		x = master_priv->sarea_priv->planeB_x;
		y = master_priv->sarea_priv->planeB_y;
	}

	/* Offset the base by the plane's x/y position within the buffer. */
	dspbase += (y * master_priv->sarea_priv->pitch + x) * dev_priv->cpp;

	DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
		  dspbase);

	BEGIN_LP_RING(4);
	OUT_RING(sync ? 0 :
		 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
				       MI_WAIT_FOR_PLANE_A_FLIP)));
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
		 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
	OUT_RING(master_priv->sarea_priv->pitch * dev_priv->cpp);
	OUT_RING(dspbase);
	ADVANCE_LP_RING();

	/* Record the page we just flipped this plane to. */
	master_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
	master_priv->sarea_priv->pf_current_page |= next_page << shift;
}
678
/* Flush render caches, flip every plane selected in the 'planes'
 * bitmask (bit 0 = plane A, bit 1 = plane B) via
 * i915_do_dispatch_flip(), then emit a breadcrumb.  'sync' selects
 * synchronous vs asynchronous flips. */
void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int i;

	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
		  planes, master_priv->sarea_priv->pf_current_page);

	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	for (i = 0; i < 2; i++)
		if (planes & (1 << i))
			i915_do_dispatch_flip(dev, i, sync);

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	/* Flush old fences every 256 breadcrumbs (async flips only). */
	if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
700
701 int i915_quiescent(struct drm_device *dev)
702 {
703         struct drm_i915_private *dev_priv = dev->dev_private;
704         int ret;
705
706         i915_kernel_lost_context(dev);
707         ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
708         if (ret)
709         {
710                 i915_kernel_lost_context (dev);
711                 DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
712                            dev_priv->ring.head,
713                            dev_priv->ring.tail,
714                            dev_priv->ring.space);
715         }
716         return ret;
717 }
718
/**
 * DRM_IOCTL_I915_FLUSH handler: require the hardware lock, then wait
 * until the ring is fully drained.
 */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}
727
/* DRM_IOCTL_I915_BATCHBUFFER handler: verify the cliprect array is
 * readable, dispatch the batchbuffer, and publish the current
 * breadcrumb to the sarea.  Fails with -EINVAL when batchbuffers have
 * been administratively disabled.
 *
 * NOTE(review): last_dispatch is updated even when dispatch failed. */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
758
/* DRM_IOCTL_I915_CMDBUFFER handler: verify the cliprect array is
 * readable, dispatch the validated user command buffer, and on
 * success publish the current breadcrumb to the sarea. */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
		master_priv->sarea_priv;
	struct drm_i915_cmdbuffer *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}
791
/* DRM_DEBUG_RELOCATION: non-zero when the global drm_debug flag is
 * set; without DRM_DEBUG_CODE it compiles to a constant 0 so any
 * debug branches are dropped entirely. */
#if defined(DRM_DEBUG_CODE)
#define DRM_DEBUG_RELOCATION    (drm_debug != 0)
#else
#define DRM_DEBUG_RELOCATION    0
#endif
797
798 int i915_do_cleanup_pageflip(struct drm_device * dev)
799 {
800         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
801         int i, planes, num_pages;
802
803         DRM_DEBUG("\n");
804         num_pages = master_priv->sarea_priv->third_handle ? 3 : 2;
805         for (i = 0, planes = 0; i < 2; i++) {
806                 if (master_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
807                         master_priv->sarea_priv->pf_current_page =
808                                 (master_priv->sarea_priv->pf_current_page &
809                                  ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
810
811                         planes |= 1 << i;
812                 }
813         }
814
815         if (planes)
816                 i915_dispatch_flip(dev, planes, 0);
817
818         return 0;
819 }
820
821 static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
822 {
823         struct drm_i915_flip *param = data;
824
825         DRM_DEBUG("\n");
826
827         LOCK_TEST_WITH_RETURN(dev, file_priv);
828
829         /* This is really planes */
830         if (param->pipes & ~0x3) {
831                 DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
832                           param->pipes);
833                 return -EINVAL;
834         }
835
836         i915_dispatch_flip(dev, param->pipes, 0);
837
838         return 0;
839 }
840
841
842 static int i915_getparam(struct drm_device *dev, void *data,
843                          struct drm_file *file_priv)
844 {
845         struct drm_i915_private *dev_priv = dev->dev_private;
846         struct drm_i915_getparam *param = data;
847         int value;
848
849         if (!dev_priv) {
850                 DRM_ERROR("called with no initialization\n");
851                 return -EINVAL;
852         }
853
854         switch (param->param) {
855         case I915_PARAM_IRQ_ACTIVE:
856                 value = dev->irq_enabled ? 1 : 0;
857                 break;
858         case I915_PARAM_ALLOW_BATCHBUFFER:
859                 value = dev_priv->allow_batchbuffer ? 1 : 0;
860                 break;
861         case I915_PARAM_LAST_DISPATCH:
862                 value = READ_BREADCRUMB(dev_priv);
863                 break;
864         case I915_PARAM_CHIPSET_ID:
865                 value = dev->pci_device;
866                 break;
867         case I915_PARAM_HAS_GEM:
868                 value = 1;
869                 break;
870         default:
871                 DRM_ERROR("Unknown parameter %d\n", param->param);
872                 return -EINVAL;
873         }
874
875         if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
876                 DRM_ERROR("DRM_COPY_TO_USER failed\n");
877                 return -EFAULT;
878         }
879
880         return 0;
881 }
882
883 static int i915_setparam(struct drm_device *dev, void *data,
884                          struct drm_file *file_priv)
885 {
886         struct drm_i915_private *dev_priv = dev->dev_private;
887         drm_i915_setparam_t *param = data;
888
889         if (!dev_priv) {
890                 DRM_ERROR("called with no initialization\n");
891                 return -EINVAL;
892         }
893
894         switch (param->param) {
895         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
896                 break;
897         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
898                 dev_priv->tex_lru_log_granularity = param->value;
899                 break;
900         case I915_SETPARAM_ALLOW_BATCHBUFFER:
901                 dev_priv->allow_batchbuffer = param->value;
902                 break;
903         default:
904                 DRM_ERROR("unknown parameter %d\n", param->param);
905                 return -EINVAL;
906         }
907
908         return 0;
909 }
910
/* Whitelist of register ranges that userspace may access through the
 * DRM_I915_MMIO ioctl, indexed by the MMIO_REGS_* codes.  Each entry is
 * { access flags, register offset, size } — i915_mmio() uses size both
 * as the user-copy byte count and as size/4 register words, so it is a
 * byte length.  Register meanings below are inferred from the entry
 * names; confirm against the hardware PRM. */
drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {	/* pixel/depth statistics counter */
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2350,
		8
	},
	[MMIO_REGS_DOVSTA] = {		/* overlay status — read-only */
		I915_MMIO_MAY_READ,
		0x30008,
		1
	},
	[MMIO_REGS_GAMMA] = {		/* overlay gamma correction */
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x30010,
		6
	},
	[MMIO_REGS_FENCE] = {		/* legacy fence registers */
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2000,
		8
	},
	[MMIO_REGS_FENCE_NEW] = {	/* newer fence register block */
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x3000,
		16
	}
};
938
939 static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
940
941 static int i915_mmio(struct drm_device *dev, void *data,
942                      struct drm_file *file_priv)
943 {
944         uint32_t buf[8];
945         struct drm_i915_private *dev_priv = dev->dev_private;
946         drm_i915_mmio_entry_t *e;        
947         drm_i915_mmio_t *mmio = data;
948         void __iomem *base;
949         int i;
950
951         if (!dev_priv) {
952                 DRM_ERROR("called with no initialization\n");
953                 return -EINVAL;
954         }
955
956         if (mmio->reg >= mmio_table_size)
957                 return -EINVAL;
958
959         e = &mmio_table[mmio->reg];
960         base = (u8 *) dev_priv->mmio_map->handle + e->offset;
961
962         switch (mmio->read_write) {
963         case I915_MMIO_READ:
964                 if (!(e->flag & I915_MMIO_MAY_READ))
965                         return -EINVAL;
966                 for (i = 0; i < e->size / 4; i++)
967                         buf[i] = I915_READ(e->offset + i * 4);
968                 if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
969                         DRM_ERROR("DRM_COPY_TO_USER failed\n");
970                         return -EFAULT;
971                 }
972                 break;
973                 
974         case I915_MMIO_WRITE:
975                 if (!(e->flag & I915_MMIO_MAY_WRITE))
976                         return -EINVAL;
977                 if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
978                         DRM_ERROR("DRM_COPY_TO_USER failed\n");
979                         return -EFAULT;
980                 }
981                 for (i = 0; i < e->size / 4; i++)
982                         I915_WRITE(e->offset + i * 4, buf[i]);
983                 break;
984         }
985         return 0;
986 }
987
/*
 * DRM_I915_HWS_ADDR ioctl: place the hardware status page at a
 * userspace-chosen offset within the AGP aperture, for chips that
 * require a GFX-addressed status page (see I915_NEED_GFX_HWS).
 *
 * Maps the 4 KiB page at dev->agp->base + hws->addr, zeroes it, and
 * programs HWS_PGA with its graphics address.  Under KMS
 * (DRIVER_MODESET) the kernel owns the status page, so the ioctl is a
 * silent no-op.
 *
 * Returns 0 on success, -EINVAL if the chip does not need a GFX HWS or
 * the device is uninitialized, -ENOMEM if the ioremap fails (the whole
 * DMA setup is torn down in that case).
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* KMS manages the status page itself; ignore userspace requests. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	/* Keep only a page-aligned graphics address within the
	 * addressable range (17 bits of page number). */
	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		/* NOTE(review): failure tears down all DMA state, not just
		 * the HWS map — presumably the device is unusable without a
		 * status page; confirm callers expect this. */
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	/* Clear the page before telling the GPU where it lives. */
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);

	return 0;
}
1032
/* Driver ioctl dispatch table consumed by the DRM core.  Flags declare
 * caller requirements: DRM_AUTH (authenticated client), DRM_MASTER
 * (DRM master), DRM_ROOT_ONLY (CAP_SYS_ADMIN); 0 means unprivileged.
 * Entries are presumably positioned by their DRM_I915_* ioctl numbers
 * via DRM_IOCTL_DEF — confirm against the macro's definition. */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
#ifdef I915_HAVE_BUFFER
	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
1072
1073 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1074
1075 /**
1076  * Determine if the device really is AGP or not.
1077  *
1078  * All Intel graphics chipsets are treated as AGP, even if they are really
1079  * PCI-e.
1080  *
1081  * \param dev   The device to be tested.
1082  *
1083  * \returns
1084  * A value of 1 is always retured to indictate every i9x5 is AGP.
1085  */
1086 int i915_driver_device_is_agp(struct drm_device * dev)
1087 {
1088         return 1;
1089 }
1090