OSDN Git Service

Radeon: restructure PLL data
[android-x86/external-libdrm.git] / shared-core / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "i915_drm.h"
32 #include "i915_drv.h"
33
34 /* Really want an OS-independent resettable timer.  Would like to have
35  * this loop run for (eg) 3 sec, but have the timer reset every time
36  * the head pointer changes, so that EBUSY only happens if the ring
37  * actually stalls for (eg) 3 seconds.
38  */
/**
 * Busy-wait until at least @n bytes of space are free in the ring buffer.
 *
 * Polls the hardware head pointer (and ACTHD, the active head address,
 * which lives in a different register on 965-class hardware) up to
 * 100000 times with a 10 ms sleep between polls.  The poll counter is
 * reset whenever either register moves, so -EBUSY is only returned when
 * the ring has made no progress at all for the whole window.
 *
 * Returns 0 once the space is available, -EBUSY on a full stall.
 * @caller is unused here beyond identifying the call site.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		/* Free space is head - tail - 8, modulo the ring size; the
		 * 8-byte slack keeps tail from ever catching head exactly. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Any movement of either pointer restarts the timeout. */
		if (ring->head != last_head)
			i = 0;

		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible (10);
	}

	return -EBUSY;
}
71
72 #if I915_RING_VALIDATE
73 /**
74  * Validate the cached ring tail value
75  *
76  * If the X server writes to the ring and DRM doesn't
77  * reload the head and tail pointers, it will end up writing
78  * data to the wrong place in the ring, causing havoc.
79  */
/* Compare the software-cached tail against the hardware RING_TAIL register
 * and BUG out on a mismatch (see the block comment above: an external
 * writer such as the X server advancing the ring behind DRM's back would
 * corrupt subsequent emits).  Only the tail is actually validated; the
 * head values are read and printed purely for diagnostic context. */
void i915_ring_validate(struct drm_device *dev, const char *func, int line)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32	tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
	u32	head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;

	if (tail != ring->tail) {
		DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
			  func, line,
			  ring->head, head, ring->tail, tail);
		BUG_ON(1);
	}
}
94 #endif
95
/* Re-read the hardware head/tail pointers into the software ring state
 * and recompute the free space.  Called before emitting commands in case
 * another agent (e.g. the X server) advanced the ring since we last
 * looked. */
void i915_kernel_lost_context(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_ring_buffer *ring = &(dev_priv->ring);

	/* We should never lose context on the ring with modesetting,
	 * as we don't expose it to userspace. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	/* Same space formula as i915_wait_ring(): head - tail - 8 mod Size. */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}
112
/* Tear down the user-mode DMA state: disable interrupts and unmap the
 * ring buffer.  A no-op under kernel modesetting, where the ring is not
 * exposed to userspace.  Always returns 0. */
int i915_dma_cleanup(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		/* Clear all ring bookkeeping so a later resume/init can
		 * detect that no mapping exists. */
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
		dev_priv->ring.Size = 0;
	}

	return 0;
}
137
138 #if defined(DRI2)
139 #define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
140 #define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
141 #define DRI2_SAREA_BLOCK_NEXT(p)                                \
142         ((void *) ((unsigned char *) (p) +                      \
143                    DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))
144
145 #define DRI2_SAREA_BLOCK_END            0x0000
146 #define DRI2_SAREA_BLOCK_LOCK           0x0001
147 #define DRI2_SAREA_BLOCK_EVENT_BUFFER   0x0002
148
/* Map the DRI2 shared-area buffer object named by init->sarea_handle and
 * walk its block list, wiring the LOCK block up as the hardware lock.
 * Each block header encodes a 16-bit type (high half) and 16-bit size
 * (low half); the walk stops at a BLOCK_END type or the end of the
 * mapping.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the malformed-sarea error path returns without unmapping
 * sarea_kmap or releasing sarea_bo -- presumably i915_dma_cleanup() or a
 * later teardown reclaims them; verify against the callers. */
static int
setup_dri2_sarea(struct drm_device * dev,
		 struct drm_file *file_priv,
		 drm_i915_init_t * init)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned int *p, *end, *next;

	mutex_lock(&dev->struct_mutex);
	dev_priv->sarea_bo =
		drm_lookup_buffer_object(file_priv,
					 init->sarea_handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!dev_priv->sarea_bo) {
		DRM_ERROR("did not find sarea bo\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
			  dev_priv->sarea_bo->num_pages,
			  &dev_priv->sarea_kmap);
	if (ret) {
		DRM_ERROR("could not map sarea bo\n");
		return ret;
	}

	p = dev_priv->sarea_kmap.virtual;
	end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
	while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
		switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
		case DRI2_SAREA_BLOCK_LOCK:
			/* The lock lives immediately after the block header. */
			dev->primary->master->lock.hw_lock = (void *) (p + 1);
			dev->sigdata.lock = dev->primary->master->lock.hw_lock;
			break;
		}
		next = DRI2_SAREA_BLOCK_NEXT(p);
		/* Guard against a zero/overflowing size field looping forever
		 * or walking off the end of the mapping. */
		if (next <= p || end < next) {
			DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
				  next, p, end);
			return -EINVAL;
		}
		p = next;
	}

	return 0;
}
197 #endif
198
/* One-time DMA initialization for the I915_INIT_DMA(2) ioctl paths:
 * locate the MMIO map, ioremap the ring buffer described by @init, and
 * seed driver defaults (cpp, batchbuffer permission, vblank pipe).  On
 * the INIT_DMA2 path, additionally binds the DRI2 shared area.  Returns
 * 0 on success; on failure, undoes partial setup via i915_dma_cleanup()
 * and returns a negative errno. */
static int i915_initialize(struct drm_device * dev,
			   struct drm_file *file_priv,
			   drm_i915_init_t * init)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (init->mmio_offset != 0)
			dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
		/* Fails both when the lookup missed and when no offset was
		 * supplied and nothing was mapped previously. */
		if (!dev_priv->mmio_map) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not find mmio map!\n");
			return -EINVAL;
		}
	}

	if (init->ring_size != 0) {
		dev_priv->ring.Size = init->ring_size;
		/* NOTE(review): tail_mask = Size - 1 assumes the ring size is
		 * a power of two -- confirm callers enforce that. */
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;
		drm_core_ioremap(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
		dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
	}

	dev_priv->cpp = init->cpp;
	master_priv->sarea_priv->pf_current_page = 0;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Enable vblank on pipe A for older X servers
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

#ifdef DRI2
	if (init->func == I915_INIT_DMA2) {
		int ret = setup_dri2_sarea(dev, file_priv, init);
		if (ret) {
			i915_dma_cleanup(dev);
			DRM_ERROR("could not set up dri2 sarea\n");
			return ret;
		}
	}
#endif /* DRI2 */

	return 0;
}
264
/* Resume DMA after suspend: verify the ring mapping and hardware status
 * page still exist, then re-program the status page address register
 * (HWS_PGA) with either the GTT offset or the physical DMA address.
 * No-op under kernel modesetting.  Returns 0 or a negative errno. */
static int i915_dma_resume(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;

	DRM_DEBUG("\n");

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Prefer the GTT address when one was allocated; otherwise fall
	 * back to the physical page set up at init time. */
	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
295
296 static int i915_dma_init(struct drm_device *dev, void *data,
297                          struct drm_file *file_priv)
298 {
299         struct drm_i915_init *init = data;
300         int retcode = 0;
301
302         switch (init->func) {
303         case I915_INIT_DMA:
304         case I915_INIT_DMA2:
305                 retcode = i915_initialize(dev, file_priv, init);
306                 break;
307         case I915_CLEANUP_DMA:
308                 retcode = i915_dma_cleanup(dev);
309                 break;
310         case I915_RESUME_DMA:
311                 retcode = i915_dma_resume(dev);
312                 break;
313         default:
314                 retcode = -EINVAL;
315                 break;
316         }
317
318         return retcode;
319 }
320
321 /* Implement basically the same security restrictions as hardware does
322  * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
323  *
324  * Most of the calculations below involve calculating the size of a
325  * particular instruction.  It's important to get the size right as
326  * that tells us where the next instruction to check is.  Any illegal
327  * instruction detected will be given a size of zero, which is a
328  * signal to abort the rest of the buffer.
329  */
/* Compute the length in dwords of a single ring command, mirroring the
 * hardware's MI_BATCH_NON_SECURE restrictions (see block comment above).
 * Returns 0 for any disallowed or unrecognized command, which tells the
 * caller to abort the rest of the buffer.
 *
 * All decoding is done on an unsigned copy of @cmd: right-shifting a
 * negative signed int is implementation-defined in C, and commands with
 * the top bit set (client type >= 4) are common.  On two's-complement
 * arithmetic-shift platforms the masked results were identical, so this
 * is a portability fix, not a behavior change there. */
static int do_validate_cmd(int cmd)
{
	unsigned int c = (unsigned int) cmd;

	switch ((c >> 29) & 0x7) {	/* bits 31:29 -- client/command type */
	case 0x0:			/* MI (memory interface) commands */
		switch ((c >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		/* 2d commands: length field in the low byte, plus header. */
		return (int) (c & 0xff) + 2;
	case 0x3:			/* 3D/render commands */
		/* Single-dword state commands. */
		if (((c >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((c >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Sub-opcode selects the width of the length field. */
			switch ((c >> 16) & 0xff) {
			case 0x3:
				return (int) (c & 0x1f) + 2;
			case 0x4:
				return (int) (c & 0xf) + 2;
			default:
				return (int) (c & 0xffff) + 2;
			}
		case 0x1e:
			if (c & (1 << 23))
				return (int) (c & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((c & (1 << 23)) == 0)	/* inline vertices */
				return (int) (c & 0x1ffff) + 2;
			else if (c & (1 << 17)) {	/* indirect random */
				if ((c & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (int) (((c & 0xffff) + 1) / 2) + 1;
			} else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}
}
387
/* Thin wrapper around do_validate_cmd(); kept as a separate function so
 * a per-command debug printk can be re-enabled here without touching the
 * decoder itself. */
static int validate_cmd(int cmd)
{
	/* printk("validate_cmd( %x ): %d\n", cmd, do_validate_cmd(cmd)); */
	return do_validate_cmd(cmd);
}
396
/* Copy @dwords dwords of commands from the user buffer into the ring,
 * validating each command header with validate_cmd() and emitting its
 * payload verbatim.  The ring emit is padded to an even dword count.
 * Returns 0 on success, -EINVAL on a bad command, a command that runs
 * past the buffer, or a copy fault.
 *
 * NOTE(review): the early returns occur between BEGIN_LP_RING() and
 * ADVANCE_LP_RING(), leaving the emit unterminated -- confirm the macro
 * pair tolerates that on the error path. */
static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
			  int dwords)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Reject buffers that could never fit in the ring (the +1 accounts
	 * for the possible pad dword, the -8 for the ring's slack bytes). */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	/* Reserve an even number of dwords; hardware rings require it. */
	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		/* Unchecked copy: the caller has already verified the
		 * user range is readable. */
		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		/* sz == 0 means disallowed/unknown command; also reject a
		 * command whose payload would run past the buffer end. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Emit the remaining sz-1 payload dwords of this command. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	/* Pad to an even dword count. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
436
/* Emit a GFX_OP_DRAWRECT_INFO command for clip rectangle @i of the user
 * array @boxes, restricting subsequent rendering to that rectangle.
 * 965-class hardware uses a shorter 4-dword form without DR1; older
 * chips take the 6-dword form.  Returns 0 on success, -EFAULT on a copy
 * fault, -EINVAL for a degenerate box. */
int i915_emit_box(struct drm_device * dev,
		  struct drm_clip_rect __user * boxes,
		  int i, int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	/* Reject empty or inverted rectangles. */
	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		/* x in the low half-word, y in the high; x2/y2 are exclusive
		 * so the hardware gets inclusive maxima (x2-1, y2-1). */
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);	/* pad to even dword count */
		ADVANCE_LP_RING();
	}

	return 0;
}
475
476 /* XXX: Emitting the counter should really be moved to part of the IRQ
477  * emit. For now, do it in both places:
478  */
479
/* Emit an MI_STORE_DWORD_INDEX that writes the incremented per-device
 * counter into slot 5 of the hardware status page, and mirror it in the
 * shared area's last_enqueue so userspace can track completion.  The
 * counter wraps back to 1 (never 0) at BREADCRUMB_MASK. */
void i915_emit_breadcrumb(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	if (++dev_priv->counter > BREADCRUMB_MASK) {
		 dev_priv->counter = 1;
		 DRM_DEBUG("Breadcrumb counter wrapped around\n");
	}

	master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	/* Status-page dword index 5 holds the breadcrumb. */
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);	/* pad to even dword count */
	ADVANCE_LP_RING();
}
500
501
/* Emit an MI_FLUSH with the extra flush bits in @flush OR'd in, padded
 * out to 4 dwords.  Re-syncs the software ring state first via
 * i915_kernel_lost_context().  Always returns 0. */
int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t flush_cmd = MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
521
522
/* Dispatch a validated user command buffer, once per clip rectangle (or
 * exactly once when there are no cliprects), emitting the matching
 * DRAWRECT before each pass and a breadcrumb at the end.  The buffer
 * size must be dword-aligned.  Returns 0 or a negative errno from the
 * emit helpers. */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   struct drm_i915_cmdbuffer * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects the buffer is still emitted once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
554
/* Dispatch a user batch buffer, once per clip rectangle (or once with no
 * cliprects).  I830/845 hardware lacks MI_BATCH_BUFFER_START and takes
 * an inline MI_BATCH_BUFFER with explicit start/end; everything else
 * chains to the buffer, with the non-secure bit encoded differently on
 * 965-class chips.  Start and length must be 8-byte aligned.  Emits a
 * breadcrumb on completion.  Returns 0 or a negative errno. */
int i915_dispatch_batchbuffer(struct drm_device * dev,
			      drm_i915_batchbuffer_t * batch)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			/* Inclusive end address of the batch. */
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				/* 965 puts the non-secure bit in the second
				 * dword rather than in the address. */
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
604
/* Emit a page flip for display plane @plane (0 = A, 1 = B).  The current
 * page for each plane is kept as a 2-bit field in pf_current_page
 * (plane A in bits 1:0, plane B in bits 3:2); the next page cycles
 * through front/back[/third] depending on whether a third buffer
 * exists.  When @sync is clear, the flip is asynchronous and preceded
 * by an MI_WAIT_FOR_EVENT on the plane's flip-pending event. */
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	u32 num_pages, current_page, next_page, dspbase;
	int shift = 2 * plane, x, y;
	RING_LOCALS;

	/* Calculate display base offset */
	num_pages = master_priv->sarea_priv->third_handle ? 3 : 2;
	current_page = (master_priv->sarea_priv->pf_current_page >> shift) & 0x3;
	next_page = (current_page + 1) % num_pages;

	switch (next_page) {
	default:
	case 0:
		dspbase = master_priv->sarea_priv->front_offset;
		break;
	case 1:
		dspbase = master_priv->sarea_priv->back_offset;
		break;
	case 2:
		dspbase = master_priv->sarea_priv->third_offset;
		break;
	}

	if (plane == 0) {
		x = master_priv->sarea_priv->planeA_x;
		y = master_priv->sarea_priv->planeA_y;
	} else {
		x = master_priv->sarea_priv->planeB_x;
		y = master_priv->sarea_priv->planeB_y;
	}

	/* Offset the base by the plane's position within the buffer. */
	dspbase += (y * master_priv->sarea_priv->pitch + x) * dev_priv->cpp;

	DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
		  dspbase);

	BEGIN_LP_RING(4);
	OUT_RING(sync ? 0 :
		 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
				       MI_WAIT_FOR_PLANE_A_FLIP)));
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
		 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
	OUT_RING(master_priv->sarea_priv->pitch * dev_priv->cpp);
	OUT_RING(dspbase);
	ADVANCE_LP_RING();

	/* Record the page we just flipped to in this plane's 2-bit field. */
	master_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
	master_priv->sarea_priv->pf_current_page |= next_page << shift;
}
657
/* Flip every plane selected in the @planes bitmask (bit 0 = plane A,
 * bit 1 = plane B), flushing render and instruction caches first and
 * emitting a breadcrumb afterwards.  @sync selects synchronous flips. */
void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int i;

	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
		  planes, master_priv->sarea_priv->pf_current_page);

	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	for (i = 0; i < 2; i++)
		if (planes & (1 << i))
			i915_do_dispatch_flip(dev, i, sync);

	i915_emit_breadcrumb(dev);
}
674
/* Wait for the ring to drain completely (all but the mandatory 8 bytes
 * of slack free).  On timeout, re-reads the ring state and logs the
 * head/tail/space before propagating -EBUSY.  Returns 0 when idle. */
int i915_quiescent(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_kernel_lost_context(dev);
	ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
	if (ret)
	{
		/* Refresh the cached pointers so the diagnostics below
		 * reflect the hardware's final state. */
		i915_kernel_lost_context (dev);
		DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
			   dev_priv->ring.head,
			   dev_priv->ring.tail,
			   dev_priv->ring.space);
	}
	return ret;
}
692
/* DRM_IOCTL_I915_FLUSH: require the hardware lock, then wait for the
 * ring to go idle.  Returns 0 or -EBUSY from i915_quiescent(). */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}
701
/* DRM_IOCTL_I915_BATCHBUFFER: validate access to the user cliprect
 * array, dispatch the batch buffer, and publish the latest breadcrumb in
 * the shared area.  Rejected with -EINVAL when batchbuffers have been
 * disabled via I915_SETPARAM_ALLOW_BATCHBUFFER. */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Verify the whole cliprect array up front; the dispatch path
	 * copies from it with unchecked accessors. */
	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	/* Report the last retired breadcrumb even if dispatch failed. */
	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
732
/* DRM_IOCTL_I915_CMDBUFFER: validate access to the user cliprect array,
 * dispatch the (validated) command buffer, and publish the latest
 * breadcrumb in the shared area.  Unlike the batchbuffer path, commands
 * are copied through the ring and screened by validate_cmd(). */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
		master_priv->sarea_priv;
	struct drm_i915_cmdbuffer *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Verify the whole cliprect array up front; the dispatch path
	 * copies from it with unchecked accessors. */
	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}
765
766 #if defined(DRM_DEBUG_CODE)
767 #define DRM_DEBUG_RELOCATION    (drm_debug != 0)
768 #else
769 #define DRM_DEBUG_RELOCATION    0
770 #endif
771
/* Restore both display planes to the front buffer on cleanup.  For each
 * plane whose 2-bit current-page field is non-zero, set that field to
 * num_pages - 1 so that the (current + 1) % num_pages step inside
 * i915_do_dispatch_flip() lands on page 0 (front), then flip the
 * affected planes synchronously.  Always returns 0. */
int i915_do_cleanup_pageflip(struct drm_device * dev)
{
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int i, planes, num_pages;

	DRM_DEBUG("\n");
	num_pages = master_priv->sarea_priv->third_handle ? 3 : 2;
	for (i = 0, planes = 0; i < 2; i++) {
		if (master_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
			master_priv->sarea_priv->pf_current_page =
				(master_priv->sarea_priv->pf_current_page &
				 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));

			planes |= 1 << i;
		}
	}

	if (planes)
		i915_dispatch_flip(dev, planes, 0);

	return 0;
}
794
/* DRM_IOCTL_I915_FLIP: flip the planes selected by param->pipes (a
 * 2-bit plane mask, despite the historical field name).  Requires the
 * hardware lock; rejects masks with bits above 0x3. */
static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_i915_flip *param = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* This is really planes */
	if (param->pipes & ~0x3) {
		DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
			  param->pipes);
		return -EINVAL;
	}

	i915_dispatch_flip(dev, param->pipes, 0);

	return 0;
}
814
815
/* DRM_IOCTL_I915_GETPARAM: read a single driver parameter and copy the
 * integer value to the user pointer in param->value.  Returns -EINVAL
 * for an unknown parameter, -EFAULT on a copy failure. */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_getparam *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		/* GEM support is unconditionally advertised here. */
		value = 1;
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
856
857 static int i915_setparam(struct drm_device *dev, void *data,
858                          struct drm_file *file_priv)
859 {
860         struct drm_i915_private *dev_priv = dev->dev_private;
861         drm_i915_setparam_t *param = data;
862
863         if (!dev_priv) {
864                 DRM_ERROR("called with no initialization\n");
865                 return -EINVAL;
866         }
867
868         switch (param->param) {
869         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
870                 break;
871         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
872                 dev_priv->tex_lru_log_granularity = param->value;
873                 break;
874         case I915_SETPARAM_ALLOW_BATCHBUFFER:
875                 dev_priv->allow_batchbuffer = param->value;
876                 break;
877         default:
878                 DRM_ERROR("unknown parameter %d\n", param->param);
879                 return -EINVAL;
880         }
881
882         return 0;
883 }
884
885 drm_i915_mmio_entry_t mmio_table[] = {
886         [MMIO_REGS_PS_DEPTH_COUNT] = {
887                 I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
888                 0x2350,
889                 8
890         },
891         [MMIO_REGS_DOVSTA] = {
892                 I915_MMIO_MAY_READ,
893                 0x30008,
894                 1
895         },
896         [MMIO_REGS_GAMMA] = {
897                 I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
898                 0x30010,
899                 6
900         },
901         [MMIO_REGS_FENCE] = {
902                 I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
903                 0x2000,
904                 8
905         },
906         [MMIO_REGS_FENCE_NEW] = {
907                 I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
908                 0x3000,
909                 16
910         }
911 };
912
913 static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
914
915 static int i915_mmio(struct drm_device *dev, void *data,
916                      struct drm_file *file_priv)
917 {
918         uint32_t buf[8];
919         struct drm_i915_private *dev_priv = dev->dev_private;
920         drm_i915_mmio_entry_t *e;        
921         drm_i915_mmio_t *mmio = data;
922         void __iomem *base;
923         int i;
924
925         if (!dev_priv) {
926                 DRM_ERROR("called with no initialization\n");
927                 return -EINVAL;
928         }
929
930         if (mmio->reg >= mmio_table_size)
931                 return -EINVAL;
932
933         e = &mmio_table[mmio->reg];
934         base = (u8 *) dev_priv->mmio_map->handle + e->offset;
935
936         switch (mmio->read_write) {
937         case I915_MMIO_READ:
938                 if (!(e->flag & I915_MMIO_MAY_READ))
939                         return -EINVAL;
940                 for (i = 0; i < e->size / 4; i++)
941                         buf[i] = I915_READ(e->offset + i * 4);
942                 if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
943                         DRM_ERROR("DRM_COPY_TO_USER failed\n");
944                         return -EFAULT;
945                 }
946                 break;
947                 
948         case I915_MMIO_WRITE:
949                 if (!(e->flag & I915_MMIO_MAY_WRITE))
950                         return -EINVAL;
951                 if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
952                         DRM_ERROR("DRM_COPY_TO_USER failed\n");
953                         return -EFAULT;
954                 }
955                 for (i = 0; i < e->size / 4; i++)
956                         I915_WRITE(e->offset + i * 4, buf[i]);
957                 break;
958         }
959         return 0;
960 }
961
/**
 * DRM_I915_HWS_ADDR: place the hardware status page at a
 * userspace-chosen graphics-memory offset, on chipsets that keep the
 * status page in graphics memory (the "G33 hw status page" case below).
 *
 * \param dev       DRM device.
 * \param data      ioctl payload (drm_i915_hws_addr_t).
 * \param file_priv DRM file handle of the caller.
 *
 * \returns 0 on success (or as a no-op under KMS), -EINVAL if the
 * chipset does not use a GFX status page or the device is not
 * initialized, -ENOMEM if the page cannot be mapped.
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	/* Only chipsets whose status page lives in graphics memory
	 * support this ioctl. */
	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* With kernel modesetting active this ioctl does nothing and
	 * reports success. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	/* Keep only bits 12..28 of the address, i.e. a page-aligned
	 * graphics-memory offset. */
	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	/* Describe a one-page mapping of the status page through the
	 * AGP aperture so the CPU can read it. */
	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		/* Mapping failed: tear down DMA state and clear the
		 * half-configured address before reporting the error. */
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	/* Zero the page, then tell the hardware where it lives. */
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);

	return 0;
}
1006
/* Ioctl dispatch table for the i915 driver.  Each entry maps a
 * DRM_I915_* ioctl number to its handler and the permission flags the
 * DRM core enforces before calling it (DRM_AUTH: authenticated client;
 * DRM_MASTER: DRM master only; DRM_ROOT_ONLY: root only; 0: no
 * restriction). */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
1043
1044 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1045
1046 /**
1047  * Determine if the device really is AGP or not.
1048  *
1049  * All Intel graphics chipsets are treated as AGP, even if they are really
1050  * PCI-e.
1051  *
1052  * \param dev   The device to be tested.
1053  *
1054  * \returns
1055  * A value of 1 is always retured to indictate every i9x5 is AGP.
1056  */
1057 int i915_driver_device_is_agp(struct drm_device * dev)
1058 {
1059         return 1;
1060 }
1061