OSDN Git Service

Merge commit 'origin/master' into drm-gem
[android-x86/external-libdrm.git] / shared-core / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "i915_drm.h"
32 #include "i915_drv.h"
33
34 /* Really want an OS-independent resettable timer.  Would like to have
35  * this loop run for (eg) 3 sec, but have the timer reset every time
36  * the head pointer changes, so that EBUSY only happens if the ring
37  * actually stalls for (eg) 3 seconds.
38  */
39 int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
40 {
41         drm_i915_private_t *dev_priv = dev->dev_private;
42         drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
43         u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
44         u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
45         u32 last_acthd = I915_READ(acthd_reg);
46         u32 acthd;
47         int i;
48
49         for (i = 0; i < 100000; i++) {
50                 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
51                 acthd = I915_READ(acthd_reg);
52                 ring->space = ring->head - (ring->tail + 8);
53                 if (ring->space < 0)
54                         ring->space += ring->Size;
55                 if (ring->space >= n)
56                         return 0;
57
58                 if (ring->head != last_head)
59                         i = 0;
60
61                 if (acthd != last_acthd)
62                         i = 0;
63
64                 last_head = ring->head;
65                 last_acthd = acthd;
66                 msleep_interruptible (10);
67         }
68
69         return -EBUSY;
70 }
71
72 int i915_init_hardware_status(struct drm_device *dev)
73 {
74         drm_i915_private_t *dev_priv = dev->dev_private;
75         /* Program Hardware Status Page */
76         dev_priv->status_page_dmah =
77                 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
78
79         if (!dev_priv->status_page_dmah) {
80                 DRM_ERROR("Can not allocate hardware status page\n");
81                 return -ENOMEM;
82         }
83         dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
84         dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
85
86         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
87
88         I915_WRITE(0x02080, dev_priv->dma_status_page);
89         DRM_DEBUG("Enabled hardware status page\n");
90         return 0;
91 }
92
93 void i915_free_hardware_status(struct drm_device *dev)
94 {
95         drm_i915_private_t *dev_priv = dev->dev_private;
96         if (dev_priv->status_page_dmah) {
97                 drm_pci_free(dev, dev_priv->status_page_dmah);
98                 dev_priv->status_page_dmah = NULL;
99                 /* Need to rewrite hardware status page */
100                 I915_WRITE(0x02080, 0x1ffff000);
101         }
102
103         if (dev_priv->status_gfx_addr) {
104                 dev_priv->status_gfx_addr = 0;
105                 drm_core_ioremapfree(&dev_priv->hws_map, dev);
106                 I915_WRITE(0x02080, 0x1ffff000);
107         }
108 }
109
#if I915_RING_VALIDATE
/**
 * Validate the cached ring tail value
 *
 * If the X server writes to the ring and DRM doesn't
 * reload the head and tail pointers, it will end up writing
 * data to the wrong place in the ring, causing havoc.
 *
 * Compares the driver's cached tail against the hardware tail register
 * and crashes (BUG_ON) on mismatch, dumping both head and tail values
 * plus the calling function/line for diagnosis.
 */
void i915_ring_validate(struct drm_device *dev, const char *func, int line)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        /* NOTE(review): tail is masked with HEAD_ADDR rather than
         * TAIL_ADDR — presumably the masks overlap enough here; confirm
         * against the register definitions. */
        u32     tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
        u32     head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;

        if (tail != ring->tail) {
                DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
                          func, line,
                          ring->head, head, ring->tail, tail);
                BUG_ON(1);
        }
}
#endif
133
134 void i915_kernel_lost_context(struct drm_device * dev)
135 {
136         drm_i915_private_t *dev_priv = dev->dev_private;
137         drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
138
139         ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
140         ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
141         ring->space = ring->head - (ring->tail + 8);
142         if (ring->space < 0)
143                 ring->space += ring->Size;
144 }
145
146 static int i915_dma_cleanup(struct drm_device * dev)
147 {
148         drm_i915_private_t *dev_priv = dev->dev_private;
149         /* Make sure interrupts are disabled here because the uninstall ioctl
150          * may not have been called from userspace and after dev_private
151          * is freed, it's too late.
152          */
153         if (dev->irq_enabled)
154                 drm_irq_uninstall(dev);
155
156         if (dev_priv->ring.virtual_start) {
157                 drm_core_ioremapfree(&dev_priv->ring.map, dev);
158                 dev_priv->ring.virtual_start = 0;
159                 dev_priv->ring.map.handle = 0;
160                 dev_priv->ring.map.size = 0;
161         }
162
163         if (I915_NEED_GFX_HWS(dev))
164                 i915_free_hardware_status(dev);
165
166         return 0;
167 }
168
169 #if defined(I915_HAVE_BUFFER)
170 #define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
171 #define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
172 #define DRI2_SAREA_BLOCK_NEXT(p)                                \
173         ((void *) ((unsigned char *) (p) +                      \
174                    DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))
175
176 #define DRI2_SAREA_BLOCK_END            0x0000
177 #define DRI2_SAREA_BLOCK_LOCK           0x0001
178 #define DRI2_SAREA_BLOCK_EVENT_BUFFER   0x0002
179
180 static int
181 setup_dri2_sarea(struct drm_device * dev,
182                  struct drm_file *file_priv,
183                  drm_i915_init_t * init)
184 {
185         drm_i915_private_t *dev_priv = dev->dev_private;
186         int ret;
187         unsigned int *p, *end, *next;
188
189         mutex_lock(&dev->struct_mutex);
190         dev_priv->sarea_bo =
191                 drm_lookup_buffer_object(file_priv,
192                                          init->sarea_handle, 1);
193         mutex_unlock(&dev->struct_mutex);
194
195         if (!dev_priv->sarea_bo) {
196                 DRM_ERROR("did not find sarea bo\n");
197                 return -EINVAL;
198         }
199
200         ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
201                           dev_priv->sarea_bo->num_pages,
202                           &dev_priv->sarea_kmap);
203         if (ret) {
204                 DRM_ERROR("could not map sarea bo\n");
205                 return ret;
206         }
207
208         p = dev_priv->sarea_kmap.virtual;
209         end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
210         while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
211                 switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
212                 case DRI2_SAREA_BLOCK_LOCK:
213                         dev->lock.hw_lock = (void *) (p + 1);
214                         dev->sigdata.lock = dev->lock.hw_lock;
215                         break;
216                 }
217                 next = DRI2_SAREA_BLOCK_NEXT(p);
218                 if (next <= p || end < next) {
219                         DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
220                                   next, p, end);
221                         return -EINVAL;
222                 }
223                 p = next;
224         }
225
226         return 0;
227 }
228 #endif
229
/* Initialise DMA state for both classic (I915_INIT_DMA) and DRI2
 * (I915_INIT_DMA2) clients: locate the SAREA, optionally map the ring
 * buffer, and set driver defaults.  On any failure the partially set-up
 * state is released via i915_dma_cleanup().
 * Returns 0 on success or a negative errno.
 */
static int i915_initialize(struct drm_device * dev,
                           struct drm_file *file_priv,
                           drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
#if defined(I915_HAVE_BUFFER)
        int ret;
#endif
        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                i915_dma_cleanup(dev);
                return -EINVAL;
        }

#ifdef I915_HAVE_BUFFER
        dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
#endif

        /* sarea_priv lives inside the SAREA mapping at a client-supplied
         * offset; offset 0 means the client runs without one, so every
         * later use of sarea_priv must tolerate NULL. */
        if (init->sarea_priv_offset)
                dev_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *) dev_priv->sarea->handle +
                         init->sarea_priv_offset);
        else {
                /* No sarea_priv for you! */
                dev_priv->sarea_priv = NULL;
        }

        if (init->ring_size != 0) {
                /* tail_mask assumes ring_size is a power of two —
                 * TODO confirm callers enforce this. */
                dev_priv->ring.Size = init->ring_size;
                dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

                dev_priv->ring.map.offset = init->ring_start;
                dev_priv->ring.map.size = init->ring_size;
                dev_priv->ring.map.type = 0;
                dev_priv->ring.map.flags = 0;
                dev_priv->ring.map.mtrr = 0;

                drm_core_ioremap(&dev_priv->ring.map, dev);

                if (dev_priv->ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
                                  " ring buffer\n");
                        return -ENOMEM;
                }

                dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
        }

        dev_priv->cpp = init->cpp;

        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->pf_current_page = 0;

        /* We are using separate values as placeholders for mechanisms for
         * private backbuffer/depthbuffer usage.
         */

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        /* Enable vblank on pipe A for older X servers
         */
        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

#ifdef I915_HAVE_BUFFER
        mutex_init(&dev_priv->cmdbuf_mutex);
#endif
#if defined(I915_HAVE_BUFFER)
        /* DRI2 clients pass the SAREA as a buffer object instead. */
        if (init->func == I915_INIT_DMA2) {
                ret = setup_dri2_sarea(dev, file_priv, init);
                if (ret) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("could not set up dri2 sarea\n");
                        return ret;
                }
        }
#endif

        return 0;
}
313
314 static int i915_dma_resume(struct drm_device * dev)
315 {
316         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
317
318         DRM_DEBUG("\n");
319
320         if (!dev_priv->sarea) {
321                 DRM_ERROR("can not find sarea!\n");
322                 return -EINVAL;
323         }
324
325         if (dev_priv->ring.map.handle == NULL) {
326                 DRM_ERROR("can not ioremap virtual address for"
327                           " ring buffer\n");
328                 return -ENOMEM;
329         }
330
331         /* Program Hardware Status Page */
332         if (!dev_priv->hw_status_page) {
333                 DRM_ERROR("Can not find hardware status page\n");
334                 return -EINVAL;
335         }
336         DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
337
338         if (dev_priv->status_gfx_addr != 0)
339                 I915_WRITE(0x02080, dev_priv->status_gfx_addr);
340         else
341                 I915_WRITE(0x02080, dev_priv->dma_status_page);
342         DRM_DEBUG("Enabled hardware status page\n");
343
344         return 0;
345 }
346
347 static int i915_dma_init(struct drm_device *dev, void *data,
348                          struct drm_file *file_priv)
349 {
350         drm_i915_init_t *init = data;
351         int retcode = 0;
352
353         switch (init->func) {
354         case I915_INIT_DMA:
355         case I915_INIT_DMA2:
356                 retcode = i915_initialize(dev, file_priv, init);
357                 break;
358         case I915_CLEANUP_DMA:
359                 retcode = i915_dma_cleanup(dev);
360                 break;
361         case I915_RESUME_DMA:
362                 retcode = i915_dma_resume(dev);
363                 break;
364         default:
365                 retcode = -EINVAL;
366                 break;
367         }
368
369         return retcode;
370 }
371
372 /* Implement basically the same security restrictions as hardware does
373  * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
374  *
375  * Most of the calculations below involve calculating the size of a
376  * particular instruction.  It's important to get the size right as
377  * that tells us where the next instruction to check is.  Any illegal
378  * instruction detected will be given a size of zero, which is a
379  * signal to abort the rest of the buffer.
380  */
/* Compute the length in dwords of a single ring command, as part of the
 * software security validation mirroring MI_BATCH_NON_SECURE (see the
 * block comment above).  Returns the command's total size in dwords,
 * or 0 for any disallowed or unrecognised command, which signals the
 * caller to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
        /* Bits 31:29 select the instruction client/type. */
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                /* Memory-interface (MI) commands: opcode in bits 28:23. */
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                /* 3D/media commands: opcode in bits 28:24; everything
                 * up to 0x18 is a single-dword state command. */
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        /* Sub-opcode in bits 23:16 selects which bits
                         * hold the length field. */
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        /* Not reachable; every case above returns. */
        return 0;
}
438
/* Thin wrapper around do_validate_cmd(), kept as a single hook point
 * for tracing validation results during debugging.
 */
static int validate_cmd(int cmd)
{
        return do_validate_cmd(cmd);
}
447
/* Validate and copy a user-supplied dword stream into the ring.
 * @buffer: user pointer to the commands (range-checked by the caller)
 * @dwords: number of dwords to emit
 * Returns 0 on success, or -EINVAL on a copy fault, an illegal command,
 * or a command whose length field runs past the end of the buffer.
 * NOTE(review): error returns after BEGIN_LP_RING leave the ring
 * partially written without ADVANCE_LP_RING — confirm callers recover.
 */
static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
                          int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        RING_LOCALS;

        /* Never allow a submission that could fill the whole ring; an
         * 8-byte head/tail gap must always remain. */
        if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
                return -EINVAL;

        /* Ring emissions are padded to an even number of dwords. */
        BEGIN_LP_RING((dwords+1)&~1);

        for (i = 0; i < dwords;) {
                int cmd, sz;

                if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
                        return -EINVAL;

                /* sz is the full instruction length in dwords; 0 marks
                 * an illegal command. */
                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;

                OUT_RING(cmd);

                /* Copy the command's operand dwords through unvalidated. */
                while (++i, --sz) {
                        if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
                                                         sizeof(cmd))) {
                                return -EINVAL;
                        }
                        OUT_RING(cmd);
                }
        }

        /* Pad odd-length submissions with a no-op dword. */
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}
487
/* Copy clip rectangle @i from the user-space @boxes array and emit a
 * DRAWRECT_INFO command programming it as the hardware drawing
 * rectangle, together with the clip-mode dwords DR1/DR4.
 * Returns 0 on success, -EFAULT on a user copy fault, or -EINVAL for a
 * degenerate (empty or negative) rectangle.
 */
int i915_emit_box(struct drm_device * dev,
                  struct drm_clip_rect __user * boxes,
                  int i, int DR1, int DR4)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box;
        RING_LOCALS;

        if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
                return -EFAULT;
        }

        /* Reject empty/inverted boxes before handing them to hardware. */
        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
                return -EINVAL;
        }

        if (IS_I965G(dev)) {
                /* 965 uses a 4-dword variant without DR1. */
                BEGIN_LP_RING(4);
                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        return 0;
}
526
527 /* XXX: Emitting the counter should really be moved to part of the IRQ
528  * emit. For now, do it in both places:
529  */
530
/* Emit an MI_STORE_DWORD_INDEX writing the next breadcrumb counter
 * value into the hardware status page (dword index 5), and mirror the
 * value into sarea_priv->last_enqueue when a sarea_priv exists.  The
 * counter wraps back to 1 at BREADCRUMB_MASK.
 */
void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        if (++dev_priv->counter > BREADCRUMB_MASK) {
                 dev_priv->counter = 1;
                 DRM_DEBUG("Breadcrumb counter wrapped around\n");
        }

        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);    /* pad to an even number of dwords */
        ADVANCE_LP_RING();
}
551
552
553 int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
554 {
555         drm_i915_private_t *dev_priv = dev->dev_private;
556         uint32_t flush_cmd = MI_FLUSH;
557         RING_LOCALS;
558
559         flush_cmd |= flush;
560
561         i915_kernel_lost_context(dev);
562
563         BEGIN_LP_RING(4);
564         OUT_RING(flush_cmd);
565         OUT_RING(0);
566         OUT_RING(0);
567         OUT_RING(0);
568         ADVANCE_LP_RING();
569
570         return 0;
571 }
572
573
/* Dispatch a user command buffer, replaying it once per clip rectangle
 * (or once with no clipping when num_cliprects == 0), with each pass
 * preceded by the matching drawing-rectangle setup.  A breadcrumb is
 * emitted afterwards, and with fencing enabled old fences are flushed
 * every 256 breadcrumbs.  Returns 0 or a negative errno.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
        drm_i915_private_t *dev_priv = dev->dev_private;
#endif
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        /* Command buffers must be a whole number of dwords. */
        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        /* No cliprects still means one unclipped pass. */
        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cmd->cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
        if (unlikely((dev_priv->counter & 0xFF) == 0))
                drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
        return 0;
}
612
/* Dispatch a client batch buffer, once per clip rectangle (or once
 * unclipped when there are none).  830/845 need the two-address
 * MI_BATCH_BUFFER form; everything else uses MI_BATCH_BUFFER_START,
 * with the 965 flavour of the non-secure bit where applicable.
 * A breadcrumb is emitted afterwards.  Returns 0 or a negative errno.
 */
int i915_dispatch_batchbuffer(struct drm_device * dev,
                              drm_i915_batchbuffer_t * batch)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;

        /* Batch start address and length must be 8-byte aligned. */
        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, boxes, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (IS_I830(dev) || IS_845G(dev)) {
                        /* These chips take explicit start/end addresses
                         * and enforce the non-secure bit in the command. */
                        BEGIN_LP_RING(4);
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                        ADVANCE_LP_RING();
                } else {
                        BEGIN_LP_RING(2);
                        if (IS_I965G(dev)) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
        /* Flush old fences every 256 breadcrumbs. */
        if (unlikely((dev_priv->counter & 0xFF) == 0))
                drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
        return 0;
}
666
/* Emit a page flip for one display plane (0 = A, 1 = B).  The current
 * page for each plane is tracked in two bits of
 * sarea_priv->pf_current_page at offset 2*plane; the flip advances to
 * the next of 2 or 3 buffers (front/back[/third]).  When @sync is
 * clear the flip is asynchronous and waits for the plane's flip-pending
 * event first.  Caller must guarantee sarea_priv is non-NULL.
 */
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 num_pages, current_page, next_page, dspbase;
        int shift = 2 * plane, x, y;
        RING_LOCALS;

        /* Calculate display base offset */
        num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
        current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
        next_page = (current_page + 1) % num_pages;

        switch (next_page) {
        default:
        case 0:
                dspbase = dev_priv->sarea_priv->front_offset;
                break;
        case 1:
                dspbase = dev_priv->sarea_priv->back_offset;
                break;
        case 2:
                dspbase = dev_priv->sarea_priv->third_offset;
                break;
        }

        if (plane == 0) {
                x = dev_priv->sarea_priv->planeA_x;
                y = dev_priv->sarea_priv->planeA_y;
        } else {
                x = dev_priv->sarea_priv->planeB_x;
                y = dev_priv->sarea_priv->planeB_y;
        }

        /* Offset the base to the plane's (x, y) position in the buffer. */
        dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;

        DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
                  dspbase);

        BEGIN_LP_RING(4);
        OUT_RING(sync ? 0 :
                 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
                                       MI_WAIT_FOR_PLANE_A_FLIP)));
        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
                 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
        OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
        OUT_RING(dspbase);
        ADVANCE_LP_RING();

        /* Record the new current page in the plane's two tracking bits. */
        dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
        dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}
718
719 void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
720 {
721         drm_i915_private_t *dev_priv = dev->dev_private;
722         int i;
723
724         DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
725                   planes, dev_priv->sarea_priv->pf_current_page);
726
727         i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
728
729         for (i = 0; i < 2; i++)
730                 if (planes & (1 << i))
731                         i915_do_dispatch_flip(dev, i, sync);
732
733         i915_emit_breadcrumb(dev);
734 #ifdef I915_HAVE_FENCE
735         if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
736                 drm_fence_flush_old(dev, 0, dev_priv->counter);
737 #endif
738 }
739
740 int i915_quiescent(struct drm_device *dev)
741 {
742         drm_i915_private_t *dev_priv = dev->dev_private;
743         int ret;
744
745         i915_kernel_lost_context(dev);
746         ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
747         if (ret)
748         {
749                 i915_kernel_lost_context (dev);
750                 DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
751                            dev_priv->ring.head,
752                            dev_priv->ring.tail,
753                            dev_priv->ring.space);
754         }
755         return ret;
756 }
757
/* DRM_I915_FLUSH ioctl: wait for the ring to go idle.  Requires the
 * caller to hold the hardware lock.
 */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        LOCK_TEST_WITH_RETURN(dev, file_priv);

        return i915_quiescent(dev);
}
766
767 static int i915_batchbuffer(struct drm_device *dev, void *data,
768                             struct drm_file *file_priv)
769 {
770         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
771         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
772             dev_priv->sarea_priv;
773         drm_i915_batchbuffer_t *batch = data;
774         int ret;
775
776         if (!dev_priv->allow_batchbuffer) {
777                 DRM_ERROR("Batchbuffer ioctl disabled\n");
778                 return -EINVAL;
779         }
780
781         DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
782                   batch->start, batch->used, batch->num_cliprects);
783
784         LOCK_TEST_WITH_RETURN(dev, file_priv);
785
786         if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
787                                                         batch->num_cliprects *
788                                                         sizeof(struct drm_clip_rect)))
789                 return -EFAULT;
790
791         ret = i915_dispatch_batchbuffer(dev, batch);
792
793         sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
794         return ret;
795 }
796
797 static int i915_cmdbuffer(struct drm_device *dev, void *data,
798                           struct drm_file *file_priv)
799 {
800         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
801         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
802             dev_priv->sarea_priv;
803         drm_i915_cmdbuffer_t *cmdbuf = data;
804         int ret;
805
806         DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
807                   cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
808
809         LOCK_TEST_WITH_RETURN(dev, file_priv);
810
811         if (cmdbuf->num_cliprects &&
812             DRM_VERIFYAREA_READ(cmdbuf->cliprects,
813                                 cmdbuf->num_cliprects *
814                                 sizeof(struct drm_clip_rect))) {
815                 DRM_ERROR("Fault accessing cliprects\n");
816                 return -EFAULT;
817         }
818
819         ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
820         if (ret) {
821                 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
822                 return ret;
823         }
824
825         sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
826         return 0;
827 }
828
829 #if defined(DRM_DEBUG_CODE)
830 #define DRM_DEBUG_RELOCATION    (drm_debug != 0)
831 #else
832 #define DRM_DEBUG_RELOCATION    0
833 #endif
834
/* At teardown, flip any plane not currently showing page 0 back to the
 * front buffer.  Each plane's current page lives in two bits of
 * pf_current_page at offset 2*i; setting it to (num_pages - 1) makes
 * the modular increment in i915_do_dispatch_flip() land on page 0.
 * Caller must ensure sarea_priv is valid.  Always returns 0.
 */
static int i915_do_cleanup_pageflip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;

        DRM_DEBUG("\n");

        for (i = 0, planes = 0; i < 2; i++)
                if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
                        dev_priv->sarea_priv->pf_current_page =
                                (dev_priv->sarea_priv->pf_current_page &
                                 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));

                        planes |= 1 << i;
                }

        if (planes)
                i915_dispatch_flip(dev, planes, 0);

        return 0;
}
856
857 static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
858 {
859         drm_i915_flip_t *param = data;
860
861         DRM_DEBUG("\n");
862
863         LOCK_TEST_WITH_RETURN(dev, file_priv);
864
865         /* This is really planes */
866         if (param->pipes & ~0x3) {
867                 DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
868                           param->pipes);
869                 return -EINVAL;
870         }
871
872         i915_dispatch_flip(dev, param->pipes, 0);
873
874         return 0;
875 }
876
877
878 static int i915_getparam(struct drm_device *dev, void *data,
879                          struct drm_file *file_priv)
880 {
881         drm_i915_private_t *dev_priv = dev->dev_private;
882         drm_i915_getparam_t *param = data;
883         int value;
884
885         if (!dev_priv) {
886                 DRM_ERROR("called with no initialization\n");
887                 return -EINVAL;
888         }
889
890         switch (param->param) {
891         case I915_PARAM_IRQ_ACTIVE:
892                 value = dev->irq_enabled ? 1 : 0;
893                 break;
894         case I915_PARAM_ALLOW_BATCHBUFFER:
895                 value = dev_priv->allow_batchbuffer ? 1 : 0;
896                 break;
897         case I915_PARAM_LAST_DISPATCH:
898                 value = READ_BREADCRUMB(dev_priv);
899                 break;
900         case I915_PARAM_CHIPSET_ID:
901                 value = dev->pci_device;
902                 break;
903         default:
904                 DRM_ERROR("Unknown parameter %d\n", param->param);
905                 return -EINVAL;
906         }
907
908         if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
909                 DRM_ERROR("DRM_COPY_TO_USER failed\n");
910                 return -EFAULT;
911         }
912
913         return 0;
914 }
915
916 static int i915_setparam(struct drm_device *dev, void *data,
917                          struct drm_file *file_priv)
918 {
919         drm_i915_private_t *dev_priv = dev->dev_private;
920         drm_i915_setparam_t *param = data;
921
922         if (!dev_priv) {
923                 DRM_ERROR("called with no initialization\n");
924                 return -EINVAL;
925         }
926
927         switch (param->param) {
928         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
929                 break;
930         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
931                 dev_priv->tex_lru_log_granularity = param->value;
932                 break;
933         case I915_SETPARAM_ALLOW_BATCHBUFFER:
934                 dev_priv->allow_batchbuffer = param->value;
935                 break;
936         default:
937                 DRM_ERROR("unknown parameter %d\n", param->param);
938                 return -EINVAL;
939         }
940
941         return 0;
942 }
943
/* Whitelist of MMIO register ranges userspace may touch through the
 * DRM_I915_MMIO ioctl, indexed by drm_i915_mmio_t::reg.  Each entry
 * gives the allowed access flags, the register offset, and the
 * transfer size in bytes.
 */
drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2350,
		8
	}
};

/* Number of entries in mmio_table; bounds-checks ioctl input. */
static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
953
954 static int i915_mmio(struct drm_device *dev, void *data,
955                      struct drm_file *file_priv)
956 {
957         uint32_t buf[8];
958         drm_i915_private_t *dev_priv = dev->dev_private;
959         drm_i915_mmio_entry_t *e;
960         drm_i915_mmio_t *mmio = data;
961         void __iomem *base;
962         int i;
963
964         if (!dev_priv) {
965                 DRM_ERROR("called with no initialization\n");
966                 return -EINVAL;
967         }
968
969         if (mmio->reg >= mmio_table_size)
970                 return -EINVAL;
971
972         e = &mmio_table[mmio->reg];
973         base = (u8 *) dev_priv->mmio_map->handle + e->offset;
974
975         switch (mmio->read_write) {
976         case I915_MMIO_READ:
977                 if (!(e->flag & I915_MMIO_MAY_READ))
978                         return -EINVAL;
979                 for (i = 0; i < e->size / 4; i++)
980                         buf[i] = I915_READ(e->offset + i * 4);
981                 if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
982                         DRM_ERROR("DRM_COPY_TO_USER failed\n");
983                         return -EFAULT;
984                 }
985                 break;
986                 
987         case I915_MMIO_WRITE:
988                 if (!(e->flag & I915_MMIO_MAY_WRITE))
989                         return -EINVAL;
990                 if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
991                         DRM_ERROR("DRM_COPY_TO_USER failed\n");
992                         return -EFAULT;
993                 }
994                 for (i = 0; i < e->size / 4; i++)
995                         I915_WRITE(e->offset + i * 4, buf[i]);
996                 break;
997         }
998         return 0;
999 }
1000
/* DRM_I915_HWS_ADDR ioctl: place the hardware status page at a
 * graphics-memory address supplied by userspace, for chipsets that
 * cannot use a physical-memory status page (I915_NEED_GFX_HWS).
 *
 * Returns 0 on success, -EINVAL on the wrong chipset or an
 * uninitialized device, -ENOMEM if the page cannot be ioremapped.
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	/* Chipsets with a physical-memory HWS keep the one set up at load. */
	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	/* Keep only the page-aligned offset bits for the hardware register. */
	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	/* Map the page through the AGP aperture so the CPU can read it. */
	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		/* Mapping failed: tear down DMA state and report OOM. */
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	/* Zero the page, then point the hardware (HWS_PGA, reg 0x2080)
	 * at the new graphics-memory address. */
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
			dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
1041
1042 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1043 {
1044         struct drm_i915_private *dev_priv;
1045         unsigned long base, size;
1046         int ret = 0, num_pipes = 2, mmio_bar = IS_I9XX(dev) ? 0 : 1;
1047
1048         /* i915 has 4 more counters */
1049         dev->counters += 4;
1050         dev->types[6] = _DRM_STAT_IRQ;
1051         dev->types[7] = _DRM_STAT_PRIMARY;
1052         dev->types[8] = _DRM_STAT_SECONDARY;
1053         dev->types[9] = _DRM_STAT_DMA;
1054
1055         dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
1056         if (dev_priv == NULL)
1057                 return -ENOMEM;
1058
1059         memset(dev_priv, 0, sizeof(drm_i915_private_t));
1060
1061         dev->dev_private = (void *)dev_priv;
1062         dev_priv->dev = dev;
1063
1064         /* Add register map (needed for suspend/resume) */
1065         base = drm_get_resource_start(dev, mmio_bar);
1066         size = drm_get_resource_len(dev, mmio_bar);
1067
1068         ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
1069                 _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
1070
1071         i915_gem_load(dev);
1072
1073 #ifdef __linux__
1074 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
1075         intel_init_chipset_flush_compat(dev);
1076 #endif
1077         intel_opregion_init(dev);
1078 #endif
1079
1080         /* Init HWS */
1081         if (!I915_NEED_GFX_HWS(dev)) {
1082                 ret = i915_init_hardware_status(dev);
1083                 if(ret)
1084                         return ret;
1085         }
1086
1087         I915_WRITE16(HWSTAM, 0xeffe);
1088         I915_WRITE16(IMR, 0x0);
1089         I915_WRITE16(IER, 0x0);
1090
1091         DRM_SPININIT(&dev_priv->swaps_lock, "swap");
1092         INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
1093         dev_priv->swaps_pending = 0;
1094
1095         DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
1096         dev_priv->user_irq_refcount = 0;
1097         dev_priv->irq_mask_reg = ~0;
1098
1099         ret = drm_vblank_init(dev, num_pipes);
1100         if (ret)
1101                 return ret;
1102
1103         dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1104         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1105
1106         i915_enable_interrupt(dev);
1107         DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
1108
1109         /*
1110          * Initialize the hardware status page IRQ location.
1111          */
1112
1113         I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1114
1115         return ret;
1116 }
1117
1118 int i915_driver_unload(struct drm_device *dev)
1119 {
1120         struct drm_i915_private *dev_priv = dev->dev_private;
1121         u32 temp;
1122
1123         if (dev_priv) {
1124                 dev_priv->vblank_pipe = 0;
1125
1126                 dev_priv->irq_enabled = 0;
1127                 I915_WRITE(HWSTAM, 0xffffffff);
1128                 I915_WRITE(IMR, 0xffffffff);
1129                 I915_WRITE(IER, 0x0);
1130
1131                 temp = I915_READ(PIPEASTAT);
1132                 I915_WRITE(PIPEASTAT, temp);
1133                 temp = I915_READ(PIPEBSTAT);
1134                 I915_WRITE(PIPEBSTAT, temp);
1135                 temp = I915_READ(IIR);
1136                 I915_WRITE(IIR, temp);
1137         }
1138
1139         i915_free_hardware_status(dev);
1140
1141         drm_rmmap(dev, dev_priv->mmio_map);
1142
1143 #ifdef __linux__
1144         intel_opregion_free(dev);
1145 #endif
1146
1147         drm_free(dev->dev_private, sizeof(drm_i915_private_t),
1148                  DRM_MEM_DRIVER);
1149 #ifdef __linux__
1150 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
1151         intel_fini_chipset_flush_compat(dev);
1152 #endif
1153 #endif
1154         return 0;
1155 }
1156
/* Called when the last open file handle on the device closes: undo
 * pageflip state, release GEM and AGP-heap resources, unmap the SAREA
 * and finally tear down DMA state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* agp off can use this to get called before dev_priv */
	if (!dev_priv)
		return;

#ifdef I915_HAVE_BUFFER
	/* Drop the validation buffer cache allocated during execbuffer. */
	if (dev_priv->val_bufs) {
		vfree(dev_priv->val_bufs);
		dev_priv->val_bufs = NULL;
	}
#endif
	i915_gem_lastclose(dev);

	/* Reset planes to page 0 while the SAREA still exists. */
	if (drm_getsarea(dev) && dev_priv->sarea_priv)
		i915_do_cleanup_pageflip(dev);
	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));
#if defined(I915_HAVE_BUFFER)
	if (dev_priv->sarea_kmap.virtual) {
		drm_bo_kunmap(&dev_priv->sarea_kmap);
		dev_priv->sarea_kmap.virtual = NULL;
		/* The hardware lock lived inside the SAREA kmap; clear the
		 * stale pointers so nothing touches the unmapped page. */
		dev->lock.hw_lock = NULL;
		dev->sigdata.lock = NULL;
	}

	if (dev_priv->sarea_bo) {
		mutex_lock(&dev->struct_mutex);
		drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
		mutex_unlock(&dev->struct_mutex);
		dev_priv->sarea_bo = NULL;
	}
#endif
	i915_dma_cleanup(dev);
}
1194
1195 int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1196 {
1197         struct drm_i915_file_private *i915_file_priv;
1198
1199         DRM_DEBUG("\n");
1200         i915_file_priv = (struct drm_i915_file_private *)
1201             drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
1202
1203         if (!i915_file_priv)
1204                 return -ENOMEM;
1205
1206         file_priv->driver_priv = i915_file_priv;
1207
1208         i915_file_priv->mm.last_gem_seqno = 0;
1209         i915_file_priv->mm.last_gem_throttle_seqno = 0;
1210
1211         return 0;
1212 }
1213
1214 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1215 {
1216         drm_i915_private_t *dev_priv = dev->dev_private;
1217         i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1218 }
1219
1220 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1221 {
1222         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1223
1224         drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
1225 }
1226
/* Ioctl dispatch table for the i915 driver.  Flags: DRM_AUTH requires
 * an authenticated client, DRM_MASTER restricts to the DRM master,
 * DRM_ROOT_ONLY to root; 0 means any opener may call the ioctl.
 */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
#ifdef I915_HAVE_BUFFER
	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};

/* Number of entries in i915_ioctls, exported for the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1268
1269 /**
1270  * Determine if the device really is AGP or not.
1271  *
1272  * All Intel graphics chipsets are treated as AGP, even if they are really
1273  * PCI-e.
1274  *
1275  * \param dev   The device to be tested.
1276  *
1277  * \returns
1278  * A value of 1 is always retured to indictate every i9x5 is AGP.
1279  */
1280 int i915_driver_device_is_agp(struct drm_device * dev)
1281 {
1282         return 1;
1283 }
1284
/* First-open hook: initialize the buffer-object driver when TTM-style
 * buffers are compiled in; otherwise a no-op.  Always returns 0.
 */
int i915_driver_firstopen(struct drm_device *dev)
{
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}