1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Bits of the 16-bit interrupt identity/enable registers (IIR/IER). */
34 #define USER_INT_FLAG (1<<1)
35 #define VSYNC_PIPEB_FLAG (1<<5)
36 #define VSYNC_PIPEA_FLAG (1<<7)
/* All-ones u32 sentinel (its use is not visible in this extract). */
38 #define MAX_NOPID ((u32)~0)
41 * Emit a synchronous flip.
43 * This function must be called with the drawable spinlock held.
46 i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe)
48 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
49 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
/* Bitmask of pipes to flip; starts as just the requested pipe (0 = A, 1 = B). */
51 int pf_pipes = 1 << pipe;
53 /* If the window is visible on the other pipe, we have to flip on that
/* Extents (x1,y1)-(x2,y2) of the OTHER pipe's viewport, read from the
 * shared sarea; which of the two assignments below runs is selected by
 * a branch on 'pipe' that is not visible in this extract. */
57 x1 = sarea_priv->pipeA_x;
58 y1 = sarea_priv->pipeA_y;
59 x2 = x1 + sarea_priv->pipeA_w;
60 y2 = y1 + sarea_priv->pipeA_h;
62 x1 = sarea_priv->pipeB_x;
63 y1 = sarea_priv->pipeB_y;
64 x2 = x1 + sarea_priv->pipeB_w;
65 y2 = y1 + sarea_priv->pipeB_h;
/* Only worth checking cliprects when the other viewport is non-degenerate. */
68 if (x2 > 0 && y2 > 0) {
69 int i, num_rects = drw->num_rects;
70 drm_clip_rect_t *rect = drw->rects;
/* Standard axis-aligned rectangle overlap test: the negated condition is
 * true when rect[i] intersects (x1,y1)-(x2,y2).  The body of the hit case
 * (presumably widening pf_pipes to cover both pipes) is not visible here. */
72 for (i = 0; i < num_rects; i++)
73 if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
74 rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
/* Emit the flip for all pipes collected in pf_pipes; the meaning of the
 * final '1' argument is defined by i915_dispatch_flip (not shown) —
 * presumably "synchronous"; TODO confirm against i915_dma.c. */
81 i915_dispatch_flip(dev, pf_pipes, 1);
85 * Emit blits for scheduled buffer swaps.
87 * This function will be called with the HW lock held.
89 static void i915_vblank_tasklet(drm_device_t *dev)
91 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
92 unsigned long irqflags;
93 struct list_head *list, *tmp, hits, *hit;
94 int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
/* Snapshot of the vblank counters for pipe A ([0]) and pipe B ([1]). */
95 unsigned counter[2] = { atomic_read(&dev->vbl_received),
96 atomic_read(&dev->vbl_received2) };
97 drm_drawable_info_t *drw;
98 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
99 u32 cpp = dev_priv->cpp, offsets[3];
/* Blit command: only write alpha/RGB channel bits for 32bpp surfaces. */
100 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
101 XY_SRC_COPY_BLT_WRITE_ALPHA |
102 XY_SRC_COPY_BLT_WRITE_RGB)
103 : XY_SRC_COPY_BLT_CMD;
/* Packed BR13 dword: pitch in bytes | ROP 0xcc (SRCCOPY) | depth bits. */
104 u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
105 (cpp << 23) | (1 << 24);
110 INIT_LIST_HEAD(&hits);
114 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
116 /* Find buffer swaps scheduled for this vertical blank */
117 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
118 drm_i915_vbl_swap_t *vbl_swap =
119 list_entry(list, drm_i915_vbl_swap_t, head);
/* Wrap-safe "sequence not yet reached" test: skip swaps still in the
 * future on their pipe. */
121 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
125 dev_priv->swaps_pending--;
/* Drop swaps_lock before taking drw_lock (lock ordering); irqs stay
 * disabled from the outer irqsave. */
127 spin_unlock(&dev_priv->swaps_lock);
128 spin_lock(&dev->drw_lock);
130 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
/* Drawable vanished: free the stale swap entry and resume the scan. */
133 spin_unlock(&dev->drw_lock);
134 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
135 spin_lock(&dev_priv->swaps_lock);
/* Insert this hit into 'hits' sorted by the first cliprect's y1 so blits
 * are emitted roughly in scanout order. */
139 list_for_each(hit, &hits) {
140 drm_i915_vbl_swap_t *swap_cmp =
141 list_entry(hit, drm_i915_vbl_swap_t, head);
142 drm_drawable_info_t *drw_cmp =
143 drm_get_drawable_info(dev, swap_cmp->drw_id);
146 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
147 list_add_tail(list, hit);
152 spin_unlock(&dev->drw_lock);
154 /* List of hits was empty, or we reached the end of it */
156 list_add_tail(list, hits.prev);
160 spin_lock(&dev_priv->swaps_lock);
164 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
168 spin_unlock(&dev_priv->swaps_lock);
170 i915_kernel_lost_context(dev);
/* Partition each pipe's visible height into 'nhits' horizontal slices. */
172 upper[0] = upper[1] = 0;
173 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
174 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
175 lower[0] = sarea_priv->pipeA_y + slice[0];
/* NOTE(review): pipe B's lower bound is advanced by slice[0]; this looks
 * like a copy/paste bug — slice[1] is presumably intended.  Verify against
 * the upstream DRM history before changing. */
176 lower[1] = sarea_priv->pipeB_y + slice[0];
178 offsets[0] = sarea_priv->front_offset;
179 offsets[1] = sarea_priv->back_offset;
180 offsets[2] = sarea_priv->third_offset;
/* Triple-buffered if a third buffer handle exists, else double-buffered. */
181 num_pages = sarea_priv->third_handle ? 3 : 2;
183 spin_lock(&dev->drw_lock);
185 /* Emit blits for buffer swaps, partitioning both outputs into as many
186 * slices as there are buffer swaps scheduled in order to avoid tearing
187 * (based on the assumption that a single buffer swap would always
188 * complete before scanout starts).
/* One pass per slice; upper/lower advance by one slice each iteration. */
190 for (i = 0; i++ < nhits;
191 upper[0] = lower[0], lower[0] += slice[0],
192 upper[1] = lower[1], lower[1] += slice[1]) {
193 int init_drawrect = 1;
/* Last slice: extend to the bottom of the screen to absorb rounding. */
196 lower[0] = lower[1] = sarea_priv->height;
198 list_for_each(hit, &hits) {
199 drm_i915_vbl_swap_t *swap_hit =
200 list_entry(hit, drm_i915_vbl_swap_t, head);
201 drm_clip_rect_t *rect;
202 int num_rects, pipe, front, back;
203 unsigned short top, bottom;
205 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
210 pipe = swap_hit->pipe;
/* Flip-type swaps are handled by a page flip instead of a blit. */
212 if (swap_hit->flip) {
213 i915_dispatch_vsync_flip(dev, drw, pipe);
/* Emit the drawing-rectangle state once per slice before any blits. */
220 OUT_RING(GFX_OP_DRAWRECT_INFO);
223 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
224 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
229 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
236 bottom = lower[pipe];
/* Derive front/back buffer indices from the current page-flip state. */
238 front = (dev_priv->sarea_priv->pf_current_page >>
240 back = (front + 1) % num_pages;
/* Blit each cliprect, clamped vertically to this slice's [top,bottom). */
242 for (num_rects = drw->num_rects; num_rects--; rect++) {
243 int y1 = max(rect->y1, top);
244 int y2 = min(rect->y2, bottom);
252 OUT_RING(pitchropcpp);
253 OUT_RING((y1 << 16) | rect->x1);
254 OUT_RING((y2 << 16) | rect->x2);
255 OUT_RING(offsets[front]);
256 OUT_RING((y1 << 16) | rect->x1);
257 OUT_RING(pitchropcpp & 0xffff);
258 OUT_RING(offsets[back]);
265 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
/* Free all processed swap requests. */
267 list_for_each_safe(hit, tmp, &hits) {
268 drm_i915_vbl_swap_t *swap_hit =
269 list_entry(hit, drm_i915_vbl_swap_t, head);
273 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
/* Top-level interrupt handler: acks the IIR, wakes breadcrumb waiters on
 * user interrupts, and bumps vblank counters / schedules the swap tasklet
 * on vertical-blank interrupts. */
277 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
279 drm_device_t *dev = (drm_device_t *) arg;
280 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
282 u32 pipea_stats, pipeb_stats;
284 pipea_stats = I915_READ(I915REG_PIPEASTAT);
285 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
/* Mask the identity register down to the interrupts we actually enabled
 * (plus the user interrupt, which is always of interest). */
287 temp = I915_READ16(I915REG_INT_IDENTITY_R);
288 temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
291 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
/* Ack by writing the bits back; the follow-up read posts the write
 * before the memory barrier below. */
296 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
297 (void) I915_READ16(I915REG_INT_IDENTITY_R);
298 DRM_READMEMORYBARRIER();
/* Publish the latest retired breadcrumb to userspace via the sarea. */
300 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
302 if (temp & USER_INT_FLAG) {
303 DRM_WAKEUP(&dev_priv->irq_queue);
304 #ifdef I915_HAVE_FENCE
305 i915_fence_handler(dev);
309 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
310 int vblank_pipe = dev_priv->vblank_pipe;
/* Both pipes enabled: each pipe feeds its own counter... */
313 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
314 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
315 if (temp & VSYNC_PIPEA_FLAG)
316 atomic_inc(&dev->vbl_received);
317 if (temp & VSYNC_PIPEB_FLAG)
318 atomic_inc(&dev->vbl_received2);
/* ...otherwise whichever single pipe is enabled feeds the primary
 * counter. */
319 } else if (((temp & VSYNC_PIPEA_FLAG) &&
320 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
321 ((temp & VSYNC_PIPEB_FLAG) &&
322 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
323 atomic_inc(&dev->vbl_received);
325 DRM_WAKEUP(&dev->vbl_queue);
326 drm_vbl_send_signals(dev);
/* Defer swap blits to the locked tasklet; too much work for irq context. */
328 if (dev_priv->swaps_pending > 0)
329 drm_locked_tasklet(dev, i915_vblank_tasklet);
/* Write the pipe status registers back (w1c status bits) while keeping
 * vblank interrupts enabled. */
330 I915_WRITE(I915REG_PIPEASTAT,
331 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
333 I915_WRITE(I915REG_PIPEBSTAT,
334 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
/* Emit a breadcrumb store plus a user-interrupt command into the ring and
 * return the breadcrumb sequence number to wait on.  Caller must hold the
 * HW lock (this touches the ring). */
341 int i915_emit_irq(drm_device_t * dev)
344 drm_i915_private_t *dev_priv = dev->dev_private;
347 i915_kernel_lost_context(dev);
349 DRM_DEBUG("%s\n", __FUNCTION__);
/* Breadcrumb first, so the sequence is visible before the interrupt fires. */
351 i915_emit_breadcrumb(dev);
355 OUT_RING(GFX_OP_USER_INTERRUPT);
/* dev_priv->counter is presumably advanced inside i915_emit_breadcrumb —
 * not visible in this extract. */
358 return dev_priv->counter;
/* Reference-counted enable of the user interrupt: only the 0 -> 1
 * transition touches the enable register. */
363 void i915_user_irq_on(drm_i915_private_t *dev_priv)
365 spin_lock(&dev_priv->user_irq_lock);
366 if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
367 dev_priv->irq_enable_reg |= USER_INT_FLAG;
368 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
370 spin_unlock(&dev_priv->user_irq_lock);
/* Reference-counted disable of the user interrupt.  The actual hardware
 * disable on the 1 -> 0 transition is commented out below — presumably a
 * deliberate workaround (leaving the interrupt enabled); confirm before
 * re-enabling it. */
374 void i915_user_irq_off(drm_i915_private_t *dev_priv)
376 spin_lock(&dev_priv->user_irq_lock);
377 if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
378 // dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
379 // I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
381 spin_unlock(&dev_priv->user_irq_lock);
/* Wait predicate for drm_wait_on(): true once the hardware breadcrumb has
 * reached the sequence number smuggled through the void* argument. */
384 static int wait_compare(struct drm_device *dev, void *priv)
386 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* priv carries the irq number by value (cast via u64), not a pointer. */
387 int irq_nr = (u64)priv;
389 return (READ_BREADCRUMB(dev_priv) >= irq_nr);
/* Block (up to 3 * DRM_HZ) until the breadcrumb reaches irq_nr, keeping
 * the user interrupt enabled for the duration of the wait. */
392 static int i915_wait_irq(drm_device_t * dev, int irq_nr)
394 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
397 DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
398 READ_BREADCRUMB(dev_priv));
/* Fast path: already retired, no need to sleep. */
400 if (READ_BREADCRUMB(dev_priv) >= irq_nr)
/* Mark the wait in the perf boxes so userspace tooling can see stalls. */
403 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
405 i915_user_irq_on(dev_priv);
406 ret = drm_wait_on(dev, &dev_priv->irq_queue, 3 * DRM_HZ, wait_compare,
407 (void *)(u64)irq_nr);
408 i915_user_irq_off(dev_priv);
410 if (ret == DRM_ERR(EBUSY)) {
411 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
413 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
/* Mirror the final breadcrumb into the sarea, as the irq handler does. */
416 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
/* Wait (up to 3 * DRM_HZ) for the given vblank counter to reach *sequence,
 * using wrap-safe unsigned arithmetic; writes the counter value actually
 * reached back through *sequence. */
420 static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
423 drm_i915_private_t *dev_priv = dev->dev_private;
424 unsigned int cur_vblank;
428 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
429 return DRM_ERR(EINVAL);
/* (cur - *sequence) <= 2^23 means "sequence reached or passed" modulo
 * counter wrap-around. */
432 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
433 (((cur_vblank = atomic_read(counter))
434 - *sequence) <= (1<<23)));
436 *sequence = cur_vblank;
/* Wait for a vblank on the primary counter (pipe A, or the only enabled pipe). */
441 int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
443 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
/* Wait for a vblank on the secondary counter (pipe B when both are enabled). */
446 int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
448 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
451 /* Needs the lock as it touches the ring.
/* IRQ_EMIT ioctl: emit a user interrupt + breadcrumb and copy the
 * resulting sequence number back to the caller-supplied pointer. */
453 int i915_irq_emit(DRM_IOCTL_ARGS)
456 drm_i915_private_t *dev_priv = dev->dev_private;
457 drm_i915_irq_emit_t emit;
460 LOCK_TEST_WITH_RETURN(dev, filp);
463 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
464 return DRM_ERR(EINVAL);
467 DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
470 result = i915_emit_irq(dev);
/* emit.irq_seq is a user-space pointer supplied by the ioctl caller. */
472 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
473 DRM_ERROR("copy_to_user\n");
474 return DRM_ERR(EFAULT);
480 /* Doesn't need the hardware lock.
/* IRQ_WAIT ioctl: block until the breadcrumb reaches the requested
 * sequence number (delegates to i915_wait_irq). */
482 int i915_irq_wait(DRM_IOCTL_ARGS)
485 drm_i915_private_t *dev_priv = dev->dev_private;
486 drm_i915_irq_wait_t irqwait;
489 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
490 return DRM_ERR(EINVAL);
493 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
496 return i915_wait_irq(dev, irqwait.irq_seq);
/* Program the interrupt enable register: always the user interrupt, plus
 * vblank interrupts for whichever pipes dev_priv->vblank_pipe selects. */
499 static void i915_enable_interrupt (drm_device_t *dev)
501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
503 dev_priv->irq_enable_reg = USER_INT_FLAG;
504 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
505 dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
506 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
507 dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
509 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
510 dev_priv->irq_enabled = 1;
513 /* Set the vblank monitor pipe
/* VBLANK_PIPE_SET ioctl: validate the requested pipe mask, record it and
 * reprogram the interrupt enables accordingly. */
515 int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
518 drm_i915_private_t *dev_priv = dev->dev_private;
519 drm_i915_vblank_pipe_t pipe;
522 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
523 return DRM_ERR(EINVAL);
526 DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
/* Reject any bits outside the two valid pipe selectors. */
529 if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
530 DRM_ERROR("%s called with invalid pipe 0x%x\n",
531 __FUNCTION__, pipe.pipe);
532 return DRM_ERR(EINVAL);
535 dev_priv->vblank_pipe = pipe.pipe;
537 i915_enable_interrupt (dev);
/* VBLANK_PIPE_GET ioctl: report which pipes currently have vblank
 * interrupts enabled, derived from the hardware enable register. */
542 int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
545 drm_i915_private_t *dev_priv = dev->dev_private;
546 drm_i915_vblank_pipe_t pipe;
550 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
551 return DRM_ERR(EINVAL);
554 flag = I915_READ(I915REG_INT_ENABLE_R);
/* NOTE(review): pipe.pipe is OR-ed below but no zero-initialization is
 * visible in this extract (original line 555 is missing) — verify that
 * pipe.pipe is cleared before these ORs. */
556 if (flag & VSYNC_PIPEA_FLAG)
557 pipe.pipe |= DRM_I915_VBLANK_PIPE_A;
558 if (flag & VSYNC_PIPEB_FLAG)
559 pipe.pipe |= DRM_I915_VBLANK_PIPE_B;
560 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe,
566 * Schedule buffer swap at given vertical blank.
/* VBLANK_SWAP ioctl: validate the request, then either dispatch an
 * immediate flip (target vblank already passed with _DRM_VBLANK_FLIP),
 * merge with an already-queued identical swap, or queue a new
 * drm_i915_vbl_swap_t for the vblank tasklet to process. */
568 int i915_vblank_swap(DRM_IOCTL_ARGS)
571 drm_i915_private_t *dev_priv = dev->dev_private;
572 drm_i915_vblank_swap_t swap;
573 drm_i915_vbl_swap_t *vbl_swap;
574 unsigned int pipe, seqtype, curseq;
575 unsigned long irqflags;
576 struct list_head *list;
579 DRM_ERROR("%s called with no initialization\n", __func__);
580 return DRM_ERR(EINVAL);
/* Swaps via blit/flip don't handle rotated framebuffers. */
583 if (dev_priv->sarea_priv->rotation) {
584 DRM_DEBUG("Rotation not supported\n");
585 return DRM_ERR(EINVAL);
588 DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
/* Reject unknown sequence-type bits from userspace. */
591 if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
592 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
594 DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
595 return DRM_ERR(EINVAL);
598 pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
600 seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
602 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
603 DRM_ERROR("Invalid pipe %d\n", pipe);
604 return DRM_ERR(EINVAL);
/* Drawable lookups must happen under drw_lock. */
607 spin_lock_irqsave(&dev->drw_lock, irqflags);
609 if (!drm_get_drawable_info(dev, swap.drawable)) {
610 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
611 DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
612 return DRM_ERR(EINVAL);
615 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
617 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
619 if (seqtype == _DRM_VBLANK_RELATIVE)
620 swap.sequence += curseq;
/* Wrap-safe "target already passed" check. */
622 if ((curseq - swap.sequence) <= (1<<23)) {
623 if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
624 swap.sequence = curseq + 1;
626 DRM_DEBUG("Missed target sequence\n");
627 return DRM_ERR(EINVAL);
631 if (swap.seqtype & _DRM_VBLANK_FLIP) {
/* Flip whose target vblank already passed: do it right now, under the
 * HW lock and drw_lock, instead of queueing. */
634 if ((curseq - swap.sequence) <= (1<<23)) {
635 drm_drawable_info_t *drw;
637 LOCK_TEST_WITH_RETURN(dev, filp);
639 spin_lock_irqsave(&dev->drw_lock, irqflags);
641 drw = drm_get_drawable_info(dev, swap.drawable);
644 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
645 DRM_DEBUG("Invalid drawable ID %d\n",
647 return DRM_ERR(EINVAL);
650 i915_dispatch_vsync_flip(dev, drw, pipe);
652 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
/* If an identical swap is already queued, just update its flip flag. */
658 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
660 list_for_each(list, &dev_priv->vbl_swaps.head) {
661 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
663 if (vbl_swap->drw_id == swap.drawable &&
664 vbl_swap->pipe == pipe &&
665 vbl_swap->sequence == swap.sequence) {
666 vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP);
667 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
668 DRM_DEBUG("Already scheduled\n");
673 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/* Cap the queue so a misbehaving client can't exhaust kernel memory. */
675 if (dev_priv->swaps_pending >= 100) {
676 DRM_DEBUG("Too many swaps queued\n");
677 return DRM_ERR(EBUSY);
/* NOTE(review): sizeof(vbl_swap) is the size of a POINTER, not of the
 * drm_i915_vbl_swap_t it points to — this under-allocates; it should
 * almost certainly be sizeof(*vbl_swap) (matching the drm_free calls
 * elsewhere in this file, which use sizeof(*vbl_swap)). */
680 vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER);
683 DRM_ERROR("Failed to allocate memory to queue swap\n");
684 return DRM_ERR(ENOMEM);
689 vbl_swap->drw_id = swap.drawable;
690 vbl_swap->pipe = pipe;
691 vbl_swap->sequence = swap.sequence;
692 vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP);
697 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* NOTE(review): the cast relies on 'head' being the FIRST member of
 * drm_i915_vbl_swap_t; &vbl_swap->head would be the robust spelling. */
699 list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
700 dev_priv->swaps_pending++;
702 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
704 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
/* Quiesce interrupt sources before the IRQ handler is installed: mask the
 * hardware status page sources and zero the interrupt mask/enable regs. */
712 void i915_driver_irq_preinstall(drm_device_t * dev)
714 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
716 I915_WRITE16(I915REG_HWSTAM, 0xeffe);
717 I915_WRITE16(I915REG_INT_MASK_R, 0x0);
718 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/* Post-install setup: initialize swap-queue and user-irq state, enable
 * interrupts and the IRQ wait queue, and arm the hw status page writes. */
721 void i915_driver_irq_postinstall(drm_device_t * dev)
723 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
725 dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
726 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
727 dev_priv->swaps_pending = 0;
/* NOTE(review): the three lines below repeat the initialization above
 * verbatim (likely a merge artifact) — harmless but redundant. */
729 dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
730 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
731 dev_priv->swaps_pending = 0;
733 dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
734 dev_priv->user_irq_refcount = 0;
736 i915_enable_interrupt(dev);
737 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
740 * Initialize the hardware status page IRQ location.
743 I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21));
/* Tear-down: mask and disable all interrupt sources, then ack any
 * interrupt still pending in the identity register. */
746 void i915_driver_irq_uninstall(drm_device_t * dev)
748 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
753 dev_priv->irq_enabled = 0;
754 I915_WRITE16(I915REG_HWSTAM, 0xffff);
755 I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
756 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/* Read-then-write-back clears any stale identity (w1c) bits. */
758 temp = I915_READ16(I915REG_INT_IDENTITY_R);
759 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);