1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include "intel_drv.h"
34 #include "drm_crtc_helper.h"
36 #define MAX_NOPID ((u32)~0)
/*
 * Interrupt sources the driver enables in IER: user (breadcrumb)
 * interrupts plus the per-pipe display event interrupts.
 */
39 * These are the interrupts used by the driver
41 #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
42 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
43 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
/*
 * Unmask (enable) the interrupt bits in @mask: clear them in the cached
 * IMR shadow and write it back.  Skips the MMIO write when the bits are
 * already unmasked.  NOTE(review): this chunk has elided lines (original
 * numbering is non-contiguous), so braces may be missing from view.
 */
46 i915_enable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
48 if ((dev_priv->irq_mask_reg & mask) != 0) {
49 dev_priv->irq_mask_reg &= ~mask;
50 I915_WRITE(IMR, dev_priv->irq_mask_reg);
51 (void) I915_READ(IMR); /* posting read flushes the IMR write */
/*
 * Mask (disable) the interrupt bits in @mask: set them in the cached
 * IMR shadow and write it back.  Skips the MMIO write when the bits are
 * already fully masked.
 */
56 i915_disable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
58 if ((dev_priv->irq_mask_reg & mask) != mask) {
59 dev_priv->irq_mask_reg |= mask;
60 I915_WRITE(IMR, dev_priv->irq_mask_reg);
61 (void) I915_READ(IMR); /* posting read flushes the IMR write */
66 * i915_get_pipe - return the pipe associated with a given plane
68 * @plane: plane to look for
70 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
71 * rather than a pipe number, since they may not always be equal. This routine
72 * maps the given @plane back to a pipe number.
75 i915_get_pipe(struct drm_device *dev, int plane)
77 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
/* Read the plane's control register and extract its pipe-select bit. */
80 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
82 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
86 * i915_get_plane - return the plane associated with a given pipe
88 * @pipe: pipe to look for
90 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
91 * rather than a pipe number, since they may not always be equal. This routine
92 * maps the given @pipe back to a plane number.
95 i915_get_plane(struct drm_device *dev, int pipe)
/* If plane 0 drives @pipe the answer is plane 0; the elided remainder
 * presumably returns 1 otherwise — lines missing from this chunk. */
97 if (i915_get_pipe(dev, 0) == pipe)
103 * i915_pipe_enabled - check if a pipe is enabled
105 * @pipe: pipe to check
107 * Reading certain registers when the pipe is disabled can hang the chip.
108 * Use this routine to make sure the PLL is running and the pipe is active
109 * before reading such registers if unsure.
112 i915_pipe_enabled(struct drm_device *dev, int pipe)
114 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
115 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
/* PIPEACONF_ENABLE is used for both pipes — presumably the enable bit
 * sits at the same position in PIPEBCONF; verify against the PRM. */
117 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
124 * Emit a synchronous flip.
126 * This function must be called with the drawable spinlock held.
129 i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
132 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
133 struct drm_i915_sarea *sarea_priv = master_priv->sarea_priv;
/* Bitmask of planes to flip; starts with just the requested plane. */
135 int pf_planes = 1 << plane;
137 DRM_SPINLOCK_ASSERT(&dev->drw_lock);
139 /* If the window is visible on the other plane, we have to flip on that
/* Compute the other plane's bounding box from the SAREA geometry.
 * NOTE(review): the branch selecting planeA vs planeB coordinates is
 * elided from this chunk. */
143 x1 = sarea_priv->planeA_x;
144 y1 = sarea_priv->planeA_y;
145 x2 = x1 + sarea_priv->planeA_w;
146 y2 = y1 + sarea_priv->planeA_h;
148 x1 = sarea_priv->planeB_x;
149 y1 = sarea_priv->planeB_y;
150 x2 = x1 + sarea_priv->planeB_w;
151 y2 = y1 + sarea_priv->planeB_h;
154 if (x2 > 0 && y2 > 0) {
155 int i, num_rects = drw->num_rects;
156 struct drm_clip_rect *rect = drw->rects;
/* Any clip rect overlapping the other plane's box forces a flip on
 * that plane too (the elided body presumably ORs it into pf_planes). */
158 for (i = 0; i < num_rects; i++)
159 if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
160 rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
167 i915_dispatch_flip(dev, pf_planes, 1);
171 * Emit blits for scheduled buffer swaps.
173 * This function will be called with the HW lock held.
175 static void i915_vblank_tasklet(struct drm_device *dev)
177 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
178 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
179 struct list_head *list, *tmp, hits, *hit;
180 int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
182 struct drm_drawable_info *drw;
183 struct drm_i915_sarea *sarea_priv = master_priv->sarea_priv;
184 u32 cpp = dev_priv->cpp, offsets[3];
/* Blit command: only write alpha/RGB channels for 32bpp surfaces. */
185 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
186 XY_SRC_COPY_BLT_WRITE_ALPHA |
187 XY_SRC_COPY_BLT_WRITE_RGB)
188 : XY_SRC_COPY_BLT_CMD;
189 u32 src_pitch = sarea_priv->pitch * cpp;
190 u32 dst_pitch = sarea_priv->pitch * cpp;
191 /* COPY rop (0xcc), map cpp to magic color depth constants */
192 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
/* 965+ uses tiled-surface flags on the blit command. */
195 if (IS_I965G(dev) && sarea_priv->front_tiled) {
196 cmd |= XY_SRC_COPY_BLT_DST_TILED;
199 if (IS_I965G(dev) && sarea_priv->back_tiled) {
200 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
/* Snapshot current vblank counts for both pipes. */
204 counter[0] = drm_vblank_count(dev, 0);
205 counter[1] = drm_vblank_count(dev, 1);
209 INIT_LIST_HEAD(&hits);
213 /* No irqsave/restore necessary. This tasklet may be run in an
214 * interrupt context or normal context, but we don't have to worry
215 * about getting interrupted by something acquiring the lock, because
216 * we are the interrupt context thing that acquires the lock.
218 DRM_SPINLOCK(&dev_priv->swaps_lock);
220 /* Find buffer swaps scheduled for this vertical blank */
221 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
222 struct drm_i915_vbl_swap *vbl_swap =
223 list_entry(list, struct drm_i915_vbl_swap, head);
224 int pipe = i915_get_pipe(dev, vbl_swap->plane);
/* 24-bit wraparound comparison: skip swaps not yet due. */
226 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
229 master_priv = vbl_swap->minor->master->driver_priv;
230 sarea_priv = master_priv->sarea_priv;
233 dev_priv->swaps_pending--;
/* Drop the vblank reference taken when the swap was queued. */
234 drm_vblank_put(dev, pipe);
236 DRM_SPINUNLOCK(&dev_priv->swaps_lock);
237 DRM_SPINLOCK(&dev->drw_lock);
239 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
/* Drawable vanished: free the swap entry and continue scanning. */
242 DRM_SPINUNLOCK(&dev->drw_lock);
243 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
244 DRM_SPINLOCK(&dev_priv->swaps_lock);
/* Insert into 'hits' sorted by the drawable's topmost cliprect. */
248 list_for_each(hit, &hits) {
249 struct drm_i915_vbl_swap *swap_cmp =
250 list_entry(hit, struct drm_i915_vbl_swap, head);
251 struct drm_drawable_info *drw_cmp =
252 drm_get_drawable_info(dev, swap_cmp->drw_id);
255 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
256 list_add_tail(list, hit);
261 DRM_SPINUNLOCK(&dev->drw_lock);
263 /* List of hits was empty, or we reached the end of it */
265 list_add_tail(list, hits.prev);
269 DRM_SPINLOCK(&dev_priv->swaps_lock);
272 DRM_SPINUNLOCK(&dev_priv->swaps_lock);
278 i915_kernel_lost_context(dev);
/* Partition each output vertically into nhits slices to avoid tearing. */
280 upper[0] = upper[1] = 0;
281 slice[0] = max(sarea_priv->planeA_h / nhits, 1);
282 slice[1] = max(sarea_priv->planeB_h / nhits, 1);
283 lower[0] = sarea_priv->planeA_y + slice[0];
/* NOTE(review): slice[0] used for plane B's lower bound here — looks
 * like it should be slice[1]; preserved as-is, confirm upstream. */
284 lower[1] = sarea_priv->planeB_y + slice[0];
286 offsets[0] = sarea_priv->front_offset;
287 offsets[1] = sarea_priv->back_offset;
288 offsets[2] = sarea_priv->third_offset;
289 num_pages = sarea_priv->third_handle ? 3 : 2;
291 DRM_SPINLOCK(&dev->drw_lock);
293 /* Emit blits for buffer swaps, partitioning both outputs into as many
294 * slices as there are buffer swaps scheduled in order to avoid tearing
295 * (based on the assumption that a single buffer swap would always
296 * complete before scanout starts).
298 for (i = 0; i++ < nhits;
299 upper[0] = lower[0], lower[0] += slice[0],
300 upper[1] = lower[1], lower[1] += slice[1]) {
301 int init_drawrect = 1;
/* Last slice extends to the bottom of the framebuffer. */
304 lower[0] = lower[1] = sarea_priv->height;
306 list_for_each(hit, &hits) {
307 struct drm_i915_vbl_swap *swap_hit =
308 list_entry(hit, struct drm_i915_vbl_swap, head);
309 struct drm_clip_rect *rect;
310 int num_rects, plane, front, back;
311 unsigned short top, bottom;
313 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
318 plane = swap_hit->plane;
/* Page-flip swaps bypass the blit path entirely. */
320 if (swap_hit->flip) {
321 i915_dispatch_vsync_flip(dev, drw, plane);
/* Emit the DRAWRECT once per slice pass; 965 uses a longer packet. */
326 int width = sarea_priv->width;
327 int height = sarea_priv->height;
331 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
333 OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
340 OUT_RING(GFX_OP_DRAWRECT_INFO);
343 OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
350 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
357 bottom = lower[plane];
/* Determine front/back buffers from the current page-flip state. */
359 front = (master_priv->sarea_priv->pf_current_page >>
361 back = (front + 1) % num_pages;
/* Blit each cliprect, clamped to this slice's [top, bottom) band. */
363 for (num_rects = drw->num_rects; num_rects--; rect++) {
364 int y1 = max(rect->y1, top);
365 int y2 = min(rect->y2, bottom);
373 OUT_RING(ropcpp | dst_pitch);
374 OUT_RING((y1 << 16) | rect->x1);
375 OUT_RING((y2 << 16) | rect->x2);
376 OUT_RING(offsets[front]);
377 OUT_RING((y1 << 16) | rect->x1);
379 OUT_RING(offsets[back]);
386 DRM_SPINUNLOCK(&dev->drw_lock);
/* All swaps emitted: free the processed entries. */
388 list_for_each_safe(hit, tmp, &hits) {
389 struct drm_i915_vbl_swap *swap_hit =
390 list_entry(hit, struct drm_i915_vbl_swap, head);
394 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
/*
 * Return the hardware frame counter for the pipe driving @plane.
 * The counter is split across two registers whose fields are not
 * latched together, so the high half is read twice around the low
 * half and the read is retried until the high halves agree.
 */
398 u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
400 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
401 unsigned long high_frame;
402 unsigned long low_frame;
403 u32 high1, high2, low, count;
406 pipe = i915_get_pipe(dev, plane);
407 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
408 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
/* Reading frame-count registers of a disabled pipe can hang the chip. */
410 if (!i915_pipe_enabled(dev, pipe)) {
411 DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
416 * High & low register fields aren't synchronized, so make sure
417 * we get a low value that's stable across two reads of the high
421 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
422 PIPE_FRAME_HIGH_SHIFT);
423 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
424 PIPE_FRAME_LOW_SHIFT);
425 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
426 PIPE_FRAME_HIGH_SHIFT);
427 } while (high1 != high2);
/* Combine into a 24-bit frame count (16 high bits, 8 low bits). */
429 count = (high1 << 8) | low;
/* Device handle stashed for the hotplug work item — global because the
 * pre-2.6.20 workqueue API passes only an opaque pointer. */
434 static struct drm_device *hotplug_dev;
437 * Handler for user interrupts in process context (able to sleep, do VFS
440 * If another IRQ comes in while we're in this handler, it will still get put
441 * on the queue again to be rerun when we finish.
/* Workqueue callback: signature differs across kernel versions. */
443 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
444 static void i915_hotplug_work_func(void *work)
446 static void i915_hotplug_work_func(struct work_struct *work)
449 struct drm_device *dev = hotplug_dev;
451 drm_helper_hotplug_stage_two(dev);
452 drm_handle_hotplug(dev);
/*
 * Decode the hotplug status bits in @stat (TV, CRT, SDVO B/C), log each
 * detected event, and queue the hotplug work item for process-context
 * handling.
 */
455 static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
457 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
458 static DECLARE_WORK(hotplug, i915_hotplug_work_func, NULL);
460 static DECLARE_WORK(hotplug, i915_hotplug_work_func);
462 struct drm_i915_private *dev_priv = dev->dev_private;
466 if (stat & TV_HOTPLUG_INT_STATUS) {
467 DRM_DEBUG("TV event\n");
470 if (stat & CRT_HOTPLUG_INT_STATUS) {
471 DRM_DEBUG("CRT event\n");
474 if (stat & SDVOB_HOTPLUG_INT_STATUS) {
475 DRM_DEBUG("sDVOB event\n");
478 if (stat & SDVOC_HOTPLUG_INT_STATUS) {
479 DRM_DEBUG("sDVOC event\n");
/* Defer the heavy lifting (can sleep) to the driver workqueue. */
481 queue_work(dev_priv->wq, &hotplug);
/*
 * Top-level interrupt handler: reads IIR, clears per-pipe status
 * registers before IIR (to avoid spurious re-interrupts), then
 * dispatches hotplug, user (breadcrumb), vblank and ASLE events.
 */
486 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
488 struct drm_device *dev = (struct drm_device *) arg;
489 struct drm_i915_master_private *master_priv;
490 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
492 u32 pipea_stats = 0, pipeb_stats = 0, tvdac;
/* With MSI, mask everything while handling (elided line presumably
 * writes ~0 to IMR); the mask is restored below. */
496 if (dev->pdev->msi_enabled)
498 iir = I915_READ(IIR);
499 atomic_inc(&dev_priv->irq_received)
501 if (dev->pdev->msi_enabled) {
502 I915_WRITE(IMR, dev_priv->irq_mask_reg);
503 (void) I915_READ(IMR); /* posting read */
509 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
510 * we may get extra interrupts.
512 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
513 pipea_stats = I915_READ(PIPEASTAT);
514 I915_WRITE(PIPEASTAT, pipea_stats); /* write-back clears status bits */
517 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
518 pipeb_stats = I915_READ(PIPEBSTAT);
519 I915_WRITE(PIPEBSTAT, pipeb_stats); /* write-back clears status bits */
522 I915_WRITE(IIR, iir); /* acknowledge handled interrupts */
523 if (dev->pdev->msi_enabled)
524 I915_WRITE(IMR, dev_priv->irq_mask_reg);
525 (void) I915_READ(IIR); /* Flush posted writes */
527 /* This is a global event, and not a pipe A event */
528 if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
531 if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS) {
533 /* Toggle hotplug detection to clear hotplug status */
534 tvdac = I915_READ(TV_DAC);
535 I915_WRITE(TV_DAC, tvdac & ~TVDAC_STATE_CHG_EN);
536 I915_WRITE(TV_DAC, tvdac | TVDAC_STATE_CHG_EN);
/* Publish the latest breadcrumb to the SAREA for userspace. */
539 if (dev->primary->master) {
540 master_priv = dev->primary->master->driver_priv;
541 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
/* ASLE (backlight/opregion) events, only on newer kernels. */
545 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
546 if ((iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) &&
547 (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE))
548 opregion_asle_intr(dev);
549 if (iir & I915_ASLE_INTERRUPT)
550 opregion_asle_intr(dev);
/* User interrupt: record GEM seqno and wake breadcrumb waiters. */
554 if (iir & I915_USER_INTERRUPT) {
555 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
556 DRM_WAKEUP(&dev_priv->irq_queue);
559 if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
560 PIPE_VBLANK_INTERRUPT_STATUS)) {
562 drm_handle_vblank(dev, i915_get_plane(dev, 0));
565 /* The vblank interrupt gets enabled even if we didn't ask for
566 it, so make sure it's shut down again */
567 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
568 pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE);
570 if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
571 PIPE_VBLANK_INTERRUPT_STATUS)) {
573 drm_handle_vblank(dev, i915_get_plane(dev, 1));
/* Pending swaps: run the blit tasklet under the HW lock. */
577 if (dev_priv->swaps_pending > 0)
578 drm_locked_tasklet(dev, i915_vblank_tasklet);
581 if ((iir & I915_DISPLAY_PORT_INTERRUPT) || hotplug) {
584 DRM_INFO("Hotplug event received\n");
/* Pre-9xx parts (and 915G/GM) report hotplug via PIPEASTAT rather
 * than the PORT_HOTPLUG_STAT register. */
586 if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev)) {
587 if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
588 temp2 |= SDVOB_HOTPLUG_INT_STATUS |
589 SDVOC_HOTPLUG_INT_STATUS;
590 if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS)
591 temp2 |= TV_HOTPLUG_INT_STATUS;
593 temp2 = I915_READ(PORT_HOTPLUG_STAT);
595 I915_WRITE(PORT_HOTPLUG_STAT, temp2); /* ack hotplug status */
597 i915_run_hotplug_tasklet(dev, temp2);
/*
 * Emit a breadcrumb plus a user-interrupt command into the ring and
 * return the breadcrumb counter value callers can wait on.
 */
603 int i915_emit_irq(struct drm_device *dev)
605 struct drm_i915_private *dev_priv = dev->dev_private;
608 i915_kernel_lost_context(dev);
612 i915_emit_breadcrumb(dev);
616 OUT_RING(MI_USER_INTERRUPT);
619 return dev_priv->counter;
/*
 * Take a reference on the user interrupt; enables it in IMR on the
 * 0 -> 1 refcount transition (and only while IRQs are installed).
 */
622 void i915_user_irq_on(struct drm_device *dev)
624 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
626 DRM_SPINLOCK(&dev_priv->user_irq_lock);
627 if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
628 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
629 DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
/*
 * Drop a reference on the user interrupt; disables it in IMR on the
 * 1 -> 0 refcount transition.  BUG if the refcount would go negative.
 */
632 void i915_user_irq_off(struct drm_device *dev)
634 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
636 DRM_SPINLOCK(&dev_priv->user_irq_lock);
637 BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
638 if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
639 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
640 DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
/*
 * Block (up to 3 seconds) until the hardware breadcrumb reaches
 * @irq_nr, enabling the user interrupt for the duration of the wait.
 * Updates the SAREA last_dispatch field on completion.
 */
644 int i915_wait_irq(struct drm_device * dev, int irq_nr)
646 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
647 struct drm_i915_master_private *master_priv;
651 DRM_ERROR("called with no initialization\n");
655 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
656 READ_BREADCRUMB(dev_priv));
658 master_priv = dev->primary->master->driver_priv;
661 DRM_ERROR("no master priv?\n");
/* Fast path: breadcrumb already passed the requested value. */
665 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
666 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
670 i915_user_irq_on(dev);
671 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
672 READ_BREADCRUMB(dev_priv) >= irq_nr);
673 i915_user_irq_off(dev);
/* Timeout path: report the breadcrumb we saw vs. what was emitted. */
676 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
677 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
680 if (READ_BREADCRUMB(dev_priv) >= irq_nr)
681 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
686 /* Needs the lock as it touches the ring.
/*
 * DRM_IOCTL_I915_IRQ_EMIT: emit a user interrupt and copy the resulting
 * breadcrumb sequence number back to userspace.
 */
688 int i915_irq_emit(struct drm_device *dev, void *data,
689 struct drm_file *file_priv)
691 struct drm_i915_private *dev_priv = dev->dev_private;
692 struct drm_i915_irq_emit *emit = data;
695 LOCK_TEST_WITH_RETURN(dev, file_priv);
698 DRM_ERROR("called with no initialization\n");
702 result = i915_emit_irq(dev);
704 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
705 DRM_ERROR("copy_to_user\n");
712 /* Doesn't need the hardware lock.
/*
 * DRM_IOCTL_I915_IRQ_WAIT: wait for the breadcrumb to reach the
 * sequence number supplied by userspace.
 */
714 int i915_irq_wait(struct drm_device *dev, void *data,
715 struct drm_file *file_priv)
717 struct drm_i915_private *dev_priv = dev->dev_private;
718 struct drm_i915_irq_wait *irqwait = data;
721 DRM_ERROR("called with no initialization\n");
725 return i915_wait_irq(dev, irqwait->irq_seq);
/*
 * Enable vblank interrupts for the pipe driving @plane: set the vblank
 * enable bits in PIPESTAT, clear stale status, then unmask the pipe's
 * event interrupt in IMR.
 */
728 int i915_enable_vblank(struct drm_device *dev, int plane)
730 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
731 int pipe = i915_get_pipe(dev, plane);
732 u32 pipestat_reg = 0;
738 pipestat_reg = PIPEASTAT;
739 mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
742 pipestat_reg = PIPEBSTAT;
743 mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
746 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
753 pipestat = I915_READ (pipestat_reg);
755 * Older chips didn't have the start vblank interrupt,
759 pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
761 pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
763 * Clear any pending status
/* Status bits are write-1-to-clear, so set them in the write-back. */
765 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
766 PIPE_VBLANK_INTERRUPT_STATUS);
767 I915_WRITE(pipestat_reg, pipestat);
769 DRM_SPINLOCK(&dev_priv->user_irq_lock);
770 i915_enable_irq(dev_priv, mask_reg);
771 DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
/*
 * Disable vblank interrupts for the pipe driving @plane: mask the
 * pipe's event interrupt in IMR first, then clear the vblank enable
 * bits and any pending status in PIPESTAT.
 */
776 void i915_disable_vblank(struct drm_device *dev, int plane)
778 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
779 int pipe = i915_get_pipe(dev, plane);
780 u32 pipestat_reg = 0;
786 pipestat_reg = PIPEASTAT;
787 mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
790 pipestat_reg = PIPEBSTAT;
791 mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
794 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
799 DRM_SPINLOCK(&dev_priv->user_irq_lock);
800 i915_disable_irq(dev_priv, mask_reg);
801 DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
804 pipestat = I915_READ (pipestat_reg);
805 pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
806 PIPE_VBLANK_INTERRUPT_ENABLE);
808 * Clear any pending status
/* Status bits are write-1-to-clear, so set them in the write-back. */
810 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
811 PIPE_VBLANK_INTERRUPT_STATUS);
812 I915_WRITE(pipestat_reg, pipestat);
813 (void) I915_READ(pipestat_reg); /* posting read */
/*
 * Enable the driver's interrupt sources: unmask display-port/pipe-A
 * hotplug events depending on chip generation, configure per-output
 * hotplug detection (CRT, SDVO B/C, TV DAC), enable ASLE on newer
 * kernels, and mark irq_enabled.  NOTE(review): the IER write enabling
 * I915_INTERRUPT_ENABLE_MASK appears to be among the elided lines.
 */
817 void i915_enable_interrupt (struct drm_device *dev)
819 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
820 struct drm_connector *o;
822 dev_priv->irq_mask_reg &= ~0;
/* 9xx (except 915G/GM) has a dedicated display-port interrupt for
 * hotplug; older parts route hotplug through the pipe A event. */
824 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
825 if (dev->mode_config.num_connector)
826 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
828 if (dev->mode_config.num_connector)
829 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
831 /* Enable global interrupts for hotplug - not a pipeA event */
832 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) |
833 PIPE_HOTPLUG_INTERRUPT_ENABLE |
834 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE |
835 PIPE_HOTPLUG_TV_INTERRUPT_STATUS |
836 PIPE_HOTPLUG_INTERRUPT_STATUS);
/* If any hotplug source got unmasked, configure the detectors. */
839 if (!(dev_priv->irq_mask_reg & I915_DISPLAY_PORT_INTERRUPT) ||
840 !(dev_priv->irq_mask_reg & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
843 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
844 temp = I915_READ(PORT_HOTPLUG_EN);
846 /* Activate the CRT */
847 temp |= CRT_HOTPLUG_INT_EN;
/* SDVO B: enable hotplug only if the encoder supports it. */
852 o = intel_sdvo_find(dev, 1);
853 if (o && intel_sdvo_supports_hotplug(o)) {
854 intel_sdvo_set_hotplug(o, 1);
855 temp |= SDVOB_HOTPLUG_INT_EN;
/* SDVO C: same treatment. */
859 o = intel_sdvo_find(dev, 0);
860 if (o && intel_sdvo_supports_hotplug(o)) {
861 intel_sdvo_set_hotplug(o, 1);
862 temp |= SDVOC_HOTPLUG_INT_EN;
865 I915_WRITE(SDVOB, I915_READ(SDVOB) | SDVO_INTERRUPT_ENABLE);
866 I915_WRITE(SDVOC, I915_READ(SDVOC) | SDVO_INTERRUPT_ENABLE);
/* TV DAC state-change detection for TV-out hotplug. */
869 I915_WRITE(TV_DAC, I915_READ(TV_DAC) | TVDAC_STATE_CHG_EN);
874 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
875 I915_WRITE(PORT_HOTPLUG_EN, temp);
877 DRM_DEBUG("HEN %08x\n",I915_READ(PORT_HOTPLUG_EN));
878 DRM_DEBUG("HST %08x\n",I915_READ(PORT_HOTPLUG_STAT));
/* Clear any stale hotplug status (write-1-to-clear). */
880 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
885 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
886 opregion_enable_asle(dev);
889 dev_priv->irq_enabled = 1;
892 /* Set the vblank monitor pipe
/*
 * DRM_IOCTL_I915_VBLANK_PIPE_SET ioctl: historical no-op beyond the
 * init check — the elided body presumably just returns 0.
 */
894 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
895 struct drm_file *file_priv)
897 struct drm_i915_private *dev_priv = dev->dev_private;
900 DRM_ERROR("called with no initialization\n");
/*
 * DRM_IOCTL_I915_VBLANK_PIPE_GET ioctl: report which pipes have their
 * event interrupts unmasked, derived from the inverted IMR shadow.
 */
907 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
908 struct drm_file *file_priv)
910 struct drm_i915_private *dev_priv = dev->dev_private;
911 struct drm_i915_vblank_pipe *pipe = data;
915 DRM_ERROR("called with no initialization\n");
/* flag holds the enabled-interrupt bits (IMR is a disable mask). */
919 if (dev_priv->irq_enabled)
920 flag = ~dev_priv->irq_mask_reg;
923 if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
924 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
925 if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
926 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
932 * Schedule buffer swap at given vertical blank.
/*
 * DRM_IOCTL_I915_VBLANK_SWAP ioctl: validate the request, resolve the
 * target pipe/sequence, optionally dispatch an immediate flip, dedupe
 * against already-queued swaps, and otherwise allocate and enqueue a
 * drm_i915_vbl_swap entry for the vblank tasklet to process.
 */
934 int i915_vblank_swap(struct drm_device *dev, void *data,
935 struct drm_file *file_priv)
937 struct drm_i915_private *dev_priv = dev->dev_private;
938 struct drm_i915_master_private *master_priv;
939 struct drm_i915_vblank_swap *swap = data;
940 struct drm_i915_vbl_swap *vbl_swap;
941 unsigned int pipe, seqtype, curseq, plane;
942 unsigned long irqflags;
943 struct list_head *list;
947 DRM_ERROR("%s called with no initialization\n", __func__);
951 if (!dev->primary->master)
954 master_priv = dev->primary->master->driver_priv;
/* Swaps are not supported on rotated outputs. */
956 if (master_priv->sarea_priv->rotation) {
957 DRM_DEBUG("Rotation not supported\n");
961 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
962 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
964 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
968 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
969 pipe = i915_get_pipe(dev, plane);
971 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
973 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
974 DRM_ERROR("Invalid pipe %d\n", pipe);
978 DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
980 /* It makes no sense to schedule a swap for a drawable that doesn't have
981 * valid information at this point. E.g. this could mean that the X
982 * server is too old to push drawable information to the DRM, in which
983 * case all such swaps would become ineffective.
985 if (!drm_get_drawable_info(dev, swap->drawable)) {
986 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
987 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
991 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
994 * We take the ref here and put it when the swap actually completes
997 ret = drm_vblank_get(dev, pipe);
1000 curseq = drm_vblank_count(dev, pipe);
1002 if (seqtype == _DRM_VBLANK_RELATIVE)
1003 swap->sequence += curseq;
/* 24-bit wraparound comparison: target already passed? */
1005 if ((curseq - swap->sequence) <= (1<<23)) {
1006 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
1007 swap->sequence = curseq + 1;
1009 DRM_DEBUG("Missed target sequence\n");
1010 drm_vblank_put(dev, pipe);
/* Flip requests whose target vblank already passed are dispatched
 * synchronously here instead of being queued. */
1015 if (swap->seqtype & _DRM_VBLANK_FLIP) {
1018 if ((curseq - swap->sequence) <= (1<<23)) {
1019 struct drm_drawable_info *drw;
1021 LOCK_TEST_WITH_RETURN(dev, file_priv);
1023 DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
1025 drw = drm_get_drawable_info(dev, swap->drawable);
1028 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock,
1030 DRM_DEBUG("Invalid drawable ID %d\n",
1032 drm_vblank_put(dev, pipe);
1036 i915_dispatch_vsync_flip(dev, drw, plane);
1038 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
1040 drm_vblank_put(dev, pipe);
/* Dedupe: an identical queued swap just has its flip flag updated. */
1045 DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
1047 list_for_each(list, &dev_priv->vbl_swaps.head) {
1048 vbl_swap = list_entry(list, struct drm_i915_vbl_swap, head);
1050 if (vbl_swap->drw_id == swap->drawable &&
1051 vbl_swap->plane == plane &&
1052 vbl_swap->sequence == swap->sequence) {
1053 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
1054 DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
1055 DRM_DEBUG("Already scheduled\n");
1060 DRM_SPINUNLOCK(&dev_priv->swaps_lock);
/* Arbitrary cap to keep the queue (and memory use) bounded. */
1062 if (dev_priv->swaps_pending >= 100) {
1063 DRM_DEBUG("Too many swaps queued\n");
1064 drm_vblank_put(dev, pipe);
1068 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
1071 DRM_ERROR("Failed to allocate memory to queue swap\n");
1072 drm_vblank_put(dev, pipe);
1078 vbl_swap->drw_id = swap->drawable;
1079 vbl_swap->plane = plane;
1080 vbl_swap->sequence = swap->sequence;
1081 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
1082 vbl_swap->minor = file_priv->minor;
1087 DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
1089 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
1090 dev_priv->swaps_pending++;
1092 DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
/*
 * Quiesce interrupt delivery before the IRQ line is hooked up:
 * mask hardware status updates, mask all sources, disable IER.
 */
1099 void i915_driver_irq_preinstall(struct drm_device * dev)
1101 struct drm_i915_private *dev_priv = dev->dev_private;
1103 I915_WRITE16(HWSTAM, 0xeffe);
1104 I915_WRITE16(IMR, 0x0);
1105 I915_WRITE16(IER, 0x0);
/*
 * Finish IRQ setup after request_irq: initialize the swap queue and
 * locks, register two vblank "pipes" with the DRM core, enable the
 * interrupt sources, and point the HWS at the breadcrumb location.
 */
1108 int i915_driver_irq_postinstall(struct drm_device * dev)
1110 struct drm_i915_private *dev_priv = dev->dev_private;
1111 int ret, num_pipes = 2;
1113 DRM_SPININIT(&dev_priv->swaps_lock, "swap");
1114 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
1115 dev_priv->swaps_pending = 0;
1117 DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
1118 dev_priv->user_irq_refcount = 0;
/* Start with everything masked; i915_enable_interrupt unmasks. */
1119 dev_priv->irq_mask_reg = ~0;
1121 ret = drm_vblank_init(dev, num_pipes);
1125 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1126 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1128 i915_enable_interrupt(dev);
1129 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
1132 * Initialize the hardware status page IRQ location.
1135 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
/*
 * Tear down interrupts: mark everything disabled, mask all sources,
 * and clear any latched pipe/IIR status by writing it back
 * (these registers are write-1-to-clear).
 */
1139 void i915_driver_irq_uninstall(struct drm_device * dev)
1141 struct drm_i915_private *dev_priv = dev->dev_private;
1147 dev_priv->vblank_pipe = 0;
1149 dev_priv->irq_enabled = 0;
1150 I915_WRITE(HWSTAM, 0xffffffff);
1151 I915_WRITE(IMR, 0xffffffff);
1152 I915_WRITE(IER, 0x0);
1154 temp = I915_READ(PIPEASTAT);
1155 I915_WRITE(PIPEASTAT, temp);
1156 temp = I915_READ(PIPEBSTAT);
1157 I915_WRITE(PIPEBSTAT, temp);
1158 temp = I915_READ(IIR);
1159 I915_WRITE(IIR, temp);