1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include <linux/sysrq.h>
30 #include <linux/slab.h>
35 #include "i915_trace.h"
36 #include "intel_drv.h"
38 #define MAX_NOPID ((u32)~0)
41 * Interrupts that are always left unmasked.
43 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
44 * we leave them always unmasked in IMR and then control enabling them through PIPESTAT alone.
47 #define I915_INTERRUPT_ENABLE_FIX \
48 (I915_ASLE_INTERRUPT | \
49 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
50 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
51 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
52 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
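/*
 * Note: the "FIX" bits above are never masked through IMR; the per-pipe
 * sources behind I915_DISPLAY_PIPE_A/B_EVENT_INTERRUPT are instead gated
 * via the PIPESTAT enable bits (see i915_enable_pipestat() below).
 */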
55 /** Interrupts that we mask and unmask at runtime. */
56 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
58 #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
59 PIPE_VBLANK_INTERRUPT_STATUS)
61 #define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
62 PIPE_VBLANK_INTERRUPT_ENABLE)
64 #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
65 DRM_I915_VBLANK_PIPE_B)
67 /* For display hotplug interrupt */
69 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
71 if ((dev_priv->irq_mask & mask) != 0) {
72 dev_priv->irq_mask &= ~mask;
73 I915_WRITE(DEIMR, dev_priv->irq_mask);
79 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
81 if ((dev_priv->irq_mask & mask) != mask) {
82 dev_priv->irq_mask |= mask;
83 I915_WRITE(DEIMR, dev_priv->irq_mask);
89 i915_pipestat(int pipe)
99 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
101 if ((dev_priv->pipestat[pipe] & mask) != mask) {
102 u32 reg = i915_pipestat(pipe);
104 dev_priv->pipestat[pipe] |= mask;
105 /* Enable the interrupt, clear any pending status */
106 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
112 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
114 if ((dev_priv->pipestat[pipe] & mask) != 0) {
115 u32 reg = i915_pipestat(pipe);
117 dev_priv->pipestat[pipe] &= ~mask;
118 I915_WRITE(reg, dev_priv->pipestat[pipe]);
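/*
 * Illustrative usage (mirrors intel_enable_asle() below): PIPESTAT enable
 * bits are only toggled with dev_priv->irq_lock held, e.g.
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 */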
124 * intel_enable_asle - enable ASLE interrupt for OpRegion
126 void intel_enable_asle(struct drm_device *dev)
128 drm_i915_private_t *dev_priv = dev->dev_private;
129 unsigned long irqflags;
131 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
133 if (HAS_PCH_SPLIT(dev))
134 ironlake_enable_display_irq(dev_priv, DE_GSE);
136 i915_enable_pipestat(dev_priv, 1,
137 PIPE_LEGACY_BLC_EVENT_ENABLE);
138 if (INTEL_INFO(dev)->gen >= 4)
139 i915_enable_pipestat(dev_priv, 0,
140 PIPE_LEGACY_BLC_EVENT_ENABLE);
143 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
147 * i915_pipe_enabled - check if a pipe is enabled
149 * @pipe: pipe to check
151 * Reading certain registers when the pipe is disabled can hang the chip.
152 * Use this routine to make sure the PLL is running and the pipe is active
153 * before reading such registers if unsure.
156 i915_pipe_enabled(struct drm_device *dev, int pipe)
158 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
159 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
162 /* Called from drm generic code, passed a 'crtc', which
163 * we use as a pipe index
165 u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
168 unsigned long high_frame;
169 unsigned long low_frame;
170 u32 high1, high2, low;
172 if (!i915_pipe_enabled(dev, pipe)) {
173 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
178 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
179 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
182 * High & low register fields aren't synchronized, so make sure
183 * we get a low value that's stable across two reads of the high register.
187 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
188 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
189 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
190 } while (high1 != high2);
192 high1 >>= PIPE_FRAME_HIGH_SHIFT;
193 low >>= PIPE_FRAME_LOW_SHIFT;
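/* The hardware frame counter is 24 bits wide: 16 bits from the high
 * register and 8 bits from the low/pixel-count register, combined below.
 */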
194 return (high1 << 8) | low;
197 u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
199 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
200 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
202 if (!i915_pipe_enabled(dev, pipe)) {
203 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
208 return I915_READ(reg);
211 int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
212 int *vpos, int *hpos)
214 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
215 u32 vbl = 0, position = 0;
216 int vbl_start, vbl_end, htotal, vtotal;
220 if (!i915_pipe_enabled(dev, pipe)) {
221 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
227 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
229 if (INTEL_INFO(dev)->gen >= 4) {
230 /* No obvious pixelcount register. Only query vertical
231 * scanout position from Display scan line register.
233 position = I915_READ(PIPEDSL(pipe));
235 /* Decode into vertical scanout position. Don't have
236 * horizontal scanout position.
238 *vpos = position & 0x1fff;
241 /* Have access to pixelcount since start of frame.
242 * We can split this into vertical and horizontal scanout position.
245 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
247 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
248 *vpos = position / htotal;
249 *hpos = position - (*vpos * htotal);
252 /* Query vblank area. */
253 vbl = I915_READ(VBLANK(pipe));
255 /* Test position against vblank region. */
256 vbl_start = vbl & 0x1fff;
257 vbl_end = (vbl >> 16) & 0x1fff;
259 if ((*vpos < vbl_start) || (*vpos > vbl_end))
262 /* Inside "upper part" of vblank area? Apply corrective offset: */
263 if (in_vbl && (*vpos >= vbl_start))
264 *vpos = *vpos - vtotal;
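/* By convention, positions inside the vblank are reported as negative
 * values counting up towards zero, which marks the start of active scanout.
 */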
266 /* Readouts valid? */
268 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
272 ret |= DRM_SCANOUTPOS_INVBL;
277 int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
279 struct timeval *vblank_time,
282 struct drm_crtc *drmcrtc;
284 if (crtc < 0 || crtc >= dev->num_crtcs) {
285 DRM_ERROR("Invalid crtc %d\n", crtc);
289 /* Get drm_crtc to timestamp: */
290 drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
292 /* Helper routine in DRM core does all the work: */
293 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
294 vblank_time, flags, drmcrtc);
298 * Handle hotplug events outside the interrupt handler proper.
300 static void i915_hotplug_work_func(struct work_struct *work)
302 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
304 struct drm_device *dev = dev_priv->dev;
305 struct drm_mode_config *mode_config = &dev->mode_config;
306 struct intel_encoder *encoder;
308 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
309 if (encoder->hot_plug)
310 encoder->hot_plug(encoder);
312 /* Just fire off a uevent and let userspace tell us what to do */
313 drm_helper_hpd_irq_event(dev);
316 static void i915_handle_rps_change(struct drm_device *dev)
318 drm_i915_private_t *dev_priv = dev->dev_private;
319 u32 busy_up, busy_down, max_avg, min_avg;
320 u8 new_delay = dev_priv->cur_delay;
322 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
323 busy_up = I915_READ(RCPREVBSYTUPAVG);
324 busy_down = I915_READ(RCPREVBSYTDNAVG);
325 max_avg = I915_READ(RCBMAXAVG);
326 min_avg = I915_READ(RCBMINAVG);
328 /* Handle RPS change request from hw */
329 if (busy_up > max_avg) {
330 if (dev_priv->cur_delay != dev_priv->max_delay)
331 new_delay = dev_priv->cur_delay - 1;
332 if (new_delay < dev_priv->max_delay)
333 new_delay = dev_priv->max_delay;
334 } else if (busy_down < min_avg) {
335 if (dev_priv->cur_delay != dev_priv->min_delay)
336 new_delay = dev_priv->cur_delay + 1;
337 if (new_delay > dev_priv->min_delay)
338 new_delay = dev_priv->min_delay;
341 if (ironlake_set_drps(dev, new_delay))
342 dev_priv->cur_delay = new_delay;
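/* Note: on Ironlake a numerically lower delay value corresponds to a higher
 * render frequency, which is why new_delay steps down towards max_delay when
 * the hardware reports being busier than the configured maximum average.
 */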
347 static void notify_ring(struct drm_device *dev,
348 struct intel_ring_buffer *ring)
350 struct drm_i915_private *dev_priv = dev->dev_private;
351 u32 seqno = ring->get_seqno(ring);
353 trace_i915_gem_request_complete(dev, seqno);
355 ring->irq_seqno = seqno;
356 wake_up_all(&ring->irq_queue);
358 dev_priv->hangcheck_count = 0;
359 mod_timer(&dev_priv->hangcheck_timer,
360 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
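/* A ring that reports a new completed seqno is clearly making progress, so
 * after waking any waiters on its irq_queue the hangcheck counter is cleared
 * and the hangcheck timer pushed out above.
 */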
363 static void gen6_pm_irq_handler(struct drm_device *dev)
365 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
366 u8 new_delay = dev_priv->cur_delay;
369 pm_iir = I915_READ(GEN6_PMIIR);
373 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
374 if (dev_priv->cur_delay != dev_priv->max_delay)
375 new_delay = dev_priv->cur_delay + 1;
376 if (new_delay > dev_priv->max_delay)
377 new_delay = dev_priv->max_delay;
378 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
379 if (dev_priv->cur_delay != dev_priv->min_delay)
380 new_delay = dev_priv->cur_delay - 1;
381 if (new_delay < dev_priv->min_delay) {
382 new_delay = dev_priv->min_delay;
383 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
384 I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
385 ((new_delay << 16) & 0x3f0000));
387 /* Make sure we continue to get down interrupts
388 * until we hit the minimum frequency */
389 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
390 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
395 gen6_set_rps(dev, new_delay);
396 dev_priv->cur_delay = new_delay;
398 I915_WRITE(GEN6_PMIIR, pm_iir);
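/* Gen6 RPS: the PM unit raises up/down threshold interrupts and the
 * frequency is nudged one step at a time between min_delay and max_delay;
 * the PMIIR write above acknowledges the events that were just handled.
 */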
401 static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
403 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
405 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
407 struct drm_i915_master_private *master_priv;
408 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
411 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
413 /* disable master interrupt before clearing iir */
414 de_ier = I915_READ(DEIER);
415 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
418 de_iir = I915_READ(DEIIR);
419 gt_iir = I915_READ(GTIIR);
420 pch_iir = I915_READ(SDEIIR);
421 pm_iir = I915_READ(GEN6_PMIIR);
423 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
424 (!IS_GEN6(dev) || pm_iir == 0))
427 if (HAS_PCH_CPT(dev))
428 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
430 hotplug_mask = SDE_HOTPLUG_MASK;
434 if (dev->primary->master) {
435 master_priv = dev->primary->master->driver_priv;
436 if (master_priv->sarea_priv)
437 master_priv->sarea_priv->last_dispatch =
438 READ_BREADCRUMB(dev_priv);
441 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
442 notify_ring(dev, &dev_priv->ring[RCS]);
443 if (gt_iir & bsd_usr_interrupt)
444 notify_ring(dev, &dev_priv->ring[VCS]);
445 if (gt_iir & GT_BLT_USER_INTERRUPT)
446 notify_ring(dev, &dev_priv->ring[BCS]);
449 intel_opregion_gse_intr(dev);
451 if (de_iir & DE_PLANEA_FLIP_DONE) {
452 intel_prepare_page_flip(dev, 0);
453 intel_finish_page_flip_plane(dev, 0);
456 if (de_iir & DE_PLANEB_FLIP_DONE) {
457 intel_prepare_page_flip(dev, 1);
458 intel_finish_page_flip_plane(dev, 1);
461 if (de_iir & DE_PIPEA_VBLANK)
462 drm_handle_vblank(dev, 0);
464 if (de_iir & DE_PIPEB_VBLANK)
465 drm_handle_vblank(dev, 1);
467 /* check event from PCH */
468 if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
469 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
471 if (de_iir & DE_PCU_EVENT) {
472 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
473 i915_handle_rps_change(dev);
477 gen6_pm_irq_handler(dev);
479 /* should clear PCH hotplug events before clearing the CPU IIR */
480 I915_WRITE(SDEIIR, pch_iir);
481 I915_WRITE(GTIIR, gt_iir);
482 I915_WRITE(DEIIR, de_iir);
485 I915_WRITE(DEIER, de_ier);
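/* Restore the master interrupt enable that was cleared on entry so further
 * display engine interrupts can be delivered.
 */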
492 * i915_error_work_func - do process context error handling work
495 * Fire an error uevent so userspace can see that a hang or error occurred.
498 static void i915_error_work_func(struct work_struct *work)
500 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
502 struct drm_device *dev = dev_priv->dev;
503 char *error_event[] = { "ERROR=1", NULL };
504 char *reset_event[] = { "RESET=1", NULL };
505 char *reset_done_event[] = { "ERROR=0", NULL };
507 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
509 if (atomic_read(&dev_priv->mm.wedged)) {
510 DRM_DEBUG_DRIVER("resetting chip\n");
511 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
512 if (!i915_reset(dev, GRDOM_RENDER)) {
513 atomic_set(&dev_priv->mm.wedged, 0);
514 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
516 complete_all(&dev_priv->error_completion);
520 #ifdef CONFIG_DEBUG_FS
521 static struct drm_i915_error_object *
522 i915_error_object_create(struct drm_device *dev,
523 struct drm_i915_gem_object *src)
525 drm_i915_private_t *dev_priv = dev->dev_private;
526 struct drm_i915_error_object *dst;
527 int page, page_count;
530 if (src == NULL || src->pages == NULL)
533 page_count = src->base.size / PAGE_SIZE;
535 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
539 reloc_offset = src->gtt_offset;
540 for (page = 0; page < page_count; page++) {
545 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
549 local_irq_save(flags);
550 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
552 memcpy_fromio(d, s, PAGE_SIZE);
553 io_mapping_unmap_atomic(s);
554 local_irq_restore(flags);
556 dst->pages[page] = d;
558 reloc_offset += PAGE_SIZE;
560 dst->page_count = page_count;
561 dst->gtt_offset = src->gtt_offset;
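/* The copy above runs from error/interrupt context, hence the GFP_ATOMIC
 * allocations and the atomic GTT mapping with local interrupts disabled
 * while each page is snapshotted.
 */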
567 kfree(dst->pages[page]);
573 i915_error_object_free(struct drm_i915_error_object *obj)
580 for (page = 0; page < obj->page_count; page++)
581 kfree(obj->pages[page]);
587 i915_error_state_free(struct drm_device *dev,
588 struct drm_i915_error_state *error)
590 i915_error_object_free(error->batchbuffer[0]);
591 i915_error_object_free(error->batchbuffer[1]);
592 i915_error_object_free(error->ringbuffer);
593 kfree(error->active_bo);
594 kfree(error->overlay);
599 i915_get_bbaddr(struct drm_device *dev, u32 *ring)
603 if (IS_I830(dev) || IS_845G(dev))
604 cmd = MI_BATCH_BUFFER;
605 else if (INTEL_INFO(dev)->gen >= 4)
606 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
607 MI_BATCH_NON_SECURE_I965);
609 cmd = (MI_BATCH_BUFFER_START | (2 << 6));
611 return ring[0] == cmd ? ring[1] : 0;
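/* If the dword at ring[0] is a batchbuffer start command for this
 * generation, the following dword is the batch address; otherwise 0 is
 * returned and the caller keeps scanning backwards through the ring.
 */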
615 i915_ringbuffer_last_batch(struct drm_device *dev,
616 struct intel_ring_buffer *ring)
618 struct drm_i915_private *dev_priv = dev->dev_private;
622 /* Locate the current position in the ringbuffer and walk back
623 * to find the most recently dispatched batch buffer.
625 head = I915_READ_HEAD(ring) & HEAD_ADDR;
627 val = (u32 *)(ring->virtual_start + head);
628 while (--val >= (u32 *)ring->virtual_start) {
629 bbaddr = i915_get_bbaddr(dev, val);
634 val = (u32 *)(ring->virtual_start + ring->size);
635 while (--val >= (u32 *)ring->virtual_start) {
636 bbaddr = i915_get_bbaddr(dev, val);
644 static u32 capture_bo_list(struct drm_i915_error_buffer *err,
646 struct list_head *head)
648 struct drm_i915_gem_object *obj;
651 list_for_each_entry(obj, head, mm_list) {
652 err->size = obj->base.size;
653 err->name = obj->base.name;
654 err->seqno = obj->last_rendering_seqno;
655 err->gtt_offset = obj->gtt_offset;
656 err->read_domains = obj->base.read_domains;
657 err->write_domain = obj->base.write_domain;
658 err->fence_reg = obj->fence_reg;
660 if (obj->pin_count > 0)
662 if (obj->user_pin_count > 0)
664 err->tiling = obj->tiling_mode;
665 err->dirty = obj->dirty;
666 err->purgeable = obj->madv != I915_MADV_WILLNEED;
667 err->ring = obj->ring ? obj->ring->id : 0;
678 static void i915_gem_record_fences(struct drm_device *dev,
679 struct drm_i915_error_state *error)
681 struct drm_i915_private *dev_priv = dev->dev_private;
685 switch (INTEL_INFO(dev)->gen) {
687 for (i = 0; i < 16; i++)
688 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
692 for (i = 0; i < 16; i++)
693 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
696 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
697 for (i = 0; i < 8; i++)
698 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
700 for (i = 0; i < 8; i++)
701 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
708 * i915_capture_error_state - capture an error record for later analysis
711 * Should be called when an error is detected (either a hang or an error
712 * interrupt) to capture error state from the time of the error. Fills
713 * out a structure which becomes available in debugfs for user level tools to pick up.
716 static void i915_capture_error_state(struct drm_device *dev)
718 struct drm_i915_private *dev_priv = dev->dev_private;
719 struct drm_i915_gem_object *obj;
720 struct drm_i915_error_state *error;
721 struct drm_i915_gem_object *batchbuffer[2];
726 spin_lock_irqsave(&dev_priv->error_lock, flags);
727 error = dev_priv->first_error;
728 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
732 error = kmalloc(sizeof(*error), GFP_ATOMIC);
734 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
738 DRM_DEBUG_DRIVER("generating error event\n");
740 error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
741 error->eir = I915_READ(EIR);
742 error->pgtbl_er = I915_READ(PGTBL_ER);
743 error->pipeastat = I915_READ(PIPEASTAT);
744 error->pipebstat = I915_READ(PIPEBSTAT);
745 error->instpm = I915_READ(INSTPM);
747 if (INTEL_INFO(dev)->gen >= 6) {
748 error->error = I915_READ(ERROR_GEN6);
750 error->bcs_acthd = I915_READ(BCS_ACTHD);
751 error->bcs_ipehr = I915_READ(BCS_IPEHR);
752 error->bcs_ipeir = I915_READ(BCS_IPEIR);
753 error->bcs_instdone = I915_READ(BCS_INSTDONE);
754 error->bcs_seqno = 0;
755 if (dev_priv->ring[BCS].get_seqno)
756 error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
758 error->vcs_acthd = I915_READ(VCS_ACTHD);
759 error->vcs_ipehr = I915_READ(VCS_IPEHR);
760 error->vcs_ipeir = I915_READ(VCS_IPEIR);
761 error->vcs_instdone = I915_READ(VCS_INSTDONE);
762 error->vcs_seqno = 0;
763 if (dev_priv->ring[VCS].get_seqno)
764 error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
766 if (INTEL_INFO(dev)->gen >= 4) {
767 error->ipeir = I915_READ(IPEIR_I965);
768 error->ipehr = I915_READ(IPEHR_I965);
769 error->instdone = I915_READ(INSTDONE_I965);
770 error->instps = I915_READ(INSTPS);
771 error->instdone1 = I915_READ(INSTDONE1);
772 error->acthd = I915_READ(ACTHD_I965);
773 error->bbaddr = I915_READ64(BB_ADDR);
775 error->ipeir = I915_READ(IPEIR);
776 error->ipehr = I915_READ(IPEHR);
777 error->instdone = I915_READ(INSTDONE);
778 error->acthd = I915_READ(ACTHD);
781 i915_gem_record_fences(dev, error);
783 bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
785 /* Grab the current batchbuffer, most likely to have crashed. */
786 batchbuffer[0] = NULL;
787 batchbuffer[1] = NULL;
789 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
790 if (batchbuffer[0] == NULL &&
791 bbaddr >= obj->gtt_offset &&
792 bbaddr < obj->gtt_offset + obj->base.size)
793 batchbuffer[0] = obj;
795 if (batchbuffer[1] == NULL &&
796 error->acthd >= obj->gtt_offset &&
797 error->acthd < obj->gtt_offset + obj->base.size)
798 batchbuffer[1] = obj;
802 /* Scan the other lists for completeness for those bizarre errors. */
803 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
804 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
805 if (batchbuffer[0] == NULL &&
806 bbaddr >= obj->gtt_offset &&
807 bbaddr < obj->gtt_offset + obj->base.size)
808 batchbuffer[0] = obj;
810 if (batchbuffer[1] == NULL &&
811 error->acthd >= obj->gtt_offset &&
812 error->acthd < obj->gtt_offset + obj->base.size)
813 batchbuffer[1] = obj;
815 if (batchbuffer[0] && batchbuffer[1])
819 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
820 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
821 if (batchbuffer[0] == NULL &&
822 bbaddr >= obj->gtt_offset &&
823 bbaddr < obj->gtt_offset + obj->base.size)
824 batchbuffer[0] = obj;
826 if (batchbuffer[1] == NULL &&
827 error->acthd >= obj->gtt_offset &&
828 error->acthd < obj->gtt_offset + obj->base.size)
829 batchbuffer[1] = obj;
831 if (batchbuffer[0] && batchbuffer[1])
836 /* We need to copy these to an anonymous buffer as the simplest
837 * method to avoid being overwritten by userspace.
839 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
840 if (batchbuffer[1] != batchbuffer[0])
841 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
843 error->batchbuffer[1] = NULL;
845 /* Record the ringbuffer */
846 error->ringbuffer = i915_error_object_create(dev,
847 dev_priv->ring[RCS].obj);
849 /* Record buffers on the active and pinned lists. */
850 error->active_bo = NULL;
851 error->pinned_bo = NULL;
853 error->active_bo_count = count;
854 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
856 error->pinned_bo_count = count - error->active_bo_count;
859 error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
861 if (error->active_bo)
863 error->active_bo + error->active_bo_count;
866 if (error->active_bo)
867 error->active_bo_count =
868 capture_bo_list(error->active_bo,
869 error->active_bo_count,
870 &dev_priv->mm.active_list);
872 if (error->pinned_bo)
873 error->pinned_bo_count =
874 capture_bo_list(error->pinned_bo,
875 error->pinned_bo_count,
876 &dev_priv->mm.pinned_list);
878 do_gettimeofday(&error->time);
880 error->overlay = intel_overlay_capture_error_state(dev);
881 error->display = intel_display_capture_error_state(dev);
883 spin_lock_irqsave(&dev_priv->error_lock, flags);
884 if (dev_priv->first_error == NULL) {
885 dev_priv->first_error = error;
888 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
891 i915_error_state_free(dev, error);
894 void i915_destroy_error_state(struct drm_device *dev)
896 struct drm_i915_private *dev_priv = dev->dev_private;
897 struct drm_i915_error_state *error;
899 spin_lock(&dev_priv->error_lock);
900 error = dev_priv->first_error;
901 dev_priv->first_error = NULL;
902 spin_unlock(&dev_priv->error_lock);
905 i915_error_state_free(dev, error);
908 #define i915_capture_error_state(x)
911 static void i915_report_and_clear_eir(struct drm_device *dev)
913 struct drm_i915_private *dev_priv = dev->dev_private;
914 u32 eir = I915_READ(EIR);
919 printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
923 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
924 u32 ipeir = I915_READ(IPEIR_I965);
926 printk(KERN_ERR " IPEIR: 0x%08x\n",
927 I915_READ(IPEIR_I965));
928 printk(KERN_ERR " IPEHR: 0x%08x\n",
929 I915_READ(IPEHR_I965));
930 printk(KERN_ERR " INSTDONE: 0x%08x\n",
931 I915_READ(INSTDONE_I965));
932 printk(KERN_ERR " INSTPS: 0x%08x\n",
934 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
935 I915_READ(INSTDONE1));
936 printk(KERN_ERR " ACTHD: 0x%08x\n",
937 I915_READ(ACTHD_I965));
938 I915_WRITE(IPEIR_I965, ipeir);
939 POSTING_READ(IPEIR_I965);
941 if (eir & GM45_ERROR_PAGE_TABLE) {
942 u32 pgtbl_err = I915_READ(PGTBL_ER);
943 printk(KERN_ERR "page table error\n");
944 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
946 I915_WRITE(PGTBL_ER, pgtbl_err);
947 POSTING_READ(PGTBL_ER);
952 if (eir & I915_ERROR_PAGE_TABLE) {
953 u32 pgtbl_err = I915_READ(PGTBL_ER);
954 printk(KERN_ERR "page table error\n");
955 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
957 I915_WRITE(PGTBL_ER, pgtbl_err);
958 POSTING_READ(PGTBL_ER);
962 if (eir & I915_ERROR_MEMORY_REFRESH) {
963 u32 pipea_stats = I915_READ(PIPEASTAT);
964 u32 pipeb_stats = I915_READ(PIPEBSTAT);
966 printk(KERN_ERR "memory refresh error\n");
967 printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
969 printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
971 /* pipestat has already been acked */
973 if (eir & I915_ERROR_INSTRUCTION) {
974 printk(KERN_ERR "instruction error\n");
975 printk(KERN_ERR " INSTPM: 0x%08x\n",
977 if (INTEL_INFO(dev)->gen < 4) {
978 u32 ipeir = I915_READ(IPEIR);
980 printk(KERN_ERR " IPEIR: 0x%08x\n",
982 printk(KERN_ERR " IPEHR: 0x%08x\n",
984 printk(KERN_ERR " INSTDONE: 0x%08x\n",
985 I915_READ(INSTDONE));
986 printk(KERN_ERR " ACTHD: 0x%08x\n",
988 I915_WRITE(IPEIR, ipeir);
991 u32 ipeir = I915_READ(IPEIR_I965);
993 printk(KERN_ERR " IPEIR: 0x%08x\n",
994 I915_READ(IPEIR_I965));
995 printk(KERN_ERR " IPEHR: 0x%08x\n",
996 I915_READ(IPEHR_I965));
997 printk(KERN_ERR " INSTDONE: 0x%08x\n",
998 I915_READ(INSTDONE_I965));
999 printk(KERN_ERR " INSTPS: 0x%08x\n",
1001 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
1002 I915_READ(INSTDONE1));
1003 printk(KERN_ERR " ACTHD: 0x%08x\n",
1004 I915_READ(ACTHD_I965));
1005 I915_WRITE(IPEIR_I965, ipeir);
1006 POSTING_READ(IPEIR_I965);
1010 I915_WRITE(EIR, eir);
1012 eir = I915_READ(EIR);
1015 * some errors might have become stuck, mask them.
1018 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1019 I915_WRITE(EMR, I915_READ(EMR) | eir);
1020 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1025 * i915_handle_error - handle an error interrupt
1028 * Do some basic checking of register state at error interrupt time and
1029 * dump it to the syslog. Also call i915_capture_error_state() to make
1030 * sure we get a record and make it available in debugfs. Fire a uevent
1031 * so userspace knows something bad happened (should trigger collection
1032 * of a ring dump etc.).
1034 void i915_handle_error(struct drm_device *dev, bool wedged)
1036 struct drm_i915_private *dev_priv = dev->dev_private;
1038 i915_capture_error_state(dev);
1039 i915_report_and_clear_eir(dev);
1042 INIT_COMPLETION(dev_priv->error_completion);
1043 atomic_set(&dev_priv->mm.wedged, 1);
1046 * Wake up waiting processes so they don't hang
1048 wake_up_all(&dev_priv->ring[RCS].irq_queue);
1050 wake_up_all(&dev_priv->ring[VCS].irq_queue);
1052 wake_up_all(&dev_priv->ring[BCS].irq_queue);
1055 queue_work(dev_priv->wq, &dev_priv->error_work);
1058 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1060 drm_i915_private_t *dev_priv = dev->dev_private;
1061 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1062 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1063 struct drm_i915_gem_object *obj;
1064 struct intel_unpin_work *work;
1065 unsigned long flags;
1066 bool stall_detected;
1068 /* Ignore early vblank irqs */
1069 if (intel_crtc == NULL)
1072 spin_lock_irqsave(&dev->event_lock, flags);
1073 work = intel_crtc->unpin_work;
1075 if (work == NULL || work->pending || !work->enable_stall_check) {
1076 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1077 spin_unlock_irqrestore(&dev->event_lock, flags);
1081 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1082 obj = work->pending_flip_obj;
1083 if (INTEL_INFO(dev)->gen >= 4) {
1084 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
1085 stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
1087 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
1088 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1089 crtc->y * crtc->fb->pitch +
1090 crtc->x * crtc->fb->bits_per_pixel/8);
1093 spin_unlock_irqrestore(&dev->event_lock, flags);
1095 if (stall_detected) {
1096 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1097 intel_prepare_page_flip(dev, intel_crtc->plane);
1101 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1103 struct drm_device *dev = (struct drm_device *) arg;
1104 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1105 struct drm_i915_master_private *master_priv;
1107 u32 pipea_stats, pipeb_stats;
1110 unsigned long irqflags;
1114 atomic_inc(&dev_priv->irq_received);
1116 if (HAS_PCH_SPLIT(dev))
1117 return ironlake_irq_handler(dev);
1119 iir = I915_READ(IIR);
1121 if (INTEL_INFO(dev)->gen >= 4)
1122 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
1124 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
1127 irq_received = iir != 0;
1129 /* Can't rely on pipestat interrupt bit in iir as it might
1130 * have been cleared after the pipestat interrupt was received.
1131 * It doesn't set the bit in iir again, but it still produces
1132 * interrupts (for non-MSI).
1134 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1135 pipea_stats = I915_READ(PIPEASTAT);
1136 pipeb_stats = I915_READ(PIPEBSTAT);
1138 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1139 i915_handle_error(dev, false);
1142 * Clear the PIPE(A|B)STAT regs before the IIR
1144 if (pipea_stats & 0x8000ffff) {
1145 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
1146 DRM_DEBUG_DRIVER("pipe a underrun\n");
1147 I915_WRITE(PIPEASTAT, pipea_stats);
1151 if (pipeb_stats & 0x8000ffff) {
1152 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
1153 DRM_DEBUG_DRIVER("pipe b underrun\n");
1154 I915_WRITE(PIPEBSTAT, pipeb_stats);
1157 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1164 /* Consume port. Then clear IIR or we'll miss events */
1165 if ((I915_HAS_HOTPLUG(dev)) &&
1166 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
1167 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1169 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1171 if (hotplug_status & dev_priv->hotplug_supported_mask)
1172 queue_work(dev_priv->wq,
1173 &dev_priv->hotplug_work);
1175 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1176 I915_READ(PORT_HOTPLUG_STAT);
1179 I915_WRITE(IIR, iir);
1180 new_iir = I915_READ(IIR); /* Flush posted writes */
1182 if (dev->primary->master) {
1183 master_priv = dev->primary->master->driver_priv;
1184 if (master_priv->sarea_priv)
1185 master_priv->sarea_priv->last_dispatch =
1186 READ_BREADCRUMB(dev_priv);
1189 if (iir & I915_USER_INTERRUPT)
1190 notify_ring(dev, &dev_priv->ring[RCS]);
1191 if (iir & I915_BSD_USER_INTERRUPT)
1192 notify_ring(dev, &dev_priv->ring[VCS]);
1194 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1195 intel_prepare_page_flip(dev, 0);
1196 if (dev_priv->flip_pending_is_done)
1197 intel_finish_page_flip_plane(dev, 0);
1200 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1201 intel_prepare_page_flip(dev, 1);
1202 if (dev_priv->flip_pending_is_done)
1203 intel_finish_page_flip_plane(dev, 1);
1206 if (pipea_stats & vblank_status) {
1208 drm_handle_vblank(dev, 0);
1209 if (!dev_priv->flip_pending_is_done) {
1210 i915_pageflip_stall_check(dev, 0);
1211 intel_finish_page_flip(dev, 0);
1215 if (pipeb_stats & vblank_status) {
1217 drm_handle_vblank(dev, 1);
1218 if (!dev_priv->flip_pending_is_done) {
1219 i915_pageflip_stall_check(dev, 1);
1220 intel_finish_page_flip(dev, 1);
1224 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
1225 (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
1226 (iir & I915_ASLE_INTERRUPT))
1227 intel_opregion_asle_intr(dev);
1229 /* With MSI, interrupts are only generated when iir
1230 * transitions from zero to nonzero. If another bit got
1231 * set while we were handling the existing iir bits, then
1232 * we would never get another interrupt.
1234 * This is fine on non-MSI as well, as if we hit this path
1235 * we avoid exiting the interrupt handler only to generate a spurious interrupt.
1238 * Note that for MSI this could cause a stray interrupt report
1239 * if an interrupt landed in the time between writing IIR and
1240 * the posting read. This should be rare enough to never
1241 * trigger the 99% of 100,000 interrupts test for disabling MSI.
1250 static int i915_emit_irq(struct drm_device * dev)
1252 drm_i915_private_t *dev_priv = dev->dev_private;
1253 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1255 i915_kernel_lost_context(dev);
1257 DRM_DEBUG_DRIVER("\n");
1259 dev_priv->counter++;
1260 if (dev_priv->counter > 0x7FFFFFFFUL)
1261 dev_priv->counter = 1;
1262 if (master_priv->sarea_priv)
1263 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1265 if (BEGIN_LP_RING(4) == 0) {
1266 OUT_RING(MI_STORE_DWORD_INDEX);
1267 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1268 OUT_RING(dev_priv->counter);
1269 OUT_RING(MI_USER_INTERRUPT);
1273 return dev_priv->counter;
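/* The breadcrumb stored at I915_BREADCRUMB_INDEX in the hardware status
 * page is what READ_BREADCRUMB() compares against in i915_wait_irq(); the
 * trailing MI_USER_INTERRUPT then wakes any such waiter.
 */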
1276 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
1278 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1279 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1281 if (dev_priv->trace_irq_seqno == 0 &&
1282 ring->irq_get(ring))
1283 dev_priv->trace_irq_seqno = seqno;
1286 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1288 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1289 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1291 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1293 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1294 READ_BREADCRUMB(dev_priv));
1296 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1297 if (master_priv->sarea_priv)
1298 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1302 if (master_priv->sarea_priv)
1303 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1306 if (ring->irq_get(ring)) {
1307 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
1308 READ_BREADCRUMB(dev_priv) >= irq_nr);
1309 ring->irq_put(ring);
1312 if (ret == -EBUSY) {
1313 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
1314 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
1320 /* Needs the lock as it touches the ring.
1322 int i915_irq_emit(struct drm_device *dev, void *data,
1323 struct drm_file *file_priv)
1325 drm_i915_private_t *dev_priv = dev->dev_private;
1326 drm_i915_irq_emit_t *emit = data;
1329 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1330 DRM_ERROR("called with no initialization\n");
1334 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1336 mutex_lock(&dev->struct_mutex);
1337 result = i915_emit_irq(dev);
1338 mutex_unlock(&dev->struct_mutex);
1340 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
1341 DRM_ERROR("copy_to_user\n");
1348 /* Doesn't need the hardware lock.
1350 int i915_irq_wait(struct drm_device *dev, void *data,
1351 struct drm_file *file_priv)
1353 drm_i915_private_t *dev_priv = dev->dev_private;
1354 drm_i915_irq_wait_t *irqwait = data;
1357 DRM_ERROR("called with no initialization\n");
1361 return i915_wait_irq(dev, irqwait->irq_seq);
1364 /* Called from drm generic code, passed 'crtc' which
1365 * we use as a pipe index
1367 int i915_enable_vblank(struct drm_device *dev, int pipe)
1369 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1370 unsigned long irqflags;
1372 if (!i915_pipe_enabled(dev, pipe))
1375 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1376 if (HAS_PCH_SPLIT(dev))
1377 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1378 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1379 else if (INTEL_INFO(dev)->gen >= 4)
1380 i915_enable_pipestat(dev_priv, pipe,
1381 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1383 i915_enable_pipestat(dev_priv, pipe,
1384 PIPE_VBLANK_INTERRUPT_ENABLE);
1385 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1389 /* Called from drm generic code, passed 'crtc' which
1390 * we use as a pipe index
1392 void i915_disable_vblank(struct drm_device *dev, int pipe)
1394 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1395 unsigned long irqflags;
1397 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1398 if (HAS_PCH_SPLIT(dev))
1399 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1400 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1402 i915_disable_pipestat(dev_priv, pipe,
1403 PIPE_VBLANK_INTERRUPT_ENABLE |
1404 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1405 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1408 void i915_enable_interrupt (struct drm_device *dev)
1410 struct drm_i915_private *dev_priv = dev->dev_private;
1412 if (!HAS_PCH_SPLIT(dev))
1413 intel_opregion_enable_asle(dev);
1414 dev_priv->irq_enabled = 1;
1418 /* Set the vblank monitor pipe
1420 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1421 struct drm_file *file_priv)
1423 drm_i915_private_t *dev_priv = dev->dev_private;
1426 DRM_ERROR("called with no initialization\n");
1433 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1434 struct drm_file *file_priv)
1436 drm_i915_private_t *dev_priv = dev->dev_private;
1437 drm_i915_vblank_pipe_t *pipe = data;
1440 DRM_ERROR("called with no initialization\n");
1444 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1450 * Schedule buffer swap at given vertical blank.
1452 int i915_vblank_swap(struct drm_device *dev, void *data,
1453 struct drm_file *file_priv)
1455 /* The delayed swap mechanism was fundamentally racy, and has been
1456 * removed. The model was that the client requested a delayed flip/swap
1457 * from the kernel, then waited for vblank before continuing to perform
1458 * rendering. The problem was that the kernel might wake the client
1459 * up before it dispatched the vblank swap (since the lock has to be
1460 * held while touching the ringbuffer), in which case the client would
1461 * clear and start the next frame before the swap occurred, and
1462 * flicker would occur in addition to likely missing the vblank.
1464 * In the absence of this ioctl, userland falls back to a correct path
1465 * of waiting for a vblank, then dispatching the swap on its own.
1466 * Context switching to userland and back is plenty fast enough for
1467 * meeting the requirements of vblank swapping.
1473 ring_last_seqno(struct intel_ring_buffer *ring)
1475 return list_entry(ring->request_list.prev,
1476 struct drm_i915_gem_request, list)->seqno;
1479 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1481 if (list_empty(&ring->request_list) ||
1482 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1483 /* Issue a wake-up to catch stuck h/w. */
1484 if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
1485 DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1487 ring->waiting_seqno,
1488 ring->get_seqno(ring));
1489 wake_up_all(&ring->irq_queue);
1497 static bool kick_ring(struct intel_ring_buffer *ring)
1499 struct drm_device *dev = ring->dev;
1500 struct drm_i915_private *dev_priv = dev->dev_private;
1501 u32 tmp = I915_READ_CTL(ring);
1502 if (tmp & RING_WAIT) {
1503 DRM_ERROR("Kicking stuck wait on %s\n",
1505 I915_WRITE_CTL(ring, tmp);
1509 (tmp & RING_WAIT_SEMAPHORE)) {
1510 DRM_ERROR("Kicking stuck semaphore on %s\n",
1512 I915_WRITE_CTL(ring, tmp);
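/* Writing the control register back with the stuck wait/semaphore bit still
 * set is intended to clear the wait condition, giving the ring a chance to
 * make progress before declaring a full GPU hang.
 */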
1519 * This is called when the chip hasn't reported back with completed
1520 * batchbuffers in a long time. The first time this is called we simply record
1521 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1522 * again, we assume the chip is wedged and try to fix it.
1524 void i915_hangcheck_elapsed(unsigned long data)
1526 struct drm_device *dev = (struct drm_device *)data;
1527 drm_i915_private_t *dev_priv = dev->dev_private;
1528 uint32_t acthd, instdone, instdone1;
1531 /* If all work is done then ACTHD clearly hasn't advanced. */
1532 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1533 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1534 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1535 dev_priv->hangcheck_count = 0;
1541 if (INTEL_INFO(dev)->gen < 4) {
1542 acthd = I915_READ(ACTHD);
1543 instdone = I915_READ(INSTDONE);
1546 acthd = I915_READ(ACTHD_I965);
1547 instdone = I915_READ(INSTDONE_I965);
1548 instdone1 = I915_READ(INSTDONE1);
1551 if (dev_priv->last_acthd == acthd &&
1552 dev_priv->last_instdone == instdone &&
1553 dev_priv->last_instdone1 == instdone1) {
1554 if (dev_priv->hangcheck_count++ > 1) {
1555 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1557 if (!IS_GEN2(dev)) {
1558 /* Is the chip hanging on a WAIT_FOR_EVENT?
1559 * If so we can simply poke the RB_WAIT bit
1560 * and break the hang. This should work on
1561 * all but the second generation chipsets.
1564 if (kick_ring(&dev_priv->ring[RCS]))
1568 kick_ring(&dev_priv->ring[VCS]))
1572 kick_ring(&dev_priv->ring[BCS]))
1576 i915_handle_error(dev, true);
1580 dev_priv->hangcheck_count = 0;
1582 dev_priv->last_acthd = acthd;
1583 dev_priv->last_instdone = instdone;
1584 dev_priv->last_instdone1 = instdone1;
1588 /* Reset timer in case chip hangs without another request being added */
1589 mod_timer(&dev_priv->hangcheck_timer,
1590 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1595 static void ironlake_irq_preinstall(struct drm_device *dev)
1597 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1599 I915_WRITE(HWSTAM, 0xeffe);
1601 /* XXX hotplug from PCH */
1603 I915_WRITE(DEIMR, 0xffffffff);
1604 I915_WRITE(DEIER, 0x0);
1605 POSTING_READ(DEIER);
1608 I915_WRITE(GTIMR, 0xffffffff);
1609 I915_WRITE(GTIER, 0x0);
1610 POSTING_READ(GTIER);
1612 /* south display irq */
1613 I915_WRITE(SDEIMR, 0xffffffff);
1614 I915_WRITE(SDEIER, 0x0);
1615 POSTING_READ(SDEIER);
1618 static int ironlake_irq_postinstall(struct drm_device *dev)
1620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1621 /* enable the kinds of interrupts that are always enabled */
1622 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1623 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1627 dev_priv->irq_mask = ~display_mask;
1629 /* should always be able to generate an irq */
1630 I915_WRITE(DEIIR, I915_READ(DEIIR));
1631 I915_WRITE(DEIMR, dev_priv->irq_mask);
1632 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1633 POSTING_READ(DEIER);
1635 dev_priv->gt_irq_mask = ~0;
1637 I915_WRITE(GTIIR, I915_READ(GTIIR));
1638 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1643 GT_GEN6_BSD_USER_INTERRUPT |
1644 GT_BLT_USER_INTERRUPT;
1649 GT_BSD_USER_INTERRUPT;
1650 I915_WRITE(GTIER, render_irqs);
1651 POSTING_READ(GTIER);
1653 if (HAS_PCH_CPT(dev)) {
1654 hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
1655 SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
1657 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1658 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1661 dev_priv->pch_irq_mask = ~hotplug_mask;
1663 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1664 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1665 I915_WRITE(SDEIER, hotplug_mask);
1666 POSTING_READ(SDEIER);
1668 if (IS_IRONLAKE_M(dev)) {
1669 /* Clear & enable PCU event interrupts */
1670 I915_WRITE(DEIIR, DE_PCU_EVENT);
1671 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1672 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1678 void i915_driver_irq_preinstall(struct drm_device * dev)
1680 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1682 atomic_set(&dev_priv->irq_received, 0);
1684 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1685 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1687 if (HAS_PCH_SPLIT(dev)) {
1688 ironlake_irq_preinstall(dev);
1692 if (I915_HAS_HOTPLUG(dev)) {
1693 I915_WRITE(PORT_HOTPLUG_EN, 0);
1694 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1697 I915_WRITE(HWSTAM, 0xeffe);
1698 I915_WRITE(PIPEASTAT, 0);
1699 I915_WRITE(PIPEBSTAT, 0);
1700 I915_WRITE(IMR, 0xffffffff);
1701 I915_WRITE(IER, 0x0);
1706 * Must be called after intel_modeset_init or hotplug interrupts won't be
1707 * enabled correctly.
1709 int i915_driver_irq_postinstall(struct drm_device *dev)
1711 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1712 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1715 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1717 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1719 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1721 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1723 if (HAS_PCH_SPLIT(dev))
1724 return ironlake_irq_postinstall(dev);
1726 /* Unmask the interrupts that we always want on. */
1727 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1729 dev_priv->pipestat[0] = 0;
1730 dev_priv->pipestat[1] = 0;
1732 if (I915_HAS_HOTPLUG(dev)) {
1733 /* Enable in IER... */
1734 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1735 /* and unmask in IMR */
1736 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1740 * Enable some error detection, note the instruction error mask
1741 * bit is reserved, so we leave it masked.
1744 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1745 GM45_ERROR_MEM_PRIV |
1746 GM45_ERROR_CP_PRIV |
1747 I915_ERROR_MEMORY_REFRESH);
1749 error_mask = ~(I915_ERROR_PAGE_TABLE |
1750 I915_ERROR_MEMORY_REFRESH);
1752 I915_WRITE(EMR, error_mask);
1754 I915_WRITE(IMR, dev_priv->irq_mask);
1755 I915_WRITE(IER, enable_mask);
1758 if (I915_HAS_HOTPLUG(dev)) {
1759 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1761 /* Note HDMI and DP share bits */
1762 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1763 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1764 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1765 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1766 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1767 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1768 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1769 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1770 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1771 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1772 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1773 hotplug_en |= CRT_HOTPLUG_INT_EN;
1775 /* Programming the CRT detection parameters tends
1776 to generate a spurious hotplug event about three
1777 seconds later. So just do it once.
1780 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
1781 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1784 /* Ignore TV since it's buggy */
1786 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1789 intel_opregion_enable_asle(dev);
1794 static void ironlake_irq_uninstall(struct drm_device *dev)
1796 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1797 I915_WRITE(HWSTAM, 0xffffffff);
1799 I915_WRITE(DEIMR, 0xffffffff);
1800 I915_WRITE(DEIER, 0x0);
1801 I915_WRITE(DEIIR, I915_READ(DEIIR));
1803 I915_WRITE(GTIMR, 0xffffffff);
1804 I915_WRITE(GTIER, 0x0);
1805 I915_WRITE(GTIIR, I915_READ(GTIIR));
1808 void i915_driver_irq_uninstall(struct drm_device * dev)
1810 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1815 dev_priv->vblank_pipe = 0;
1817 if (HAS_PCH_SPLIT(dev)) {
1818 ironlake_irq_uninstall(dev);
1822 if (I915_HAS_HOTPLUG(dev)) {
1823 I915_WRITE(PORT_HOTPLUG_EN, 0);
1824 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1827 I915_WRITE(HWSTAM, 0xffffffff);
1828 I915_WRITE(PIPEASTAT, 0);
1829 I915_WRITE(PIPEBSTAT, 0);
1830 I915_WRITE(IMR, 0xffffffff);
1831 I915_WRITE(IER, 0x0);
1833 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1834 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1835 I915_WRITE(IIR, I915_READ(IIR));