drivers/gpu/drm/i915/i915_irq.c [uclinux-h8/linux.git]
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling
44  * interrupt handling. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47
48 static const u32 hpd_ibx[HPD_NUM_PINS] = {
49         [HPD_CRT] = SDE_CRT_HOTPLUG,
50         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
51         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
52         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
53         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
54 };
55
56 static const u32 hpd_cpt[HPD_NUM_PINS] = {
57         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
58         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
59         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
60         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
61         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
62 };
63
64 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
65         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
66         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
67         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
68         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
69         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
70         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
71 };
72
73 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
74         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
76         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
77         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
83         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89 };
90
91 /* BXT hpd list */
92 static const u32 hpd_bxt[HPD_NUM_PINS] = {
93         [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
94         [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
95 };
96
97 /* IIR can theoretically queue up two events. Be paranoid. */
98 #define GEN8_IRQ_RESET_NDX(type, which) do { \
99         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
100         POSTING_READ(GEN8_##type##_IMR(which)); \
101         I915_WRITE(GEN8_##type##_IER(which), 0); \
102         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
103         POSTING_READ(GEN8_##type##_IIR(which)); \
104         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
105         POSTING_READ(GEN8_##type##_IIR(which)); \
106 } while (0)
107
108 #define GEN5_IRQ_RESET(type) do { \
109         I915_WRITE(type##IMR, 0xffffffff); \
110         POSTING_READ(type##IMR); \
111         I915_WRITE(type##IER, 0); \
112         I915_WRITE(type##IIR, 0xffffffff); \
113         POSTING_READ(type##IIR); \
114         I915_WRITE(type##IIR, 0xffffffff); \
115         POSTING_READ(type##IIR); \
116 } while (0)
117
118 /*
119  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
120  */
121 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
122         u32 val = I915_READ(reg); \
123         if (val) { \
124                 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
125                      (reg), val); \
126                 I915_WRITE((reg), 0xffffffff); \
127                 POSTING_READ(reg); \
128                 I915_WRITE((reg), 0xffffffff); \
129                 POSTING_READ(reg); \
130         } \
131 } while (0)
132
133 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
134         GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
135         I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
136         I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
137         POSTING_READ(GEN8_##type##_IMR(which)); \
138 } while (0)
139
140 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
141         GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
142         I915_WRITE(type##IER, (ier_val)); \
143         I915_WRITE(type##IMR, (imr_val)); \
144         POSTING_READ(type##IMR); \
145 } while (0)
146
147 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
148
149 /* For display hotplug interrupt */
150 void
151 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
152 {
153         assert_spin_locked(&dev_priv->irq_lock);
154
155         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
156                 return;
157
158         if ((dev_priv->irq_mask & mask) != 0) {
159                 dev_priv->irq_mask &= ~mask;
160                 I915_WRITE(DEIMR, dev_priv->irq_mask);
161                 POSTING_READ(DEIMR);
162         }
163 }
164
165 void
166 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
167 {
168         assert_spin_locked(&dev_priv->irq_lock);
169
170         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
171                 return;
172
173         if ((dev_priv->irq_mask & mask) != mask) {
174                 dev_priv->irq_mask |= mask;
175                 I915_WRITE(DEIMR, dev_priv->irq_mask);
176                 POSTING_READ(DEIMR);
177         }
178 }
179
180 /**
181  * ilk_update_gt_irq - update GTIMR
182  * @dev_priv: driver private
183  * @interrupt_mask: mask of interrupt bits to update
184  * @enabled_irq_mask: mask of interrupt bits to enable
185  */
186 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
187                               uint32_t interrupt_mask,
188                               uint32_t enabled_irq_mask)
189 {
190         assert_spin_locked(&dev_priv->irq_lock);
191
192         WARN_ON(enabled_irq_mask & ~interrupt_mask);
193
194         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
195                 return;
196
197         dev_priv->gt_irq_mask &= ~interrupt_mask;
198         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
199         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
200         POSTING_READ(GTIMR);
201 }
202
203 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
204 {
205         ilk_update_gt_irq(dev_priv, mask, mask);
206 }
207
208 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
209 {
210         ilk_update_gt_irq(dev_priv, mask, 0);
211 }
212
213 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
214 {
215         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
216 }
217
218 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
219 {
220         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
221 }
222
223 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
224 {
225         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
226 }
227
228 /**
229  * snb_update_pm_irq - update GEN6_PMIMR
230  * @dev_priv: driver private
231  * @interrupt_mask: mask of interrupt bits to update
232  * @enabled_irq_mask: mask of interrupt bits to enable
233  */
234 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
235                               uint32_t interrupt_mask,
236                               uint32_t enabled_irq_mask)
237 {
238         uint32_t new_val;
239
240         WARN_ON(enabled_irq_mask & ~interrupt_mask);
241
242         assert_spin_locked(&dev_priv->irq_lock);
243
244         new_val = dev_priv->pm_irq_mask;
245         new_val &= ~interrupt_mask;
246         new_val |= (~enabled_irq_mask & interrupt_mask);
247
248         if (new_val != dev_priv->pm_irq_mask) {
249                 dev_priv->pm_irq_mask = new_val;
250                 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
251                 POSTING_READ(gen6_pm_imr(dev_priv));
252         }
253 }
254
255 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
256 {
257         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
258                 return;
259
260         snb_update_pm_irq(dev_priv, mask, mask);
261 }
262
263 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
264                                   uint32_t mask)
265 {
266         snb_update_pm_irq(dev_priv, mask, 0);
267 }
268
269 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
270 {
271         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
272                 return;
273
274         __gen6_disable_pm_irq(dev_priv, mask);
275 }
276
277 void gen6_reset_rps_interrupts(struct drm_device *dev)
278 {
279         struct drm_i915_private *dev_priv = dev->dev_private;
280         uint32_t reg = gen6_pm_iir(dev_priv);
281
282         spin_lock_irq(&dev_priv->irq_lock);
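        /*
         * As with the GEN8_IRQ_RESET_NDX() macro above, IIR can queue up two
         * events, so write the RPS bits twice to make sure both queued
         * instances are cleared.
         */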
283         I915_WRITE(reg, dev_priv->pm_rps_events);
284         I915_WRITE(reg, dev_priv->pm_rps_events);
285         POSTING_READ(reg);
286         dev_priv->rps.pm_iir = 0;
287         spin_unlock_irq(&dev_priv->irq_lock);
288 }
289
290 void gen6_enable_rps_interrupts(struct drm_device *dev)
291 {
292         struct drm_i915_private *dev_priv = dev->dev_private;
293
294         spin_lock_irq(&dev_priv->irq_lock);
295
296         WARN_ON(dev_priv->rps.pm_iir);
297         WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
298         dev_priv->rps.interrupts_enabled = true;
299         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
300                                 dev_priv->pm_rps_events);
301         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
302
303         spin_unlock_irq(&dev_priv->irq_lock);
304 }
305
306 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
307 {
308         /*
309          * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
310          * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
311          *
312          * TODO: verify if this can be reproduced on VLV,CHV.
313          */
314         if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
315                 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
316
317         if (INTEL_INFO(dev_priv)->gen >= 8)
318                 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
319
320         return mask;
321 }
322
323 void gen6_disable_rps_interrupts(struct drm_device *dev)
324 {
325         struct drm_i915_private *dev_priv = dev->dev_private;
326
327         spin_lock_irq(&dev_priv->irq_lock);
328         dev_priv->rps.interrupts_enabled = false;
329         spin_unlock_irq(&dev_priv->irq_lock);
330
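        /*
         * gen6_pm_rps_work() checks interrupts_enabled and bails out early,
         * which speeds up the cancel_work_sync() below.
         */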
331         cancel_work_sync(&dev_priv->rps.work);
332
333         spin_lock_irq(&dev_priv->irq_lock);
334
335         I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
336
337         __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
338         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
339                                 ~dev_priv->pm_rps_events);
340
341         spin_unlock_irq(&dev_priv->irq_lock);
342
343         synchronize_irq(dev->irq);
344 }
345
346 /**
347  * ibx_display_interrupt_update - update SDEIMR
348  * @dev_priv: driver private
349  * @interrupt_mask: mask of interrupt bits to update
350  * @enabled_irq_mask: mask of interrupt bits to enable
351  */
352 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
353                                   uint32_t interrupt_mask,
354                                   uint32_t enabled_irq_mask)
355 {
356         uint32_t sdeimr = I915_READ(SDEIMR);
357         sdeimr &= ~interrupt_mask;
358         sdeimr |= (~enabled_irq_mask & interrupt_mask);
359
360         WARN_ON(enabled_irq_mask & ~interrupt_mask);
361
362         assert_spin_locked(&dev_priv->irq_lock);
363
364         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
365                 return;
366
367         I915_WRITE(SDEIMR, sdeimr);
368         POSTING_READ(SDEIMR);
369 }
370
371 static void
372 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
373                        u32 enable_mask, u32 status_mask)
374 {
375         u32 reg = PIPESTAT(pipe);
376         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
377
378         assert_spin_locked(&dev_priv->irq_lock);
379         WARN_ON(!intel_irqs_enabled(dev_priv));
380
381         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
382                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
383                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
384                       pipe_name(pipe), enable_mask, status_mask))
385                 return;
386
387         if ((pipestat & enable_mask) == enable_mask)
388                 return;
389
390         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
391
392         /* Enable the interrupt, clear any pending status */
393         pipestat |= enable_mask | status_mask;
394         I915_WRITE(reg, pipestat);
395         POSTING_READ(reg);
396 }
397
398 static void
399 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
400                         u32 enable_mask, u32 status_mask)
401 {
402         u32 reg = PIPESTAT(pipe);
403         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
404
405         assert_spin_locked(&dev_priv->irq_lock);
406         WARN_ON(!intel_irqs_enabled(dev_priv));
407
408         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
409                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
410                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
411                       pipe_name(pipe), enable_mask, status_mask))
412                 return;
413
414         if ((pipestat & enable_mask) == 0)
415                 return;
416
417         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
418
419         pipestat &= ~enable_mask;
420         I915_WRITE(reg, pipestat);
421         POSTING_READ(reg);
422 }
423
424 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
425 {
426         u32 enable_mask = status_mask << 16;
427
428         /*
429          * On pipe A we don't support the PSR interrupt yet,
430          * on pipe B and C the same bit MBZ.
431          */
432         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
433                 return 0;
434         /*
435          * On pipe B and C we don't support the PSR interrupt yet, on pipe
436          * A the same bit is for perf counters which we don't use either.
437          */
438         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
439                 return 0;
440
441         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
442                          SPRITE0_FLIP_DONE_INT_EN_VLV |
443                          SPRITE1_FLIP_DONE_INT_EN_VLV);
444         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
445                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
446         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
447                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
448
449         return enable_mask;
450 }
451
452 void
453 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
454                      u32 status_mask)
455 {
456         u32 enable_mask;
457
458         if (IS_VALLEYVIEW(dev_priv->dev))
459                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
460                                                            status_mask);
461         else
462                 enable_mask = status_mask << 16;
463         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
464 }
465
466 void
467 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
468                       u32 status_mask)
469 {
470         u32 enable_mask;
471
472         if (IS_VALLEYVIEW(dev_priv->dev))
473                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
474                                                            status_mask);
475         else
476                 enable_mask = status_mask << 16;
477         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
478 }
479
480 /**
481  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
482  */
483 static void i915_enable_asle_pipestat(struct drm_device *dev)
484 {
485         struct drm_i915_private *dev_priv = dev->dev_private;
486
487         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
488                 return;
489
490         spin_lock_irq(&dev_priv->irq_lock);
491
492         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
493         if (INTEL_INFO(dev)->gen >= 4)
494                 i915_enable_pipestat(dev_priv, PIPE_A,
495                                      PIPE_LEGACY_BLC_EVENT_STATUS);
496
497         spin_unlock_irq(&dev_priv->irq_lock);
498 }
499
500 /*
501  * This timing diagram depicts the video signal in and
502  * around the vertical blanking period.
503  *
504  * Assumptions about the fictitious mode used in this example:
505  *  vblank_start >= 3
506  *  vsync_start = vblank_start + 1
507  *  vsync_end = vblank_start + 2
508  *  vtotal = vblank_start + 3
509  *
510  *           start of vblank:
511  *           latch double buffered registers
512  *           increment frame counter (ctg+)
513  *           generate start of vblank interrupt (gen4+)
514  *           |
515  *           |          frame start:
516  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
517  *           |          may be shifted forward 1-3 extra lines via PIPECONF
518  *           |          |
519  *           |          |  start of vsync:
520  *           |          |  generate vsync interrupt
521  *           |          |  |
522  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
523  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
524  * ----va---> <-----------------vb--------------------> <--------va-------------
525  *       |          |       <----vs----->                     |
526  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
527  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
528  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
529  *       |          |                                         |
530  *       last visible pixel                                   first visible pixel
531  *                  |                                         increment frame counter (gen3/4)
532  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
533  *
534  * x  = horizontal active
535  * _  = horizontal blanking
536  * hs = horizontal sync
537  * va = vertical active
538  * vb = vertical blanking
539  * vs = vertical sync
540  * vbs = vblank_start (number)
541  *
542  * Summary:
543  * - most events happen at the start of horizontal sync
544  * - frame start happens at the start of horizontal blank, 1-4 lines
545  *   (depending on PIPECONF settings) after the start of vblank
546  * - gen3/4 pixel and frame counter are synchronized with the start
547  *   of horizontal active on the first line of vertical active
548  */
549
550 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
551 {
552         /* Gen2 doesn't have a hardware frame counter */
553         return 0;
554 }
555
556 /* Called from drm generic code, passed a 'crtc', which
557  * we use as a pipe index
558  */
559 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
560 {
561         struct drm_i915_private *dev_priv = dev->dev_private;
562         unsigned long high_frame;
563         unsigned long low_frame;
564         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
565         struct intel_crtc *intel_crtc =
566                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
567         const struct drm_display_mode *mode =
568                 &intel_crtc->config->base.adjusted_mode;
569
570         htotal = mode->crtc_htotal;
571         hsync_start = mode->crtc_hsync_start;
572         vbl_start = mode->crtc_vblank_start;
573         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
574                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
575
576         /* Convert to pixel count */
577         vbl_start *= htotal;
578
579         /* Start of vblank event occurs at start of hsync */
580         vbl_start -= htotal - hsync_start;
581
582         high_frame = PIPEFRAME(pipe);
583         low_frame = PIPEFRAMEPIXEL(pipe);
584
585         /*
586          * High & low register fields aren't synchronized, so make sure
587          * we get a low value that's stable across two reads of the high
588          * register.
589          */
590         do {
591                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
592                 low   = I915_READ(low_frame);
593                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
594         } while (high1 != high2);
595
596         high1 >>= PIPE_FRAME_HIGH_SHIFT;
597         pixel = low & PIPE_PIXEL_MASK;
598         low >>= PIPE_FRAME_LOW_SHIFT;
599
600         /*
601          * The frame counter increments at beginning of active.
602          * Cook up a vblank counter by also checking the pixel
603          * counter against vblank start.
604          */
605         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
606 }
607
608 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
609 {
610         struct drm_i915_private *dev_priv = dev->dev_private;
611         int reg = PIPE_FRMCOUNT_GM45(pipe);
612
613         return I915_READ(reg);
614 }
615
616 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
617 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
618
619 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
620 {
621         struct drm_device *dev = crtc->base.dev;
622         struct drm_i915_private *dev_priv = dev->dev_private;
623         const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
624         enum pipe pipe = crtc->pipe;
625         int position, vtotal;
626
627         vtotal = mode->crtc_vtotal;
628         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
629                 vtotal /= 2;
630
631         if (IS_GEN2(dev))
632                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
633         else
634                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
635
636         /*
637          * See update_scanline_offset() for the details on the
638          * scanline_offset adjustment.
639          */
640         return (position + crtc->scanline_offset) % vtotal;
641 }
642
643 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
644                                     unsigned int flags, int *vpos, int *hpos,
645                                     ktime_t *stime, ktime_t *etime)
646 {
647         struct drm_i915_private *dev_priv = dev->dev_private;
648         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
649         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
650         const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
651         int position;
652         int vbl_start, vbl_end, hsync_start, htotal, vtotal;
653         bool in_vbl = true;
654         int ret = 0;
655         unsigned long irqflags;
656
657         if (!intel_crtc->active) {
658                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
659                                  "pipe %c\n", pipe_name(pipe));
660                 return 0;
661         }
662
663         htotal = mode->crtc_htotal;
664         hsync_start = mode->crtc_hsync_start;
665         vtotal = mode->crtc_vtotal;
666         vbl_start = mode->crtc_vblank_start;
667         vbl_end = mode->crtc_vblank_end;
668
669         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
670                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
671                 vbl_end /= 2;
672                 vtotal /= 2;
673         }
674
675         ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
676
677         /*
678          * Lock uncore.lock, as we will do multiple timing critical raw
679          * register reads, potentially with preemption disabled, so the
680          * following code must not block on uncore.lock.
681          */
682         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
683
684         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
685
686         /* Get optional system timestamp before query. */
687         if (stime)
688                 *stime = ktime_get();
689
690         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
691                 /* No obvious pixelcount register. Only query vertical
692                  * scanout position from Display scan line register.
693                  */
694                 position = __intel_get_crtc_scanline(intel_crtc);
695         } else {
696                 /* Have access to pixelcount since start of frame.
697                  * We can split this into vertical and horizontal
698                  * scanout position.
699                  */
700                 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
701
702                 /* convert to pixel counts */
703                 vbl_start *= htotal;
704                 vbl_end *= htotal;
705                 vtotal *= htotal;
706
707                 /*
708                  * In interlaced modes, the pixel counter counts all pixels,
709                  * so one field will have htotal more pixels. In order to avoid
710                  * the reported position from jumping backwards when the pixel
711                  * counter is beyond the length of the shorter field, just
712                  * clamp the position to the length of the shorter field. This
713                  * matches how the scanline counter based position works since
714                  * the scanline counter doesn't count the two half lines.
715                  */
716                 if (position >= vtotal)
717                         position = vtotal - 1;
718
719                 /*
720                  * Start of vblank interrupt is triggered at start of hsync,
721                  * just prior to the first active line of vblank. However we
722                  * consider lines to start at the leading edge of horizontal
723                  * active. So, should we get here before we've crossed into
724                  * the horizontal active of the first line in vblank, we would
725                  * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
726                  * always add htotal-hsync_start to the current pixel position.
727                  */
728                 position = (position + htotal - hsync_start) % vtotal;
729         }
730
731         /* Get optional system timestamp after query. */
732         if (etime)
733                 *etime = ktime_get();
734
735         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
736
737         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
738
739         in_vbl = position >= vbl_start && position < vbl_end;
740
741         /*
742          * While in vblank, position will be negative
743          * counting up towards 0 at vbl_end. And outside
744          * vblank, position will be positive counting
745          * up since vbl_end.
746          */
747         if (position >= vbl_start)
748                 position -= vbl_end;
749         else
750                 position += vtotal - vbl_end;
751
752         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
753                 *vpos = position;
754                 *hpos = 0;
755         } else {
756                 *vpos = position / htotal;
757                 *hpos = position - (*vpos * htotal);
758         }
759
760         /* In vblank? */
761         if (in_vbl)
762                 ret |= DRM_SCANOUTPOS_IN_VBLANK;
763
764         return ret;
765 }
766
767 int intel_get_crtc_scanline(struct intel_crtc *crtc)
768 {
769         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
770         unsigned long irqflags;
771         int position;
772
773         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
774         position = __intel_get_crtc_scanline(crtc);
775         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
776
777         return position;
778 }
779
780 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
781                               int *max_error,
782                               struct timeval *vblank_time,
783                               unsigned flags)
784 {
785         struct drm_crtc *crtc;
786
787         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
788                 DRM_ERROR("Invalid crtc %d\n", pipe);
789                 return -EINVAL;
790         }
791
792         /* Get drm_crtc to timestamp: */
793         crtc = intel_get_crtc_for_pipe(dev, pipe);
794         if (crtc == NULL) {
795                 DRM_ERROR("Invalid crtc %d\n", pipe);
796                 return -EINVAL;
797         }
798
799         if (!crtc->state->enable) {
800                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
801                 return -EBUSY;
802         }
803
804         /* Helper routine in DRM core does all the work: */
805         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
806                                                      vblank_time, flags,
807                                                      crtc,
808                                                      &to_intel_crtc(crtc)->config->base.adjusted_mode);
809 }
810
811 static bool intel_hpd_irq_event(struct drm_device *dev,
812                                 struct drm_connector *connector)
813 {
814         enum drm_connector_status old_status;
815
816         WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
817         old_status = connector->status;
818
819         connector->status = connector->funcs->detect(connector, false);
820         if (old_status == connector->status)
821                 return false;
822
823         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
824                       connector->base.id,
825                       connector->name,
826                       drm_get_connector_status_name(old_status),
827                       drm_get_connector_status_name(connector->status));
828
829         return true;
830 }
831
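/*
 * Dispatch queued long/short HPD pulses to the per-port ->hpd_pulse()
 * handlers. Ports whose handler returns IRQ_NONE fall back to the legacy
 * hotplug work via hpd_event_bits.
 */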
832 static void i915_digport_work_func(struct work_struct *work)
833 {
834         struct drm_i915_private *dev_priv =
835                 container_of(work, struct drm_i915_private, dig_port_work);
836         u32 long_port_mask, short_port_mask;
837         struct intel_digital_port *intel_dig_port;
838         int i;
839         u32 old_bits = 0;
840
841         spin_lock_irq(&dev_priv->irq_lock);
842         long_port_mask = dev_priv->long_hpd_port_mask;
843         dev_priv->long_hpd_port_mask = 0;
844         short_port_mask = dev_priv->short_hpd_port_mask;
845         dev_priv->short_hpd_port_mask = 0;
846         spin_unlock_irq(&dev_priv->irq_lock);
847
848         for (i = 0; i < I915_MAX_PORTS; i++) {
849                 bool valid = false;
850                 bool long_hpd = false;
851                 intel_dig_port = dev_priv->hpd_irq_port[i];
852                 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
853                         continue;
854
855                 if (long_port_mask & (1 << i))  {
856                         valid = true;
857                         long_hpd = true;
858                 } else if (short_port_mask & (1 << i))
859                         valid = true;
860
861                 if (valid) {
862                         enum irqreturn ret;
863
864                         ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
865                         if (ret == IRQ_NONE) {
866                                 /* fall back to old school hpd */
867                                 old_bits |= (1 << intel_dig_port->base.hpd_pin);
868                         }
869                 }
870         }
871
872         if (old_bits) {
873                 spin_lock_irq(&dev_priv->irq_lock);
874                 dev_priv->hpd_event_bits |= old_bits;
875                 spin_unlock_irq(&dev_priv->irq_lock);
876                 schedule_work(&dev_priv->hotplug_work);
877         }
878 }
879
880 /*
881  * Handle hotplug events outside the interrupt handler proper.
882  */
883 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
884
885 static void i915_hotplug_work_func(struct work_struct *work)
886 {
887         struct drm_i915_private *dev_priv =
888                 container_of(work, struct drm_i915_private, hotplug_work);
889         struct drm_device *dev = dev_priv->dev;
890         struct drm_mode_config *mode_config = &dev->mode_config;
891         struct intel_connector *intel_connector;
892         struct intel_encoder *intel_encoder;
893         struct drm_connector *connector;
894         bool hpd_disabled = false;
895         bool changed = false;
896         u32 hpd_event_bits;
897
898         mutex_lock(&mode_config->mutex);
899         DRM_DEBUG_KMS("running encoder hotplug functions\n");
900
901         spin_lock_irq(&dev_priv->irq_lock);
902
903         hpd_event_bits = dev_priv->hpd_event_bits;
904         dev_priv->hpd_event_bits = 0;
905         list_for_each_entry(connector, &mode_config->connector_list, head) {
906                 intel_connector = to_intel_connector(connector);
907                 if (!intel_connector->encoder)
908                         continue;
909                 intel_encoder = intel_connector->encoder;
910                 if (intel_encoder->hpd_pin > HPD_NONE &&
911                     dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
912                     connector->polled == DRM_CONNECTOR_POLL_HPD) {
913                         DRM_INFO("HPD interrupt storm detected on connector %s: "
914                                  "switching from hotplug detection to polling\n",
915                                 connector->name);
916                         dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
917                         connector->polled = DRM_CONNECTOR_POLL_CONNECT
918                                 | DRM_CONNECTOR_POLL_DISCONNECT;
919                         hpd_disabled = true;
920                 }
921                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
922                         DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
923                                       connector->name, intel_encoder->hpd_pin);
924                 }
925         }
926          /* Polling may have been disabled because there were no outputs to
927           * poll, so make sure it is enabled again now that we are switching
928           * some connectors from HPD to polling. */
929         if (hpd_disabled) {
930                 drm_kms_helper_poll_enable(dev);
931                 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
932                                  msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
933         }
934
935         spin_unlock_irq(&dev_priv->irq_lock);
936
937         list_for_each_entry(connector, &mode_config->connector_list, head) {
938                 intel_connector = to_intel_connector(connector);
939                 if (!intel_connector->encoder)
940                         continue;
941                 intel_encoder = intel_connector->encoder;
942                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
943                         if (intel_encoder->hot_plug)
944                                 intel_encoder->hot_plug(intel_encoder);
945                         if (intel_hpd_irq_event(dev, connector))
946                                 changed = true;
947                 }
948         }
949         mutex_unlock(&mode_config->mutex);
950
951         if (changed)
952                 drm_kms_helper_hotplug_event(dev);
953 }
954
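/*
 * Ironlake DRPS: MEMINT_EVAL_CHG tells us the hardware wants a new delay
 * value. Step the delay down when the busy average exceeds max_avg and up
 * when it drops below min_avg, clamped to the [max_delay, min_delay] range.
 */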
955 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
956 {
957         struct drm_i915_private *dev_priv = dev->dev_private;
958         u32 busy_up, busy_down, max_avg, min_avg;
959         u8 new_delay;
960
961         spin_lock(&mchdev_lock);
962
963         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
964
965         new_delay = dev_priv->ips.cur_delay;
966
967         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
968         busy_up = I915_READ(RCPREVBSYTUPAVG);
969         busy_down = I915_READ(RCPREVBSYTDNAVG);
970         max_avg = I915_READ(RCBMAXAVG);
971         min_avg = I915_READ(RCBMINAVG);
972
973         /* Handle RCS change request from hw */
974         if (busy_up > max_avg) {
975                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
976                         new_delay = dev_priv->ips.cur_delay - 1;
977                 if (new_delay < dev_priv->ips.max_delay)
978                         new_delay = dev_priv->ips.max_delay;
979         } else if (busy_down < min_avg) {
980                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
981                         new_delay = dev_priv->ips.cur_delay + 1;
982                 if (new_delay > dev_priv->ips.min_delay)
983                         new_delay = dev_priv->ips.min_delay;
984         }
985
986         if (ironlake_set_drps(dev, new_delay))
987                 dev_priv->ips.cur_delay = new_delay;
988
989         spin_unlock(&mchdev_lock);
990
991         return;
992 }
993
994 static void notify_ring(struct intel_engine_cs *ring)
995 {
996         if (!intel_ring_initialized(ring))
997                 return;
998
999         trace_i915_gem_request_notify(ring);
1000
1001         wake_up_all(&ring->irq_queue);
1002 }
1003
1004 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1005                         struct intel_rps_ei *ei)
1006 {
1007         ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1008         ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1009         ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1010 }
1011
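/*
 * Compare the render+media C0 residency accumulated since 'old' against
 * the elapsed CZ clock time scaled by the threshold percentage, i.e.
 * (roughly) whether the GPU was busier than 'threshold' percent.
 */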
1012 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1013                          const struct intel_rps_ei *old,
1014                          const struct intel_rps_ei *now,
1015                          int threshold)
1016 {
1017         u64 time, c0;
1018
1019         if (old->cz_clock == 0)
1020                 return false;
1021
1022         time = now->cz_clock - old->cz_clock;
1023         time *= threshold * dev_priv->mem_freq;
1024
1025         /* Workload can be split between render + media, e.g. SwapBuffers
1026          * being blitted in X after being rendered in mesa. To account for
1027          * this we need to combine both engines into our activity counter.
1028          */
1029         c0 = now->render_c0 - old->render_c0;
1030         c0 += now->media_c0 - old->media_c0;
1031         c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
1032
1033         return c0 >= time;
1034 }
1035
1036 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1037 {
1038         vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1039         dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1040 }
1041
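/*
 * VLV/CHV workaround: derive the RP up/down threshold events from the C0
 * residency counters whenever an EI-expired interrupt fires, instead of
 * relying on the hardware threshold interrupts.
 */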
1042 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1043 {
1044         struct intel_rps_ei now;
1045         u32 events = 0;
1046
1047         if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1048                 return 0;
1049
1050         vlv_c0_read(dev_priv, &now);
1051         if (now.cz_clock == 0)
1052                 return 0;
1053
1054         if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1055                 if (!vlv_c0_above(dev_priv,
1056                                   &dev_priv->rps.down_ei, &now,
1057                                   dev_priv->rps.down_threshold))
1058                         events |= GEN6_PM_RP_DOWN_THRESHOLD;
1059                 dev_priv->rps.down_ei = now;
1060         }
1061
1062         if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1063                 if (vlv_c0_above(dev_priv,
1064                                  &dev_priv->rps.up_ei, &now,
1065                                  dev_priv->rps.up_threshold))
1066                         events |= GEN6_PM_RP_UP_THRESHOLD;
1067                 dev_priv->rps.up_ei = now;
1068         }
1069
1070         return events;
1071 }
1072
1073 static void gen6_pm_rps_work(struct work_struct *work)
1074 {
1075         struct drm_i915_private *dev_priv =
1076                 container_of(work, struct drm_i915_private, rps.work);
1077         u32 pm_iir;
1078         int new_delay, adj;
1079
1080         spin_lock_irq(&dev_priv->irq_lock);
1081         /* Speed up work cancelation during disabling rps interrupts. */
1082         if (!dev_priv->rps.interrupts_enabled) {
1083                 spin_unlock_irq(&dev_priv->irq_lock);
1084                 return;
1085         }
1086         pm_iir = dev_priv->rps.pm_iir;
1087         dev_priv->rps.pm_iir = 0;
1088         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1089         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1090         spin_unlock_irq(&dev_priv->irq_lock);
1091
1092         /* Make sure we didn't queue anything we're not going to process. */
1093         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1094
1095         if ((pm_iir & dev_priv->pm_rps_events) == 0)
1096                 return;
1097
1098         mutex_lock(&dev_priv->rps.hw_lock);
1099
1100         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1101
1102         adj = dev_priv->rps.last_adj;
1103         new_delay = dev_priv->rps.cur_freq;
1104         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1105                 if (adj > 0)
1106                         adj *= 2;
1107                 else /* CHV needs even encode values */
1108                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1109                 /*
1110                  * For better performance, jump directly
1111                  * to RPe if we're below it.
1112                  */
1113                 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1114                         new_delay = dev_priv->rps.efficient_freq;
1115                         adj = 0;
1116                 }
1117         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1118                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1119                         new_delay = dev_priv->rps.efficient_freq;
1120                 else
1121                         new_delay = dev_priv->rps.min_freq_softlimit;
1122                 adj = 0;
1123         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1124                 if (adj < 0)
1125                         adj *= 2;
1126                 else /* CHV needs even encode values */
1127                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1128         } else { /* unknown event */
1129                 adj = 0;
1130         }
1131
1132         dev_priv->rps.last_adj = adj;
1133
1134         /* sysfs frequency interfaces may have snuck in while servicing the
1135          * interrupt
1136          */
1137         new_delay += adj;
1138         new_delay = clamp_t(int, new_delay,
1139                             dev_priv->rps.min_freq_softlimit,
1140                             dev_priv->rps.max_freq_softlimit);
1141
1142         intel_set_rps(dev_priv->dev, new_delay);
1143
1144         mutex_unlock(&dev_priv->rps.hw_lock);
1145 }
1146
1147
1148 /**
1149  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1150  * occurred.
1151  * @work: workqueue struct
1152  *
1153  * Doesn't actually do anything except notify userspace. As a consequence of
1154  * this event, userspace should try to remap the bad rows since statistically
1155  * the same row is more likely to go bad again.
1156  */
1157 static void ivybridge_parity_work(struct work_struct *work)
1158 {
1159         struct drm_i915_private *dev_priv =
1160                 container_of(work, struct drm_i915_private, l3_parity.error_work);
1161         u32 error_status, row, bank, subbank;
1162         char *parity_event[6];
1163         uint32_t misccpctl;
1164         uint8_t slice = 0;
1165
1166         /* We must turn off DOP level clock gating to access the L3 registers.
1167          * In order to prevent a get/put style interface, acquire struct mutex
1168          * any time we access those registers.
1169          */
1170         mutex_lock(&dev_priv->dev->struct_mutex);
1171
1172         /* If we've screwed up tracking, just let the interrupt fire again */
1173         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1174                 goto out;
1175
1176         misccpctl = I915_READ(GEN7_MISCCPCTL);
1177         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1178         POSTING_READ(GEN7_MISCCPCTL);
1179
1180         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1181                 u32 reg;
1182
1183                 slice--;
1184                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1185                         break;
1186
1187                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1188
1189                 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1190
1191                 error_status = I915_READ(reg);
1192                 row = GEN7_PARITY_ERROR_ROW(error_status);
1193                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1194                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1195
1196                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1197                 POSTING_READ(reg);
1198
1199                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1200                 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1201                 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1202                 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1203                 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1204                 parity_event[5] = NULL;
1205
1206                 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1207                                    KOBJ_CHANGE, parity_event);
1208
1209                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1210                           slice, row, bank, subbank);
1211
1212                 kfree(parity_event[4]);
1213                 kfree(parity_event[3]);
1214                 kfree(parity_event[2]);
1215                 kfree(parity_event[1]);
1216         }
1217
1218         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1219
1220 out:
1221         WARN_ON(dev_priv->l3_parity.which_slice);
1222         spin_lock_irq(&dev_priv->irq_lock);
1223         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1224         spin_unlock_irq(&dev_priv->irq_lock);
1225
1226         mutex_unlock(&dev_priv->dev->struct_mutex);
1227 }
1228
1229 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1230 {
1231         struct drm_i915_private *dev_priv = dev->dev_private;
1232
1233         if (!HAS_L3_DPF(dev))
1234                 return;
1235
1236         spin_lock(&dev_priv->irq_lock);
1237         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1238         spin_unlock(&dev_priv->irq_lock);
1239
1240         iir &= GT_PARITY_ERROR(dev);
1241         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1242                 dev_priv->l3_parity.which_slice |= 1 << 1;
1243
1244         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1245                 dev_priv->l3_parity.which_slice |= 1 << 0;
1246
1247         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1248 }
1249
1250 static void ilk_gt_irq_handler(struct drm_device *dev,
1251                                struct drm_i915_private *dev_priv,
1252                                u32 gt_iir)
1253 {
1254         if (gt_iir &
1255             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1256                 notify_ring(&dev_priv->ring[RCS]);
1257         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1258                 notify_ring(&dev_priv->ring[VCS]);
1259 }
1260
1261 static void snb_gt_irq_handler(struct drm_device *dev,
1262                                struct drm_i915_private *dev_priv,
1263                                u32 gt_iir)
1264 {
1265
1266         if (gt_iir &
1267             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1268                 notify_ring(&dev_priv->ring[RCS]);
1269         if (gt_iir & GT_BSD_USER_INTERRUPT)
1270                 notify_ring(&dev_priv->ring[VCS]);
1271         if (gt_iir & GT_BLT_USER_INTERRUPT)
1272                 notify_ring(&dev_priv->ring[BCS]);
1273
1274         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1275                       GT_BSD_CS_ERROR_INTERRUPT |
1276                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1277                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1278
1279         if (gt_iir & GT_PARITY_ERROR(dev))
1280                 ivybridge_parity_error_irq_handler(dev, gt_iir);
1281 }
1282
1283 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1284                                        u32 master_ctl)
1285 {
1286         irqreturn_t ret = IRQ_NONE;
1287
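        /*
         * Raw _FW register accessors skip the uncore lock and forcewake
         * bookkeeping, keeping this hot interrupt path cheap.
         */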
1288         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1289                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1290                 if (tmp) {
1291                         I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1292                         ret = IRQ_HANDLED;
1293
1294                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1295                                 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1296                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1297                                 notify_ring(&dev_priv->ring[RCS]);
1298
1299                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1300                                 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1301                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1302                                 notify_ring(&dev_priv->ring[BCS]);
1303                 } else
1304                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
1305         }
1306
1307         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1308                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1309                 if (tmp) {
1310                         I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1311                         ret = IRQ_HANDLED;
1312
1313                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1314                                 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1315                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1316                                 notify_ring(&dev_priv->ring[VCS]);
1317
1318                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1319                                 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1320                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1321                                 notify_ring(&dev_priv->ring[VCS2]);
1322                 } else
1323                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
1324         }
1325
1326         if (master_ctl & GEN8_GT_VECS_IRQ) {
1327                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1328                 if (tmp) {
1329                         I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1330                         ret = IRQ_HANDLED;
1331
1332                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1333                                 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1334                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1335                                 notify_ring(&dev_priv->ring[VECS]);
1336                 } else
1337                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
1338         }
1339
1340         if (master_ctl & GEN8_GT_PM_IRQ) {
1341                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1342                 if (tmp & dev_priv->pm_rps_events) {
1343                         I915_WRITE_FW(GEN8_GT_IIR(2),
1344                                       tmp & dev_priv->pm_rps_events);
1345                         ret = IRQ_HANDLED;
1346                         gen6_rps_irq_handler(dev_priv, tmp);
1347                 } else
1348                         DRM_ERROR("The master control interrupt lied (PM)!\n");
1349         }
1350
1351         return ret;
1352 }
1353
1354 #define HPD_STORM_DETECT_PERIOD 1000
1355 #define HPD_STORM_THRESHOLD 5
1356
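/*
 * Bit shift that aligns a port's hotplug pulse duration bits with the
 * port B field, so PORTB_HOTPLUG_LONG_DETECT can be tested for any port.
 */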
1357 static int pch_port_to_hotplug_shift(enum port port)
1358 {
1359         switch (port) {
1360         case PORT_A:
1361         case PORT_E:
1362         default:
1363                 return -1;
1364         case PORT_B:
1365                 return 0;
1366         case PORT_C:
1367                 return 8;
1368         case PORT_D:
1369                 return 16;
1370         }
1371 }
1372
1373 static int i915_port_to_hotplug_shift(enum port port)
1374 {
1375         switch (port) {
1376         case PORT_A:
1377         case PORT_E:
1378         default:
1379                 return -1;
1380         case PORT_B:
1381                 return 17;
1382         case PORT_C:
1383                 return 19;
1384         case PORT_D:
1385                 return 21;
1386         }
1387 }
1388
1389 static inline enum port get_port_from_pin(enum hpd_pin pin)
1390 {
1391         switch (pin) {
1392         case HPD_PORT_B:
1393                 return PORT_B;
1394         case HPD_PORT_C:
1395                 return PORT_C;
1396         case HPD_PORT_D:
1397                 return PORT_D;
1398         default:
1399                 return PORT_A; /* no hpd */
1400         }
1401 }
1402
1403 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1404                                          u32 hotplug_trigger,
1405                                          u32 dig_hotplug_reg,
1406                                          const u32 hpd[HPD_NUM_PINS])
1407 {
1408         struct drm_i915_private *dev_priv = dev->dev_private;
1409         int i;
1410         enum port port;
1411         bool storm_detected = false;
1412         bool queue_dig = false, queue_hp = false;
1413         u32 dig_shift;
1414         u32 dig_port_mask = 0;
1415
1416         if (!hotplug_trigger)
1417                 return;
1418
1419         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1420                          hotplug_trigger, dig_hotplug_reg);
1421
1422         spin_lock(&dev_priv->irq_lock);
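	/*
	 * First pass: work out which digital ports saw a long vs. short
	 * pulse and note them in long/short_hpd_port_mask for the
	 * dig_port_work bottom half.
	 */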
1423         for (i = 1; i < HPD_NUM_PINS; i++) {
1424                 if (!(hpd[i] & hotplug_trigger))
1425                         continue;
1426
1427                 port = get_port_from_pin(i);
1428                 if (port && dev_priv->hpd_irq_port[port]) {
1429                         bool long_hpd;
1430
1431                         if (!HAS_GMCH_DISPLAY(dev_priv)) {
1432                                 dig_shift = pch_port_to_hotplug_shift(port);
1433                                 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1434                         } else {
1435                                 dig_shift = i915_port_to_hotplug_shift(port);
1436                                 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1437                         }
1438
1439                         DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1440                                          port_name(port),
1441                                          long_hpd ? "long" : "short");
1442                         /* For long HPD pulses we want to have the digital queue happen,
1443                          * but we still want HPD storm detection to function. */
1444                         if (long_hpd) {
1445                                 dev_priv->long_hpd_port_mask |= (1 << port);
1446                                 dig_port_mask |= hpd[i];
1447                         } else {
1448                                 /* for short HPD just trigger the digital queue */
1449                                 dev_priv->short_hpd_port_mask |= (1 << port);
1450                                 hotplug_trigger &= ~hpd[i];
1451                         }
1452                         queue_dig = true;
1453                 }
1454         }
1455
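	/*
	 * Second pass: per-pin storm detection. A pin that fires more than
	 * HPD_STORM_THRESHOLD times within HPD_STORM_DETECT_PERIOD ms is
	 * marked disabled and the hotplug interrupts are re-programmed below.
	 */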
1456         for (i = 1; i < HPD_NUM_PINS; i++) {
1457                 if (hpd[i] & hotplug_trigger &&
1458                     dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1459                         /*
1460                          * On GMCH platforms the interrupt mask bits only
1461                          * prevent irq generation, not the setting of the
1462                  * hotplug bits themselves. So only WARN about unexpected
1463                          * interrupts on saner platforms.
1464                          */
1465                         WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1466                                   "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1467                                   hotplug_trigger, i, hpd[i]);
1468
1469                         continue;
1470                 }
1471
1472                 if (!(hpd[i] & hotplug_trigger) ||
1473                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1474                         continue;
1475
1476                 if (!(dig_port_mask & hpd[i])) {
1477                         dev_priv->hpd_event_bits |= (1 << i);
1478                         queue_hp = true;
1479                 }
1480
1481                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1482                                    dev_priv->hpd_stats[i].hpd_last_jiffies
1483                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1484                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1485                         dev_priv->hpd_stats[i].hpd_cnt = 0;
1486                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1487                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1488                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1489                         dev_priv->hpd_event_bits &= ~(1 << i);
1490                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1491                         storm_detected = true;
1492                 } else {
1493                         dev_priv->hpd_stats[i].hpd_cnt++;
1494                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1495                                       dev_priv->hpd_stats[i].hpd_cnt);
1496                 }
1497         }
1498
1499         if (storm_detected)
1500                 dev_priv->display.hpd_irq_setup(dev);
1501         spin_unlock(&dev_priv->irq_lock);
1502
1503         /*
1504          * Our hotplug handler can grab modeset locks (by calling down into the
1505          * fb helpers). Hence it must not be run on our own dev_priv->wq work
1506          * queue for otherwise the flush_work in the pageflip code will
1507          * deadlock.
1508          */
1509         if (queue_dig)
1510                 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1511         if (queue_hp)
1512                 schedule_work(&dev_priv->hotplug_work);
1513 }
1514
1515 static void gmbus_irq_handler(struct drm_device *dev)
1516 {
1517         struct drm_i915_private *dev_priv = dev->dev_private;
1518
1519         wake_up_all(&dev_priv->gmbus_wait_queue);
1520 }
1521
1522 static void dp_aux_irq_handler(struct drm_device *dev)
1523 {
1524         struct drm_i915_private *dev_priv = dev->dev_private;
1525
1526         wake_up_all(&dev_priv->gmbus_wait_queue);
1527 }
1528
1529 #if defined(CONFIG_DEBUG_FS)
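/*
 * Queue one CRC result into the pipe's circular buffer and wake any
 * debugfs reader waiting on pipe_crc->wq.
 */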
1530 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1531                                          uint32_t crc0, uint32_t crc1,
1532                                          uint32_t crc2, uint32_t crc3,
1533                                          uint32_t crc4)
1534 {
1535         struct drm_i915_private *dev_priv = dev->dev_private;
1536         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1537         struct intel_pipe_crc_entry *entry;
1538         int head, tail;
1539
1540         spin_lock(&pipe_crc->lock);
1541
1542         if (!pipe_crc->entries) {
1543                 spin_unlock(&pipe_crc->lock);
1544                 DRM_DEBUG_KMS("spurious interrupt\n");
1545                 return;
1546         }
1547
1548         head = pipe_crc->head;
1549         tail = pipe_crc->tail;
1550
1551         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1552                 spin_unlock(&pipe_crc->lock);
1553                 DRM_ERROR("CRC buffer overflowing\n");
1554                 return;
1555         }
1556
1557         entry = &pipe_crc->entries[head];
1558
1559         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1560         entry->crc[0] = crc0;
1561         entry->crc[1] = crc1;
1562         entry->crc[2] = crc2;
1563         entry->crc[3] = crc3;
1564         entry->crc[4] = crc4;
1565
1566         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1567         pipe_crc->head = head;
1568
1569         spin_unlock(&pipe_crc->lock);
1570
1571         wake_up_interruptible(&pipe_crc->wq);
1572 }
1573 #else
1574 static inline void
1575 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1576                              uint32_t crc0, uint32_t crc1,
1577                              uint32_t crc2, uint32_t crc3,
1578                              uint32_t crc4) {}
1579 #endif
1580
1581
1582 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1583 {
1584         struct drm_i915_private *dev_priv = dev->dev_private;
1585
1586         display_pipe_crc_irq_handler(dev, pipe,
1587                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1588                                      0, 0, 0, 0);
1589 }
1590
1591 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1592 {
1593         struct drm_i915_private *dev_priv = dev->dev_private;
1594
1595         display_pipe_crc_irq_handler(dev, pipe,
1596                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1597                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1598                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1599                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1600                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1601 }
1602
1603 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1604 {
1605         struct drm_i915_private *dev_priv = dev->dev_private;
1606         uint32_t res1, res2;
1607
1608         if (INTEL_INFO(dev)->gen >= 3)
1609                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1610         else
1611                 res1 = 0;
1612
1613         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1614                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1615         else
1616                 res2 = 0;
1617
1618         display_pipe_crc_irq_handler(dev, pipe,
1619                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1620                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1621                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1622                                      res1, res2);
1623 }
1624
1625 /* The RPS events need forcewake, so we add them to a work queue and mask their
1626  * IMR bits until the work is done. Other interrupts can be processed without
1627  * the work queue. */
1628 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1629 {
1630         if (pm_iir & dev_priv->pm_rps_events) {
1631                 spin_lock(&dev_priv->irq_lock);
1632                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1633                 if (dev_priv->rps.interrupts_enabled) {
1634                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1635                         queue_work(dev_priv->wq, &dev_priv->rps.work);
1636                 }
1637                 spin_unlock(&dev_priv->irq_lock);
1638         }
1639
1640         if (INTEL_INFO(dev_priv)->gen >= 8)
1641                 return;
1642
1643         if (HAS_VEBOX(dev_priv->dev)) {
1644                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1645                         notify_ring(&dev_priv->ring[VECS]);
1646
1647                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1648                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1649         }
1650 }
1651
1652 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1653 {
1654         if (!drm_handle_vblank(dev, pipe))
1655                 return false;
1656
1657         return true;
1658 }
1659
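/*
 * Read and clear the PIPESTAT registers under the irq lock, then handle
 * vblank, page flip, CRC, GMBUS and FIFO underrun events outside of it.
 */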
1660 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1661 {
1662         struct drm_i915_private *dev_priv = dev->dev_private;
1663         u32 pipe_stats[I915_MAX_PIPES] = { };
1664         int pipe;
1665
1666         spin_lock(&dev_priv->irq_lock);
1667         for_each_pipe(dev_priv, pipe) {
1668                 int reg;
1669                 u32 mask, iir_bit = 0;
1670
1671                 /*
1672                  * PIPESTAT bits get signalled even when the interrupt is
1673                  * disabled with the mask bits, and some of the status bits do
1674                  * not generate interrupts at all (like the underrun bit). Hence
1675                  * we need to be careful that we only handle what we want to
1676                  * handle.
1677                  */
1678
1679                 /* fifo underruns are filtered in the underrun handler. */
1680                 mask = PIPE_FIFO_UNDERRUN_STATUS;
1681
1682                 switch (pipe) {
1683                 case PIPE_A:
1684                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1685                         break;
1686                 case PIPE_B:
1687                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1688                         break;
1689                 case PIPE_C:
1690                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1691                         break;
1692                 }
1693                 if (iir & iir_bit)
1694                         mask |= dev_priv->pipestat_irq_mask[pipe];
1695
1696                 if (!mask)
1697                         continue;
1698
1699                 reg = PIPESTAT(pipe);
1700                 mask |= PIPESTAT_INT_ENABLE_MASK;
1701                 pipe_stats[pipe] = I915_READ(reg) & mask;
1702
1703                 /*
1704                  * Clear the PIPE*STAT regs before the IIR
1705                  */
1706                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1707                                         PIPESTAT_INT_STATUS_MASK))
1708                         I915_WRITE(reg, pipe_stats[pipe]);
1709         }
1710         spin_unlock(&dev_priv->irq_lock);
1711
1712         for_each_pipe(dev_priv, pipe) {
1713                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1714                     intel_pipe_handle_vblank(dev, pipe))
1715                         intel_check_page_flip(dev, pipe);
1716
1717                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1718                         intel_prepare_page_flip(dev, pipe);
1719                         intel_finish_page_flip(dev, pipe);
1720                 }
1721
1722                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1723                         i9xx_pipe_crc_irq_handler(dev, pipe);
1724
1725                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1726                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1727         }
1728
1729         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1730                 gmbus_irq_handler(dev);
1731 }
1732
1733 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1734 {
1735         struct drm_i915_private *dev_priv = dev->dev_private;
1736         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1737
1738         if (hotplug_status) {
1739                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1740                 /*
1741                  * Make sure hotplug status is cleared before we clear IIR, or else we
1742                  * may miss hotplug events.
1743                  */
1744                 POSTING_READ(PORT_HOTPLUG_STAT);
1745
1746                 if (IS_G4X(dev)) {
1747                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1748
1749                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1750                 } else {
1751                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1752
1753                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1754                 }
1755
1756                 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1757                     hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1758                         dp_aux_irq_handler(dev);
1759         }
1760 }
1761
1762 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1763 {
1764         struct drm_device *dev = arg;
1765         struct drm_i915_private *dev_priv = dev->dev_private;
1766         u32 iir, gt_iir, pm_iir;
1767         irqreturn_t ret = IRQ_NONE;
1768
1769         if (!intel_irqs_enabled(dev_priv))
1770                 return IRQ_NONE;
1771
1772         while (true) {
1773                 /* Find, clear, then process each source of interrupt */
1774
1775                 gt_iir = I915_READ(GTIIR);
1776                 if (gt_iir)
1777                         I915_WRITE(GTIIR, gt_iir);
1778
1779                 pm_iir = I915_READ(GEN6_PMIIR);
1780                 if (pm_iir)
1781                         I915_WRITE(GEN6_PMIIR, pm_iir);
1782
1783                 iir = I915_READ(VLV_IIR);
1784                 if (iir) {
1785                         /* Consume port before clearing IIR or we'll miss events */
1786                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1787                                 i9xx_hpd_irq_handler(dev);
1788                         I915_WRITE(VLV_IIR, iir);
1789                 }
1790
1791                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1792                         goto out;
1793
1794                 ret = IRQ_HANDLED;
1795
1796                 if (gt_iir)
1797                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1798                 if (pm_iir)
1799                         gen6_rps_irq_handler(dev_priv, pm_iir);
1800                 /* Call regardless, as some status bits might not be
1801                  * signalled in iir */
1802                 valleyview_pipestat_irq_handler(dev, iir);
1803         }
1804
1805 out:
1806         return ret;
1807 }
1808
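/*
 * CHV uses the GEN8-style master interrupt control and GT interrupt
 * registers together with VLV-style display interrupt bits, hence the
 * mix of gen8_gt_irq_handler() and valleyview_pipestat_irq_handler().
 */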
1809 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1810 {
1811         struct drm_device *dev = arg;
1812         struct drm_i915_private *dev_priv = dev->dev_private;
1813         u32 master_ctl, iir;
1814         irqreturn_t ret = IRQ_NONE;
1815
1816         if (!intel_irqs_enabled(dev_priv))
1817                 return IRQ_NONE;
1818
1819         for (;;) {
1820                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1821                 iir = I915_READ(VLV_IIR);
1822
1823                 if (master_ctl == 0 && iir == 0)
1824                         break;
1825
1826                 ret = IRQ_HANDLED;
1827
1828                 I915_WRITE(GEN8_MASTER_IRQ, 0);
1829
1830                 /* Find, clear, then process each source of interrupt */
1831
1832                 if (iir) {
1833                         /* Consume port before clearing IIR or we'll miss events */
1834                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1835                                 i9xx_hpd_irq_handler(dev);
1836                         I915_WRITE(VLV_IIR, iir);
1837                 }
1838
1839                 gen8_gt_irq_handler(dev_priv, master_ctl);
1840
1841                 /* Call regardless, as some status bits might not be
1842                  * signalled in iir */
1843                 valleyview_pipestat_irq_handler(dev, iir);
1844
1845                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1846                 POSTING_READ(GEN8_MASTER_IRQ);
1847         }
1848
1849         return ret;
1850 }
1851
1852 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1853 {
1854         struct drm_i915_private *dev_priv = dev->dev_private;
1855         int pipe;
1856         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1857         u32 dig_hotplug_reg;
1858
1859         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1860         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1861
1862         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1863
1864         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1865                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1866                                SDE_AUDIO_POWER_SHIFT);
1867                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1868                                  port_name(port));
1869         }
1870
1871         if (pch_iir & SDE_AUX_MASK)
1872                 dp_aux_irq_handler(dev);
1873
1874         if (pch_iir & SDE_GMBUS)
1875                 gmbus_irq_handler(dev);
1876
1877         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1878                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1879
1880         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1881                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1882
1883         if (pch_iir & SDE_POISON)
1884                 DRM_ERROR("PCH poison interrupt\n");
1885
1886         if (pch_iir & SDE_FDI_MASK)
1887                 for_each_pipe(dev_priv, pipe)
1888                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1889                                          pipe_name(pipe),
1890                                          I915_READ(FDI_RX_IIR(pipe)));
1891
1892         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1893                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1894
1895         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1896                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1897
1898         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1899                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1900
1901         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1902                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1903 }
1904
1905 static void ivb_err_int_handler(struct drm_device *dev)
1906 {
1907         struct drm_i915_private *dev_priv = dev->dev_private;
1908         u32 err_int = I915_READ(GEN7_ERR_INT);
1909         enum pipe pipe;
1910
1911         if (err_int & ERR_INT_POISON)
1912                 DRM_ERROR("Poison interrupt\n");
1913
1914         for_each_pipe(dev_priv, pipe) {
1915                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1916                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1917
1918                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1919                         if (IS_IVYBRIDGE(dev))
1920                                 ivb_pipe_crc_irq_handler(dev, pipe);
1921                         else
1922                                 hsw_pipe_crc_irq_handler(dev, pipe);
1923                 }
1924         }
1925
1926         I915_WRITE(GEN7_ERR_INT, err_int);
1927 }
1928
1929 static void cpt_serr_int_handler(struct drm_device *dev)
1930 {
1931         struct drm_i915_private *dev_priv = dev->dev_private;
1932         u32 serr_int = I915_READ(SERR_INT);
1933
1934         if (serr_int & SERR_INT_POISON)
1935                 DRM_ERROR("PCH poison interrupt\n");
1936
1937         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1938                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1939
1940         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1941                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1942
1943         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1944                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1945
1946         I915_WRITE(SERR_INT, serr_int);
1947 }
1948
1949 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1950 {
1951         struct drm_i915_private *dev_priv = dev->dev_private;
1952         int pipe;
1953         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1954         u32 dig_hotplug_reg;
1955
1956         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1957         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1958
1959         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
1960
1961         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1962                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1963                                SDE_AUDIO_POWER_SHIFT_CPT);
1964                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1965                                  port_name(port));
1966         }
1967
1968         if (pch_iir & SDE_AUX_MASK_CPT)
1969                 dp_aux_irq_handler(dev);
1970
1971         if (pch_iir & SDE_GMBUS_CPT)
1972                 gmbus_irq_handler(dev);
1973
1974         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1975                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1976
1977         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1978                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1979
1980         if (pch_iir & SDE_FDI_MASK_CPT)
1981                 for_each_pipe(dev_priv, pipe)
1982                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1983                                          pipe_name(pipe),
1984                                          I915_READ(FDI_RX_IIR(pipe)));
1985
1986         if (pch_iir & SDE_ERROR_CPT)
1987                 cpt_serr_int_handler(dev);
1988 }
1989
1990 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1991 {
1992         struct drm_i915_private *dev_priv = dev->dev_private;
1993         enum pipe pipe;
1994
1995         if (de_iir & DE_AUX_CHANNEL_A)
1996                 dp_aux_irq_handler(dev);
1997
1998         if (de_iir & DE_GSE)
1999                 intel_opregion_asle_intr(dev);
2000
2001         if (de_iir & DE_POISON)
2002                 DRM_ERROR("Poison interrupt\n");
2003
2004         for_each_pipe(dev_priv, pipe) {
2005                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2006                     intel_pipe_handle_vblank(dev, pipe))
2007                         intel_check_page_flip(dev, pipe);
2008
2009                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2010                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2011
2012                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2013                         i9xx_pipe_crc_irq_handler(dev, pipe);
2014
2015                 /* plane/pipes map 1:1 on ilk+ */
2016                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2017                         intel_prepare_page_flip(dev, pipe);
2018                         intel_finish_page_flip_plane(dev, pipe);
2019                 }
2020         }
2021
2022         /* check event from PCH */
2023         if (de_iir & DE_PCH_EVENT) {
2024                 u32 pch_iir = I915_READ(SDEIIR);
2025
2026                 if (HAS_PCH_CPT(dev))
2027                         cpt_irq_handler(dev, pch_iir);
2028                 else
2029                         ibx_irq_handler(dev, pch_iir);
2030
2031                 /* should clear PCH hotplug event before clear CPU irq */
2032                 I915_WRITE(SDEIIR, pch_iir);
2033         }
2034
2035         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2036                 ironlake_rps_change_irq_handler(dev);
2037 }
2038
2039 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2040 {
2041         struct drm_i915_private *dev_priv = dev->dev_private;
2042         enum pipe pipe;
2043
2044         if (de_iir & DE_ERR_INT_IVB)
2045                 ivb_err_int_handler(dev);
2046
2047         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2048                 dp_aux_irq_handler(dev);
2049
2050         if (de_iir & DE_GSE_IVB)
2051                 intel_opregion_asle_intr(dev);
2052
2053         for_each_pipe(dev_priv, pipe) {
2054                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2055                     intel_pipe_handle_vblank(dev, pipe))
2056                         intel_check_page_flip(dev, pipe);
2057
2058                 /* plane/pipes map 1:1 on ilk+ */
2059                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2060                         intel_prepare_page_flip(dev, pipe);
2061                         intel_finish_page_flip_plane(dev, pipe);
2062                 }
2063         }
2064
2065         /* check event from PCH */
2066         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2067                 u32 pch_iir = I915_READ(SDEIIR);
2068
2069                 cpt_irq_handler(dev, pch_iir);
2070
2071                 /* clear PCH hotplug event before clear CPU irq */
2072                 I915_WRITE(SDEIIR, pch_iir);
2073         }
2074 }
2075
2076 /*
2077  * To handle irqs with the minimum potential races with fresh interrupts, we:
2078  * 1 - Disable Master Interrupt Control.
2079  * 2 - Find the source(s) of the interrupt.
2080  * 3 - Clear the Interrupt Identity bits (IIR).
2081  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2082  * 5 - Re-enable Master Interrupt Control.
2083  */
2084 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2085 {
2086         struct drm_device *dev = arg;
2087         struct drm_i915_private *dev_priv = dev->dev_private;
2088         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2089         irqreturn_t ret = IRQ_NONE;
2090
2091         if (!intel_irqs_enabled(dev_priv))
2092                 return IRQ_NONE;
2093
2094         /* We get interrupts on unclaimed registers, so check for this before we
2095          * do any I915_{READ,WRITE}. */
2096         intel_uncore_check_errors(dev);
2097
2098         /* disable master interrupt before clearing iir  */
2099         de_ier = I915_READ(DEIER);
2100         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2101         POSTING_READ(DEIER);
2102
2103         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2104          * interrupts will be stored on its back queue, and then we'll be
2105          * able to process them after we restore SDEIER (as soon as we restore
2106          * it, we'll get an interrupt if SDEIIR still has something to process
2107          * due to its back queue). */
2108         if (!HAS_PCH_NOP(dev)) {
2109                 sde_ier = I915_READ(SDEIER);
2110                 I915_WRITE(SDEIER, 0);
2111                 POSTING_READ(SDEIER);
2112         }
2113
2114         /* Find, clear, then process each source of interrupt */
2115
2116         gt_iir = I915_READ(GTIIR);
2117         if (gt_iir) {
2118                 I915_WRITE(GTIIR, gt_iir);
2119                 ret = IRQ_HANDLED;
2120                 if (INTEL_INFO(dev)->gen >= 6)
2121                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
2122                 else
2123                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2124         }
2125
2126         de_iir = I915_READ(DEIIR);
2127         if (de_iir) {
2128                 I915_WRITE(DEIIR, de_iir);
2129                 ret = IRQ_HANDLED;
2130                 if (INTEL_INFO(dev)->gen >= 7)
2131                         ivb_display_irq_handler(dev, de_iir);
2132                 else
2133                         ilk_display_irq_handler(dev, de_iir);
2134         }
2135
2136         if (INTEL_INFO(dev)->gen >= 6) {
2137                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2138                 if (pm_iir) {
2139                         I915_WRITE(GEN6_PMIIR, pm_iir);
2140                         ret = IRQ_HANDLED;
2141                         gen6_rps_irq_handler(dev_priv, pm_iir);
2142                 }
2143         }
2144
2145         I915_WRITE(DEIER, de_ier);
2146         POSTING_READ(DEIER);
2147         if (!HAS_PCH_NOP(dev)) {
2148                 I915_WRITE(SDEIER, sde_ier);
2149                 POSTING_READ(SDEIER);
2150         }
2151
2152         return ret;
2153 }
2154
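/*
 * BXT hotplug: the trigger bits arrive in the DE port IIR, while the
 * long/short pulse status is read from (and its sticky bits cleared in)
 * BXT_HOTPLUG_CTL.
 */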
2155 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
2156 {
2157         struct drm_i915_private *dev_priv = dev->dev_private;
2158         uint32_t hp_control;
2159         uint32_t hp_trigger;
2160
2161         /* Get the status */
2162         hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
2163         hp_control = I915_READ(BXT_HOTPLUG_CTL);
2164
2165         /* Hotplug not enabled ? */
2166         if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
2167                 DRM_ERROR("Interrupt when HPD disabled\n");
2168                 return;
2169         }
2170
2171         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2172                 hp_control & BXT_HOTPLUG_CTL_MASK);
2173
2174         /* Check for HPD storm and schedule bottom half */
2175         intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
2176
2177         /*
2178          * FIXME: Save the hot plug status for bottom half before
2179          * clearing the sticky status bits, else the status will be
2180          * lost.
2181          */
2182
2183         /* Clear sticky bits in hpd status */
2184         I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
2185 }
2186
2187 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2188 {
2189         struct drm_device *dev = arg;
2190         struct drm_i915_private *dev_priv = dev->dev_private;
2191         u32 master_ctl;
2192         irqreturn_t ret = IRQ_NONE;
2193         uint32_t tmp = 0;
2194         enum pipe pipe;
2195         u32 aux_mask = GEN8_AUX_CHANNEL_A;
2196
2197         if (!intel_irqs_enabled(dev_priv))
2198                 return IRQ_NONE;
2199
2200         if (IS_GEN9(dev))
2201                 aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2202                         GEN9_AUX_CHANNEL_D;
2203
2204         master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2205         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2206         if (!master_ctl)
2207                 return IRQ_NONE;
2208
2209         I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2210
2211         /* Find, clear, then process each source of interrupt */
2212
2213         ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2214
2215         if (master_ctl & GEN8_DE_MISC_IRQ) {
2216                 tmp = I915_READ(GEN8_DE_MISC_IIR);
2217                 if (tmp) {
2218                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2219                         ret = IRQ_HANDLED;
2220                         if (tmp & GEN8_DE_MISC_GSE)
2221                                 intel_opregion_asle_intr(dev);
2222                         else
2223                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2224                 } else
2226                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2227         }
2228
2229         if (master_ctl & GEN8_DE_PORT_IRQ) {
2230                 tmp = I915_READ(GEN8_DE_PORT_IIR);
2231                 if (tmp) {
2232                         bool found = false;
2233
2234                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2235                         ret = IRQ_HANDLED;
2236
2237                         if (tmp & aux_mask) {
2238                                 dp_aux_irq_handler(dev);
2239                                 found = true;
2240                         }
2241
2242                         if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2243                                 bxt_hpd_handler(dev, tmp);
2244                                 found = true;
2245                         }
2246
2247                         if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2248                                 gmbus_irq_handler(dev);
2249                                 found = true;
2250                         }
2251
2252                         if (!found)
2253                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2254                 } else
2256                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2257         }
2258
2259         for_each_pipe(dev_priv, pipe) {
2260                 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2261
2262                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2263                         continue;
2264
2265                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2266                 if (pipe_iir) {
2267                         ret = IRQ_HANDLED;
2268                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2269
2270                         if (pipe_iir & GEN8_PIPE_VBLANK &&
2271                             intel_pipe_handle_vblank(dev, pipe))
2272                                 intel_check_page_flip(dev, pipe);
2273
2274                         if (IS_GEN9(dev))
2275                                 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2276                         else
2277                                 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2278
2279                         if (flip_done) {
2280                                 intel_prepare_page_flip(dev, pipe);
2281                                 intel_finish_page_flip_plane(dev, pipe);
2282                         }
2283
2284                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2285                                 hsw_pipe_crc_irq_handler(dev, pipe);
2286
2287                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2288                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2289                                                                     pipe);
2290
2291
2292                         if (IS_GEN9(dev))
2293                                 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2294                         else
2295                                 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2296
2297                         if (fault_errors)
2298                                 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2299                                           pipe_name(pipe),
2300                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2301                 } else
2302                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2303         }
2304
2305         if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2306             master_ctl & GEN8_DE_PCH_IRQ) {
2307                 /*
2308                  * FIXME(BDW): Assume for now that the new interrupt handling
2309                  * scheme also closed the SDE interrupt handling race we've seen
2310                  * on older pch-split platforms. But this needs testing.
2311                  */
2312                 u32 pch_iir = I915_READ(SDEIIR);
2313                 if (pch_iir) {
2314                         I915_WRITE(SDEIIR, pch_iir);
2315                         ret = IRQ_HANDLED;
2316                         cpt_irq_handler(dev, pch_iir);
2317                 } else
2318                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
2319
2320         }
2321
2322         I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2323         POSTING_READ_FW(GEN8_MASTER_IRQ);
2324
2325         return ret;
2326 }
2327
2328 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2329                                bool reset_completed)
2330 {
2331         struct intel_engine_cs *ring;
2332         int i;
2333
2334         /*
2335          * Notify all waiters for GPU completion events that reset state has
2336          * been changed, and that they need to restart their wait after
2337          * checking for potential errors (and bail out to drop locks if there is
2338          * a gpu reset pending so that i915_reset_and_wakeup() can acquire them).
2339          */
2340
2341         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2342         for_each_ring(ring, dev_priv, i)
2343                 wake_up_all(&ring->irq_queue);
2344
2345         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2346         wake_up_all(&dev_priv->pending_flip_queue);
2347
2348         /*
2349          * Signal tasks blocked in i915_gem_wait_for_error that the pending
2350          * reset state is cleared.
2351          */
2352         if (reset_completed)
2353                 wake_up_all(&dev_priv->gpu_error.reset_queue);
2354 }
2355
2356 /**
2357  * i915_reset_and_wakeup - do process context error handling work
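 * @dev: drm device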
2358  *
2359  * Fire an error uevent so userspace can see that a hang or error
2360  * was detected.
2361  */
2362 static void i915_reset_and_wakeup(struct drm_device *dev)
2363 {
2364         struct drm_i915_private *dev_priv = to_i915(dev);
2365         struct i915_gpu_error *error = &dev_priv->gpu_error;
2366         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2367         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2368         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2369         int ret;
2370
2371         kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2372
2373         /*
2374          * Note that there's only one work item which does gpu resets, so we
2375          * need not worry about concurrent gpu resets potentially incrementing
2376          * error->reset_counter twice. We only need to take care of another
2377          * racing irq/hangcheck declaring the gpu dead for a second time. A
2378          * quick check for that is good enough: schedule_work ensures the
2379          * correct ordering between hang detection and this work item, and since
2380          * the reset in-progress bit is only ever set by code outside of this
2381          * work we don't need to worry about any other races.
2382          */
2383         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2384                 DRM_DEBUG_DRIVER("resetting chip\n");
2385                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2386                                    reset_event);
2387
2388                 /*
2389                  * In most cases it's guaranteed that we get here with an RPM
2390                  * reference held, for example because there is a pending GPU
2391                  * request that won't finish until the reset is done. This
2392                  * isn't the case at least when we get here by doing a
2393                  * simulated reset via debugs, so get an RPM reference.
2394                  */
2395                 intel_runtime_pm_get(dev_priv);
2396
2397                 intel_prepare_reset(dev);
2398
2399                 /*
2400                  * All state reset _must_ be completed before we update the
2401                  * reset counter, for otherwise waiters might miss the reset
2402                  * pending state and not properly drop locks, resulting in
2403                  * deadlocks with the reset work.
2404                  */
2405                 ret = i915_reset(dev);
2406
2407                 intel_finish_reset(dev);
2408
2409                 intel_runtime_pm_put(dev_priv);
2410
2411                 if (ret == 0) {
2412                         /*
2413                          * After all the gem state is reset, increment the reset
2414                          * counter and wake up everyone waiting for the reset to
2415                          * complete.
2416                          *
2417                          * Since unlock operations are a one-sided barrier only,
2418                          * we need to insert a barrier here to order any seqno
2419                          * updates before
2420                          * the counter increment.
2421                          */
2422                         smp_mb__before_atomic();
2423                         atomic_inc(&dev_priv->gpu_error.reset_counter);
2424
2425                         kobject_uevent_env(&dev->primary->kdev->kobj,
2426                                            KOBJ_CHANGE, reset_done_event);
2427                 } else {
2428                         atomic_set_mask(I915_WEDGED, &error->reset_counter);
2429                 }
2430
2431                 /*
2432                  * Note: The wake_up also serves as a memory barrier so that
2433          * waiters see the updated value of the reset counter atomic_t.
2434                  */
2435                 i915_error_wake_up(dev_priv, true);
2436         }
2437 }
2438
2439 static void i915_report_and_clear_eir(struct drm_device *dev)
2440 {
2441         struct drm_i915_private *dev_priv = dev->dev_private;
2442         uint32_t instdone[I915_NUM_INSTDONE_REG];
2443         u32 eir = I915_READ(EIR);
2444         int pipe, i;
2445
2446         if (!eir)
2447                 return;
2448
2449         pr_err("render error detected, EIR: 0x%08x\n", eir);
2450
2451         i915_get_extra_instdone(dev, instdone);
2452
2453         if (IS_G4X(dev)) {
2454                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2455                         u32 ipeir = I915_READ(IPEIR_I965);
2456
2457                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2458                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2459                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
2460                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2461                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2462                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2463                         I915_WRITE(IPEIR_I965, ipeir);
2464                         POSTING_READ(IPEIR_I965);
2465                 }
2466                 if (eir & GM45_ERROR_PAGE_TABLE) {
2467                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2468                         pr_err("page table error\n");
2469                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2470                         I915_WRITE(PGTBL_ER, pgtbl_err);
2471                         POSTING_READ(PGTBL_ER);
2472                 }
2473         }
2474
2475         if (!IS_GEN2(dev)) {
2476                 if (eir & I915_ERROR_PAGE_TABLE) {
2477                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2478                         pr_err("page table error\n");
2479                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2480                         I915_WRITE(PGTBL_ER, pgtbl_err);
2481                         POSTING_READ(PGTBL_ER);
2482                 }
2483         }
2484
2485         if (eir & I915_ERROR_MEMORY_REFRESH) {
2486                 pr_err("memory refresh error:\n");
2487                 for_each_pipe(dev_priv, pipe)
2488                         pr_err("pipe %c stat: 0x%08x\n",
2489                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2490                 /* pipestat has already been acked */
2491         }
2492         if (eir & I915_ERROR_INSTRUCTION) {
2493                 pr_err("instruction error\n");
2494                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2495                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2496                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2497                 if (INTEL_INFO(dev)->gen < 4) {
2498                         u32 ipeir = I915_READ(IPEIR);
2499
2500                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2501                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2502                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2503                         I915_WRITE(IPEIR, ipeir);
2504                         POSTING_READ(IPEIR);
2505                 } else {
2506                         u32 ipeir = I915_READ(IPEIR_I965);
2507
2508                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2509                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2510                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2511                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2512                         I915_WRITE(IPEIR_I965, ipeir);
2513                         POSTING_READ(IPEIR_I965);
2514                 }
2515         }
2516
2517         I915_WRITE(EIR, eir);
2518         POSTING_READ(EIR);
2519         eir = I915_READ(EIR);
2520         if (eir) {
2521                 /*
2522                  * some errors might have become stuck,
2523                  * mask them.
2524                  */
2525                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2526                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2527                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2528         }
2529 }
2530
2531 /**
2532  * i915_handle_error - handle a gpu error
2533  * @dev: drm device
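 * @wedged: set to true to also mark the GPU as hung and kick off a reset
 * @fmt: printf-style format string describing the error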
2534  *
2535  * Do some basic checking of register state at error time and
2536  * dump it to the syslog.  Also call i915_capture_error_state() to make
2537  * sure we get a record and make it available in debugfs.  Fire a uevent
2538  * so userspace knows something bad happened (should trigger collection
2539  * of a ring dump etc.).
2540  */
2541 void i915_handle_error(struct drm_device *dev, bool wedged,
2542                        const char *fmt, ...)
2543 {
2544         struct drm_i915_private *dev_priv = dev->dev_private;
2545         va_list args;
2546         char error_msg[80];
2547
2548         va_start(args, fmt);
2549         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2550         va_end(args);
2551
2552         i915_capture_error_state(dev, wedged, error_msg);
2553         i915_report_and_clear_eir(dev);
2554
2555         if (wedged) {
2556                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2557                                 &dev_priv->gpu_error.reset_counter);
2558
2559                 /*
2560                  * Wakeup waiting processes so that the reset function
2561                  * i915_reset_and_wakeup doesn't deadlock trying to grab
2562                  * various locks. By bumping the reset counter first, the woken
2563                  * processes will see a reset in progress and back off,
2564                  * releasing their locks and then wait for the reset completion.
2565                  * We must do this for _all_ gpu waiters that might hold locks
2566                  * that the reset work needs to acquire.
2567                  *
2568                  * Note: The wake_up serves as the required memory barrier to
2569                  * ensure that the waiters see the updated value of the reset
2570                  * counter atomic_t.
2571                  */
2572                 i915_error_wake_up(dev_priv, false);
2573         }
2574
2575         i915_reset_and_wakeup(dev);
2576 }
2577
2578 /* Called from drm generic code, passed 'crtc' which
2579  * we use as a pipe index
2580  */
2581 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2582 {
2583         struct drm_i915_private *dev_priv = dev->dev_private;
2584         unsigned long irqflags;
2585
2586         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2587         if (INTEL_INFO(dev)->gen >= 4)
2588                 i915_enable_pipestat(dev_priv, pipe,
2589                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
2590         else
2591                 i915_enable_pipestat(dev_priv, pipe,
2592                                      PIPE_VBLANK_INTERRUPT_STATUS);
2593         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2594
2595         return 0;
2596 }
2597
2598 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2599 {
2600         struct drm_i915_private *dev_priv = dev->dev_private;
2601         unsigned long irqflags;
2602         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2603                                                      DE_PIPE_VBLANK(pipe);
2604
2605         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2606         ironlake_enable_display_irq(dev_priv, bit);
2607         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2608
2609         return 0;
2610 }
2611
2612 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2613 {
2614         struct drm_i915_private *dev_priv = dev->dev_private;
2615         unsigned long irqflags;
2616
2617         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2618         i915_enable_pipestat(dev_priv, pipe,
2619                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2620         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2621
2622         return 0;
2623 }
2624
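/* On gen8+ vblank interrupts are masked/unmasked via the per-pipe DE IMR. */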
2625 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2626 {
2627         struct drm_i915_private *dev_priv = dev->dev_private;
2628         unsigned long irqflags;
2629
2630         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2631         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2632         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2633         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2634         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2635         return 0;
2636 }
2637
2638 /* Called from drm generic code, passed 'crtc' which
2639  * we use as a pipe index
2640  */
2641 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2642 {
2643         struct drm_i915_private *dev_priv = dev->dev_private;
2644         unsigned long irqflags;
2645
2646         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2647         i915_disable_pipestat(dev_priv, pipe,
2648                               PIPE_VBLANK_INTERRUPT_STATUS |
2649                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2650         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2651 }
2652
2653 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2654 {
2655         struct drm_i915_private *dev_priv = dev->dev_private;
2656         unsigned long irqflags;
2657         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2658                                                      DE_PIPE_VBLANK(pipe);
2659
2660         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2661         ironlake_disable_display_irq(dev_priv, bit);
2662         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2663 }
2664
2665 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2666 {
2667         struct drm_i915_private *dev_priv = dev->dev_private;
2668         unsigned long irqflags;
2669
2670         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2671         i915_disable_pipestat(dev_priv, pipe,
2672                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2673         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2674 }
2675
2676 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2677 {
2678         struct drm_i915_private *dev_priv = dev->dev_private;
2679         unsigned long irqflags;
2680
2681         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2682         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2683         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2684         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2685         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2686 }
2687
2688 static struct drm_i915_gem_request *
2689 ring_last_request(struct intel_engine_cs *ring)
2690 {
2691         return list_entry(ring->request_list.prev,
2692                           struct drm_i915_gem_request, list);
2693 }
2694
2695 static bool
2696 ring_idle(struct intel_engine_cs *ring)
2697 {
2698         return (list_empty(&ring->request_list) ||
2699                 i915_gem_request_completed(ring_last_request(ring), false));
2700 }
2701
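/*
 * Check whether the instruction in IPEHR is a semaphore wait:
 * MI_SEMAPHORE_WAIT (opcode 0x1c) on gen8+, or an MI_SEMAPHORE_MBOX
 * register compare on earlier generations.
 */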
2702 static bool
2703 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2704 {
2705         if (INTEL_INFO(dev)->gen >= 8) {
2706                 return (ipehr >> 23) == 0x1c;
2707         } else {
2708                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2709                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2710                                  MI_SEMAPHORE_REGISTER);
2711         }
2712 }
2713
2714 static struct intel_engine_cs *
2715 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2716 {
2717         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2718         struct intel_engine_cs *signaller;
2719         int i;
2720
2721         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2722                 for_each_ring(signaller, dev_priv, i) {
2723                         if (ring == signaller)
2724                                 continue;
2725
2726                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
2727                                 return signaller;
2728                 }
2729         } else {
2730                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2731
2732                 for_each_ring(signaller, dev_priv, i) {
2733                         if(ring == signaller)
2734                                 continue;
2735
2736                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2737                                 return signaller;
2738                 }
2739         }
2740
2741         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2742                   ring->id, ipehr, offset);
2743
2744         return NULL;
2745 }
2746
2747 static struct intel_engine_cs *
2748 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2749 {
2750         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2751         u32 cmd, ipehr, head;
2752         u64 offset = 0;
2753         int i, backwards;
2754
2755         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2756         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2757                 return NULL;
2758
2759         /*
2760          * HEAD is likely pointing to the dword after the actual command,
2761          * so scan backwards until we find the MBOX. But limit the scan to
2762          * just 4 or 5 dwords depending on the semaphore wait command size.
2763          * Note that we don't care about ACTHD here since that might
2764          * point at a batch, and semaphores are always emitted into the
2765          * ringbuffer itself.
2766          */
2767         head = I915_READ_HEAD(ring) & HEAD_ADDR;
2768         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2769
2770         for (i = backwards; i; --i) {
2771                 /*
2772                  * Be paranoid and presume the hw has gone off into the wild -
2773                  * our ring is smaller than what the hardware (and hence
2774                  * HEAD_ADDR) allows. Also handles wrap-around.
2775                  */
2776                 head &= ring->buffer->size - 1;
2777
2778                 /* This is the read that has been seen to fault */
2779                 cmd = ioread32(ring->buffer->virtual_start + head);
2780                 if (cmd == ipehr)
2781                         break;
2782
2783                 head -= 4;
2784         }
2785
2786         if (!i)
2787                 return NULL;
2788
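        /*
         * The dwords following the wait command hold the seqno being waited
         * on and, on gen8+, the 64-bit GGTT address of the signaller's
         * semaphore (low dword at +8, high dword at +12).
         */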
2789         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2790         if (INTEL_INFO(ring->dev)->gen >= 8) {
2791                 offset = ioread32(ring->buffer->virtual_start + head + 12);
2792                 offset <<= 32;
2793                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2794         }
2795         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2796 }
2797
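/*
 * Returns 1 if the semaphore this ring is waiting on has already been
 * signalled, 0 if it is still pending, and -1 if the signaller cannot be
 * determined or a semaphore deadlock is suspected.
 */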
2798 static int semaphore_passed(struct intel_engine_cs *ring)
2799 {
2800         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2801         struct intel_engine_cs *signaller;
2802         u32 seqno;
2803
2804         ring->hangcheck.deadlock++;
2805
2806         signaller = semaphore_waits_for(ring, &seqno);
2807         if (signaller == NULL)
2808                 return -1;
2809
2810         /* Prevent pathological recursion due to driver bugs */
2811         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2812                 return -1;
2813
2814         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2815                 return 1;
2816
2817         /* cursory check for an unkickable deadlock */
2818         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2819             semaphore_passed(signaller) < 0)
2820                 return -1;
2821
2822         return 0;
2823 }
2824
2825 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2826 {
2827         struct intel_engine_cs *ring;
2828         int i;
2829
2830         for_each_ring(ring, dev_priv, i)
2831                 ring->hangcheck.deadlock = 0;
2832 }
2833
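/*
 * Classify a ring whose seqno has stopped advancing: ACTIVE/ACTIVE_LOOP if
 * ACTHD is still moving, KICK if the ring is stuck on a kickable
 * WAIT_FOR_EVENT or semaphore, WAIT if it is legitimately waiting on another
 * ring, and HUNG otherwise.
 */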
2834 static enum intel_ring_hangcheck_action
2835 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2836 {
2837         struct drm_device *dev = ring->dev;
2838         struct drm_i915_private *dev_priv = dev->dev_private;
2839         u32 tmp;
2840
2841         if (acthd != ring->hangcheck.acthd) {
2842                 if (acthd > ring->hangcheck.max_acthd) {
2843                         ring->hangcheck.max_acthd = acthd;
2844                         return HANGCHECK_ACTIVE;
2845                 }
2846
2847                 return HANGCHECK_ACTIVE_LOOP;
2848         }
2849
2850         if (IS_GEN2(dev))
2851                 return HANGCHECK_HUNG;
2852
2853         /* Is the chip hanging on a WAIT_FOR_EVENT?
2854          * If so we can simply poke the RB_WAIT bit
2855          * and break the hang. This should work on
2856          * all but the second generation chipsets.
2857          */
2858         tmp = I915_READ_CTL(ring);
2859         if (tmp & RING_WAIT) {
2860                 i915_handle_error(dev, false,
2861                                   "Kicking stuck wait on %s",
2862                                   ring->name);
2863                 I915_WRITE_CTL(ring, tmp);
2864                 return HANGCHECK_KICK;
2865         }
2866
2867         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2868                 switch (semaphore_passed(ring)) {
2869                 default:
2870                         return HANGCHECK_HUNG;
2871                 case 1:
2872                         i915_handle_error(dev, false,
2873                                           "Kicking stuck semaphore on %s",
2874                                           ring->name);
2875                         I915_WRITE_CTL(ring, tmp);
2876                         return HANGCHECK_KICK;
2877                 case 0:
2878                         return HANGCHECK_WAIT;
2879                 }
2880         }
2881
2882         return HANGCHECK_HUNG;
2883 }
2884
2885 /*
2886  * This is called when the chip hasn't reported back with completed
2887  * batchbuffers in a long time. We keep track of per-ring seqno progress and
2888  * if there is no progress, the hangcheck score for that ring is increased.
2889  * Further, acthd is inspected to see if the ring is stuck. If it is stuck
2890  * we kick the ring. If we see no progress on three subsequent calls
2891  * we assume the chip is wedged and try to fix it by resetting the chip.
2892  */
2893 static void i915_hangcheck_elapsed(struct work_struct *work)
2894 {
2895         struct drm_i915_private *dev_priv =
2896                 container_of(work, typeof(*dev_priv),
2897                              gpu_error.hangcheck_work.work);
2898         struct drm_device *dev = dev_priv->dev;
2899         struct intel_engine_cs *ring;
2900         int i;
2901         int busy_count = 0, rings_hung = 0;
2902         bool stuck[I915_NUM_RINGS] = { 0 };
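/* Score increments applied per hangcheck pass; compared against
 * HANGCHECK_SCORE_RING_HUNG at the end of this function to decide whether a
 * ring counts as hung.
 */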
2903 #define BUSY 1
2904 #define KICK 5
2905 #define HUNG 20
2906
2907         if (!i915.enable_hangcheck)
2908                 return;
2909
2910         for_each_ring(ring, dev_priv, i) {
2911                 u64 acthd;
2912                 u32 seqno;
2913                 bool busy = true;
2914
2915                 semaphore_clear_deadlocks(dev_priv);
2916
2917                 seqno = ring->get_seqno(ring, false);
2918                 acthd = intel_ring_get_active_head(ring);
2919
2920                 if (ring->hangcheck.seqno == seqno) {
2921                         if (ring_idle(ring)) {
2922                                 ring->hangcheck.action = HANGCHECK_IDLE;
2923
2924                                 if (waitqueue_active(&ring->irq_queue)) {
2925                                         /* Issue a wake-up to catch stuck h/w. */
2926                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2927                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2928                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2929                                                                   ring->name);
2930                                                 else
2931                                                         DRM_INFO("Fake missed irq on %s\n",
2932                                                                  ring->name);
2933                                                 wake_up_all(&ring->irq_queue);
2934                                         }
2935                                         /* Safeguard against driver failure */
2936                                         ring->hangcheck.score += BUSY;
2937                                 } else
2938                                         busy = false;
2939                         } else {
2940                                 /* We always increment the hangcheck score
2941                                  * if the ring is busy and still processing
2942                                  * the same request, so that no single request
2943                                  * can run indefinitely (such as a chain of
2944                                  * batches). The only time we do not increment
2945                                  * the hangcheck score on this ring is when
2946                                  * this ring is in a legitimate wait for
2947                                  * another ring. In that case the waiting ring
2948                                  * is a victim and we want to be sure we catch
2949                                  * the right culprit. Then every time we kick
2950                                  * the ring, we add a small increment to the
2951                                  * score so that we can catch a batch that is
2952                                  * being repeatedly kicked and is therefore
2953                                  * responsible for stalling the machine.
2954                                  */
2955                                 ring->hangcheck.action = ring_stuck(ring,
2956                                                                     acthd);
2957
2958                                 switch (ring->hangcheck.action) {
2959                                 case HANGCHECK_IDLE:
2960                                 case HANGCHECK_WAIT:
2961                                 case HANGCHECK_ACTIVE:
2962                                         break;
2963                                 case HANGCHECK_ACTIVE_LOOP:
2964                                         ring->hangcheck.score += BUSY;
2965                                         break;
2966                                 case HANGCHECK_KICK:
2967                                         ring->hangcheck.score += KICK;
2968                                         break;
2969                                 case HANGCHECK_HUNG:
2970                                         ring->hangcheck.score += HUNG;
2971                                         stuck[i] = true;
2972                                         break;
2973                                 }
2974                         }
2975                 } else {
2976                         ring->hangcheck.action = HANGCHECK_ACTIVE;
2977
2978                         /* Gradually reduce the count so that we catch DoS
2979                          * attempts across multiple batches.
2980                          */
2981                         if (ring->hangcheck.score > 0)
2982                                 ring->hangcheck.score--;
2983
2984                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2985                 }
2986
2987                 ring->hangcheck.seqno = seqno;
2988                 ring->hangcheck.acthd = acthd;
2989                 busy_count += busy;
2990         }
2991
2992         for_each_ring(ring, dev_priv, i) {
2993                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2994                         DRM_INFO("%s on %s\n",
2995                                  stuck[i] ? "stuck" : "no progress",
2996                                  ring->name);
2997                         rings_hung++;
2998                 }
2999         }
3000
3001         if (rings_hung)
3002                 return i915_handle_error(dev, true, "Ring hung");
3003
3004         if (busy_count)
3005                 /* Reset the timer in case the chip hangs without another
3006                  * request being added */
3007                 i915_queue_hangcheck(dev);
3008 }
3009
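/**
 * i915_queue_hangcheck - (re)arm the GPU hangcheck timer
 * @dev: drm device
 *
 * Schedules the hangcheck work to run after DRM_I915_HANGCHECK_JIFFIES,
 * unless hang checking has been disabled through the i915.enable_hangcheck
 * module parameter.
 */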
3010 void i915_queue_hangcheck(struct drm_device *dev)
3011 {
3012         struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3013
3014         if (!i915.enable_hangcheck)
3015                 return;
3016
3017         /* Don't continually defer the hangcheck so that it is always run at
3018          * least once after work has been scheduled on any ring. Otherwise,
3019          * we will ignore a hung ring if a second ring is kept busy.
3020          */
3021
3022         queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3023                            round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3024 }
3025
3026 static void ibx_irq_reset(struct drm_device *dev)
3027 {
3028         struct drm_i915_private *dev_priv = dev->dev_private;
3029
3030         if (HAS_PCH_NOP(dev))
3031                 return;
3032
3033         GEN5_IRQ_RESET(SDE);
3034
3035         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3036                 I915_WRITE(SERR_INT, 0xffffffff);
3037 }
3038
3039 /*
3040  * SDEIER is also touched by the interrupt handler to work around missed PCH
3041  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3042  * instead we unconditionally enable all PCH interrupt sources here, but then
3043  * only unmask them as needed with SDEIMR.
3044  *
3045  * This function needs to be called before interrupts are enabled.
3046  */
3047 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3048 {
3049         struct drm_i915_private *dev_priv = dev->dev_private;
3050
3051         if (HAS_PCH_NOP(dev))
3052                 return;
3053
3054         WARN_ON(I915_READ(SDEIER) != 0);
3055         I915_WRITE(SDEIER, 0xffffffff);
3056         POSTING_READ(SDEIER);
3057 }
3058
3059 static void gen5_gt_irq_reset(struct drm_device *dev)
3060 {
3061         struct drm_i915_private *dev_priv = dev->dev_private;
3062
3063         GEN5_IRQ_RESET(GT);
3064         if (INTEL_INFO(dev)->gen >= 6)
3065                 GEN5_IRQ_RESET(GEN6_PM);
3066 }
3067
3068 /* drm_dma.h hooks
3069 */
3070 static void ironlake_irq_reset(struct drm_device *dev)
3071 {
3072         struct drm_i915_private *dev_priv = dev->dev_private;
3073
3074         I915_WRITE(HWSTAM, 0xffffffff);
3075
3076         GEN5_IRQ_RESET(DE);
3077         if (IS_GEN7(dev))
3078                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3079
3080         gen5_gt_irq_reset(dev);
3081
3082         ibx_irq_reset(dev);
3083 }
3084
3085 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3086 {
3087         enum pipe pipe;
3088
3089         I915_WRITE(PORT_HOTPLUG_EN, 0);
3090         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3091
3092         for_each_pipe(dev_priv, pipe)
3093                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3094
3095         GEN5_IRQ_RESET(VLV_);
3096 }
3097
3098 static void valleyview_irq_preinstall(struct drm_device *dev)
3099 {
3100         struct drm_i915_private *dev_priv = dev->dev_private;
3101
3102         /* VLV magic */
3103         I915_WRITE(VLV_IMR, 0);
3104         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3105         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3106         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3107
3108         gen5_gt_irq_reset(dev);
3109
3110         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3111
3112         vlv_display_irq_reset(dev_priv);
3113 }
3114
3115 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3116 {
3117         GEN8_IRQ_RESET_NDX(GT, 0);
3118         GEN8_IRQ_RESET_NDX(GT, 1);
3119         GEN8_IRQ_RESET_NDX(GT, 2);
3120         GEN8_IRQ_RESET_NDX(GT, 3);
3121 }
3122
3123 static void gen8_irq_reset(struct drm_device *dev)
3124 {
3125         struct drm_i915_private *dev_priv = dev->dev_private;
3126         int pipe;
3127
3128         I915_WRITE(GEN8_MASTER_IRQ, 0);
3129         POSTING_READ(GEN8_MASTER_IRQ);
3130
3131         gen8_gt_irq_reset(dev_priv);
3132
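        /* Only reset pipe interrupt registers whose power well is enabled */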
3133         for_each_pipe(dev_priv, pipe)
3134                 if (intel_display_power_is_enabled(dev_priv,
3135                                                    POWER_DOMAIN_PIPE(pipe)))
3136                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3137
3138         GEN5_IRQ_RESET(GEN8_DE_PORT_);
3139         GEN5_IRQ_RESET(GEN8_DE_MISC_);
3140         GEN5_IRQ_RESET(GEN8_PCU_);
3141
3142         if (HAS_PCH_SPLIT(dev))
3143                 ibx_irq_reset(dev);
3144 }
3145
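/**
 * gen8_irq_power_well_post_enable - restore pipe interrupts after enabling
 *      a display power well
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes (1 << PIPE_*) to reprogram
 *
 * Reprograms IMR/IER for the selected pipes from the cached de_irq_mask,
 * additionally enabling vblank and FIFO underrun interrupts, since the
 * register contents are not retained while the power well is off.
 */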
3146 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3147                                      unsigned int pipe_mask)
3148 {
3149         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3150
3151         spin_lock_irq(&dev_priv->irq_lock);
3152         if (pipe_mask & 1 << PIPE_A)
3153                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3154                                   dev_priv->de_irq_mask[PIPE_A],
3155                                   ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3156         if (pipe_mask & 1 << PIPE_B)
3157                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3158                                   dev_priv->de_irq_mask[PIPE_B],
3159                                   ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3160         if (pipe_mask & 1 << PIPE_C)
3161                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3162                                   dev_priv->de_irq_mask[PIPE_C],
3163                                   ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3164         spin_unlock_irq(&dev_priv->irq_lock);
3165 }
3166
3167 static void cherryview_irq_preinstall(struct drm_device *dev)
3168 {
3169         struct drm_i915_private *dev_priv = dev->dev_private;
3170
3171         I915_WRITE(GEN8_MASTER_IRQ, 0);
3172         POSTING_READ(GEN8_MASTER_IRQ);
3173
3174         gen8_gt_irq_reset(dev_priv);
3175
3176         GEN5_IRQ_RESET(GEN8_PCU_);
3177
3178         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3179
3180         vlv_display_irq_reset(dev_priv);
3181 }
3182
3183 static void ibx_hpd_irq_setup(struct drm_device *dev)
3184 {
3185         struct drm_i915_private *dev_priv = dev->dev_private;
3186         struct intel_encoder *intel_encoder;
3187         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3188
3189         if (HAS_PCH_IBX(dev)) {
3190                 hotplug_irqs = SDE_HOTPLUG_MASK;
3191                 for_each_intel_encoder(dev, intel_encoder)
3192                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3193                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3194         } else {
3195                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3196                 for_each_intel_encoder(dev, intel_encoder)
3197                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3198                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3199         }
3200
3201         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3202
3203         /*
3204          * Enable digital hotplug on the PCH, and configure the DP short pulse
3205          * duration to 2ms (which is the minimum in the Display Port spec)
3206          *
3207          * This register is the same on all known PCH chips.
3208          */
3209         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3210         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3211         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3212         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3213         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3214         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3215 }
3216
3217 static void bxt_hpd_irq_setup(struct drm_device *dev)
3218 {
3219         struct drm_i915_private *dev_priv = dev->dev_private;
3220         struct intel_encoder *intel_encoder;
3221         u32 hotplug_port = 0;
3222         u32 hotplug_ctrl;
3223
3224         /* Now, enable HPD */
3225         for_each_intel_encoder(dev, intel_encoder) {
3226                 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
3227                                 == HPD_ENABLED)
3228                         hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
3229         }
3230
3231         /* Mask all HPD control bits */
3232         hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3233
3234         /* Enable requested port in hotplug control */
3235         /* TODO: implement (short) HPD support on port A */
3236         WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
3237         if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3238                 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3239         if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3240                 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3241         I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3242
3243         /* Unmask DDI hotplug in IMR */
3244         hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3245         I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3246
3247         /* Enable DDI hotplug in IER */
3248         hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3249         I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3250         POSTING_READ(GEN8_DE_PORT_IER);
3251 }
3252
3253 static void ibx_irq_postinstall(struct drm_device *dev)
3254 {
3255         struct drm_i915_private *dev_priv = dev->dev_private;
3256         u32 mask;
3257
3258         if (HAS_PCH_NOP(dev))
3259                 return;
3260
3261         if (HAS_PCH_IBX(dev))
3262                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3263         else
3264                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3265
3266         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3267         I915_WRITE(SDEIMR, ~mask);
3268 }
3269
3270 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3271 {
3272         struct drm_i915_private *dev_priv = dev->dev_private;
3273         u32 pm_irqs, gt_irqs;
3274
3275         pm_irqs = gt_irqs = 0;
3276
3277         dev_priv->gt_irq_mask = ~0;
3278         if (HAS_L3_DPF(dev)) {
3279                 /* L3 parity interrupt is always unmasked. */
3280                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3281                 gt_irqs |= GT_PARITY_ERROR(dev);
3282         }
3283
3284         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3285         if (IS_GEN5(dev)) {
3286                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3287                            ILK_BSD_USER_INTERRUPT;
3288         } else {
3289                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3290         }
3291
3292         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3293
3294         if (INTEL_INFO(dev)->gen >= 6) {
3295                 /*
3296                  * RPS interrupts will get enabled/disabled on demand when RPS
3297                  * itself is enabled/disabled.
3298                  */
3299                 if (HAS_VEBOX(dev))
3300                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3301
3302                 dev_priv->pm_irq_mask = 0xffffffff;
3303                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3304         }
3305 }
3306
3307 static int ironlake_irq_postinstall(struct drm_device *dev)
3308 {
3309         struct drm_i915_private *dev_priv = dev->dev_private;
3310         u32 display_mask, extra_mask;
3311
3312         if (INTEL_INFO(dev)->gen >= 7) {
3313                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3314                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3315                                 DE_PLANEB_FLIP_DONE_IVB |
3316                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3317                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3318                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3319         } else {
3320                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3321                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3322                                 DE_AUX_CHANNEL_A |
3323                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3324                                 DE_POISON);
3325                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3326                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3327         }
3328
3329         dev_priv->irq_mask = ~display_mask;
3330
3331         I915_WRITE(HWSTAM, 0xeffe);
3332
3333         ibx_irq_pre_postinstall(dev);
3334
3335         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3336
3337         gen5_gt_irq_postinstall(dev);
3338
3339         ibx_irq_postinstall(dev);
3340
3341         if (IS_IRONLAKE_M(dev)) {
3342                 /* Enable PCU event interrupts
3343                  *
3344                  * spinlocking not required here for correctness since interrupt
3345                  * setup is guaranteed to run in single-threaded context. But we
3346                  * need it to make the assert_spin_locked happy. */
3347                 spin_lock_irq(&dev_priv->irq_lock);
3348                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3349                 spin_unlock_irq(&dev_priv->irq_lock);
3350         }
3351
3352         return 0;
3353 }
3354
3355 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3356 {
3357         u32 pipestat_mask;
3358         u32 iir_mask;
3359         enum pipe pipe;
3360
3361         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3362                         PIPE_FIFO_UNDERRUN_STATUS;
3363
3364         for_each_pipe(dev_priv, pipe)
3365                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3366         POSTING_READ(PIPESTAT(PIPE_A));
3367
3368         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3369                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3370
3371         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3372         for_each_pipe(dev_priv, pipe)
3373                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3374
3375         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3376                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3377                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3378         if (IS_CHERRYVIEW(dev_priv))
3379                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3380         dev_priv->irq_mask &= ~iir_mask;
3381
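        /* IIR can theoretically queue up two events, hence the double write */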
3382         I915_WRITE(VLV_IIR, iir_mask);
3383         I915_WRITE(VLV_IIR, iir_mask);
3384         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3385         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3386         POSTING_READ(VLV_IMR);
3387 }
3388
3389 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3390 {
3391         u32 pipestat_mask;
3392         u32 iir_mask;
3393         enum pipe pipe;
3394
3395         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3396                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3397                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3398         if (IS_CHERRYVIEW(dev_priv))
3399                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3400
3401         dev_priv->irq_mask |= iir_mask;
3402         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3403         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3404         I915_WRITE(VLV_IIR, iir_mask);
3405         I915_WRITE(VLV_IIR, iir_mask);
3406         POSTING_READ(VLV_IIR);
3407
3408         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3409                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3410
3411         i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3412         for_each_pipe(dev_priv, pipe)
3413                 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3414
3415         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3416                         PIPE_FIFO_UNDERRUN_STATUS;
3417
3418         for_each_pipe(dev_priv, pipe)
3419                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3420         POSTING_READ(PIPESTAT(PIPE_A));
3421 }
3422
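/**
 * valleyview_enable_display_irqs - enable VLV/CHV display interrupts
 * @dev_priv: i915 device instance
 *
 * Caller must hold dev_priv->irq_lock. If driver interrupts are not enabled
 * yet only the software flag is flipped here; the registers are programmed
 * later from the postinstall hook.
 */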
3423 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3424 {
3425         assert_spin_locked(&dev_priv->irq_lock);
3426
3427         if (dev_priv->display_irqs_enabled)
3428                 return;
3429
3430         dev_priv->display_irqs_enabled = true;
3431
3432         if (intel_irqs_enabled(dev_priv))
3433                 valleyview_display_irqs_install(dev_priv);
3434 }
3435
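/**
 * valleyview_disable_display_irqs - disable VLV/CHV display interrupts
 * @dev_priv: i915 device instance
 *
 * Counterpart to valleyview_enable_display_irqs(); caller must hold
 * dev_priv->irq_lock.
 */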
3436 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3437 {
3438         assert_spin_locked(&dev_priv->irq_lock);
3439
3440         if (!dev_priv->display_irqs_enabled)
3441                 return;
3442
3443         dev_priv->display_irqs_enabled = false;
3444
3445         if (intel_irqs_enabled(dev_priv))
3446                 valleyview_display_irqs_uninstall(dev_priv);
3447 }
3448
3449 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3450 {
3451         dev_priv->irq_mask = ~0;
3452
3453         I915_WRITE(PORT_HOTPLUG_EN, 0);
3454         POSTING_READ(PORT_HOTPLUG_EN);
3455
3456         I915_WRITE(VLV_IIR, 0xffffffff);
3457         I915_WRITE(VLV_IIR, 0xffffffff);
3458         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3459         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3460         POSTING_READ(VLV_IMR);
3461
3462         /* Interrupt setup is already guaranteed to be single-threaded, this is
3463          * just to make the assert_spin_locked check happy. */
3464         spin_lock_irq(&dev_priv->irq_lock);
3465         if (dev_priv->display_irqs_enabled)
3466                 valleyview_display_irqs_install(dev_priv);
3467         spin_unlock_irq(&dev_priv->irq_lock);
3468 }
3469
3470 static int valleyview_irq_postinstall(struct drm_device *dev)
3471 {
3472         struct drm_i915_private *dev_priv = dev->dev_private;
3473
3474         vlv_display_irq_postinstall(dev_priv);
3475
3476         gen5_gt_irq_postinstall(dev);
3477
3478         /* ack & enable invalid PTE error interrupts */
3479 #if 0 /* FIXME: add support to irq handler for checking these bits */
3480         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3481         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3482 #endif
3483
3484         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3485
3486         return 0;
3487 }
3488
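/*
 * The four GEN8 GT IIR banks group the engines as: 0 = RCS/BCS,
 * 1 = VCS1/VCS2, 2 = PM/RPS (left masked here, managed on demand),
 * 3 = VECS.
 */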
3489 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3490 {
3491         /* These are interrupts we'll toggle with the ring mask register */
3492         uint32_t gt_interrupts[] = {
3493                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3494                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3495                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3496                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3497                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3498                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3499                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3500                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3501                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3502                 0,
3503                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3504                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3505                 };
3506
3507         dev_priv->pm_irq_mask = 0xffffffff;
3508         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3509         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3510         /*
3511          * RPS interrupts will get enabled/disabled on demand when RPS itself
3512          * is enabled/disabled.
3513          */
3514         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3515         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3516 }
3517
3518 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3519 {
3520         uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3521         uint32_t de_pipe_enables;
3522         int pipe;
3523         u32 de_port_en = GEN8_AUX_CHANNEL_A;
3524
3525         if (IS_GEN9(dev_priv)) {
3526                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3527                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3528                 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3529                         GEN9_AUX_CHANNEL_D;
3530
3531                 if (IS_BROXTON(dev_priv))
3532                         de_port_en |= BXT_DE_PORT_GMBUS;
3533         } else
3534                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3535                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3536
3537         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3538                                            GEN8_PIPE_FIFO_UNDERRUN;
3539
3540         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3541         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3542         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3543
3544         for_each_pipe(dev_priv, pipe)
3545                 if (intel_display_power_is_enabled(dev_priv,
3546                                 POWER_DOMAIN_PIPE(pipe)))
3547                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3548                                           dev_priv->de_irq_mask[pipe],
3549                                           de_pipe_enables);
3550
3551         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
3552 }
3553
3554 static int gen8_irq_postinstall(struct drm_device *dev)
3555 {
3556         struct drm_i915_private *dev_priv = dev->dev_private;
3557
3558         if (HAS_PCH_SPLIT(dev))
3559                 ibx_irq_pre_postinstall(dev);
3560
3561         gen8_gt_irq_postinstall(dev_priv);
3562         gen8_de_irq_postinstall(dev_priv);
3563
3564         if (HAS_PCH_SPLIT(dev))
3565                 ibx_irq_postinstall(dev);
3566
3567         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3568         POSTING_READ(GEN8_MASTER_IRQ);
3569
3570         return 0;
3571 }
3572
3573 static int cherryview_irq_postinstall(struct drm_device *dev)
3574 {
3575         struct drm_i915_private *dev_priv = dev->dev_private;
3576
3577         vlv_display_irq_postinstall(dev_priv);
3578
3579         gen8_gt_irq_postinstall(dev_priv);
3580
3581         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3582         POSTING_READ(GEN8_MASTER_IRQ);
3583
3584         return 0;
3585 }
3586
3587 static void gen8_irq_uninstall(struct drm_device *dev)
3588 {
3589         struct drm_i915_private *dev_priv = dev->dev_private;
3590
3591         if (!dev_priv)
3592                 return;
3593
3594         gen8_irq_reset(dev);
3595 }
3596
3597 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3598 {
3599         /* Interrupt setup is already guaranteed to be single-threaded, this is
3600          * just to make the assert_spin_locked check happy. */
3601         spin_lock_irq(&dev_priv->irq_lock);
3602         if (dev_priv->display_irqs_enabled)
3603                 valleyview_display_irqs_uninstall(dev_priv);
3604         spin_unlock_irq(&dev_priv->irq_lock);
3605
3606         vlv_display_irq_reset(dev_priv);
3607
3608         dev_priv->irq_mask = ~0;
3609 }
3610
3611 static void valleyview_irq_uninstall(struct drm_device *dev)
3612 {
3613         struct drm_i915_private *dev_priv = dev->dev_private;
3614
3615         if (!dev_priv)
3616                 return;
3617
3618         I915_WRITE(VLV_MASTER_IER, 0);
3619
3620         gen5_gt_irq_reset(dev);
3621
3622         I915_WRITE(HWSTAM, 0xffffffff);
3623
3624         vlv_display_irq_uninstall(dev_priv);
3625 }
3626
3627 static void cherryview_irq_uninstall(struct drm_device *dev)
3628 {
3629         struct drm_i915_private *dev_priv = dev->dev_private;
3630
3631         if (!dev_priv)
3632                 return;
3633
3634         I915_WRITE(GEN8_MASTER_IRQ, 0);
3635         POSTING_READ(GEN8_MASTER_IRQ);
3636
3637         gen8_gt_irq_reset(dev_priv);
3638
3639         GEN5_IRQ_RESET(GEN8_PCU_);
3640
3641         vlv_display_irq_uninstall(dev_priv);
3642 }
3643
3644 static void ironlake_irq_uninstall(struct drm_device *dev)
3645 {
3646         struct drm_i915_private *dev_priv = dev->dev_private;
3647
3648         if (!dev_priv)
3649                 return;
3650
3651         ironlake_irq_reset(dev);
3652 }
3653
3654 static void i8xx_irq_preinstall(struct drm_device * dev)
3655 {
3656         struct drm_i915_private *dev_priv = dev->dev_private;
3657         int pipe;
3658
3659         for_each_pipe(dev_priv, pipe)
3660                 I915_WRITE(PIPESTAT(pipe), 0);
3661         I915_WRITE16(IMR, 0xffff);
3662         I915_WRITE16(IER, 0x0);
3663         POSTING_READ16(IER);
3664 }
3665
3666 static int i8xx_irq_postinstall(struct drm_device *dev)
3667 {
3668         struct drm_i915_private *dev_priv = dev->dev_private;
3669
3670         I915_WRITE16(EMR,
3671                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3672
3673         /* Unmask the interrupts that we always want on. */
3674         dev_priv->irq_mask =
3675                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3676                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3677                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3678                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3679         I915_WRITE16(IMR, dev_priv->irq_mask);
3680
3681         I915_WRITE16(IER,
3682                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3683                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3684                      I915_USER_INTERRUPT);
3685         POSTING_READ16(IER);
3686
3687         /* Interrupt setup is already guaranteed to be single-threaded, this is
3688          * just to make the assert_spin_locked check happy. */
3689         spin_lock_irq(&dev_priv->irq_lock);
3690         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3691         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3692         spin_unlock_irq(&dev_priv->irq_lock);
3693
3694         return 0;
3695 }
3696
3697 /*
3698  * Returns true when a page flip has completed.
3699  */
3700 static bool i8xx_handle_vblank(struct drm_device *dev,
3701                                int plane, int pipe, u32 iir)
3702 {
3703         struct drm_i915_private *dev_priv = dev->dev_private;
3704         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3705
3706         if (!intel_pipe_handle_vblank(dev, pipe))
3707                 return false;
3708
3709         if ((iir & flip_pending) == 0)
3710                 goto check_page_flip;
3711
3712         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3713          * to '0' on the following vblank, i.e. IIR has the Pendingflip
3714          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3715          * the flip is completed (no longer pending). Since this doesn't raise
3716          * an interrupt per se, we watch for the change at vblank.
3717          */
3718         if (I915_READ16(ISR) & flip_pending)
3719                 goto check_page_flip;
3720
3721         intel_prepare_page_flip(dev, plane);
3722         intel_finish_page_flip(dev, pipe);
3723         return true;
3724
3725 check_page_flip:
3726         intel_check_page_flip(dev, pipe);
3727         return false;
3728 }
3729
3730 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3731 {
3732         struct drm_device *dev = arg;
3733         struct drm_i915_private *dev_priv = dev->dev_private;
3734         u16 iir, new_iir;
3735         u32 pipe_stats[2];
3736         int pipe;
3737         u16 flip_mask =
3738                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3739                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3740
3741         if (!intel_irqs_enabled(dev_priv))
3742                 return IRQ_NONE;
3743
3744         iir = I915_READ16(IIR);
3745         if (iir == 0)
3746                 return IRQ_NONE;
3747
3748         while (iir & ~flip_mask) {
3749                 /* Can't rely on pipestat interrupt bit in iir as it might
3750                  * have been cleared after the pipestat interrupt was received.
3751                  * It doesn't set the bit in iir again, but it still produces
3752                  * interrupts (for non-MSI).
3753                  */
3754                 spin_lock(&dev_priv->irq_lock);
3755                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3756                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3757
3758                 for_each_pipe(dev_priv, pipe) {
3759                         int reg = PIPESTAT(pipe);
3760                         pipe_stats[pipe] = I915_READ(reg);
3761
3762                         /*
3763                          * Clear the PIPE*STAT regs before the IIR
3764                          */
3765                         if (pipe_stats[pipe] & 0x8000ffff)
3766                                 I915_WRITE(reg, pipe_stats[pipe]);
3767                 }
3768                 spin_unlock(&dev_priv->irq_lock);
3769
3770                 I915_WRITE16(IIR, iir & ~flip_mask);
3771                 new_iir = I915_READ16(IIR); /* Flush posted writes */
3772
3773                 if (iir & I915_USER_INTERRUPT)
3774                         notify_ring(&dev_priv->ring[RCS]);
3775
3776                 for_each_pipe(dev_priv, pipe) {
3777                         int plane = pipe;
3778                         if (HAS_FBC(dev))
3779                                 plane = !plane;
3780
3781                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3782                             i8xx_handle_vblank(dev, plane, pipe, iir))
3783                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3784
3785                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3786                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3787
3788                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3789                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3790                                                                     pipe);
3791                 }
3792
3793                 iir = new_iir;
3794         }
3795
3796         return IRQ_HANDLED;
3797 }
3798
3799 static void i8xx_irq_uninstall(struct drm_device * dev)
3800 {
3801         struct drm_i915_private *dev_priv = dev->dev_private;
3802         int pipe;
3803
3804         for_each_pipe(dev_priv, pipe) {
3805                 /* Clear enable bits; then clear status bits */
3806                 I915_WRITE(PIPESTAT(pipe), 0);
3807                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3808         }
3809         I915_WRITE16(IMR, 0xffff);
3810         I915_WRITE16(IER, 0x0);
3811         I915_WRITE16(IIR, I915_READ16(IIR));
3812 }
3813
3814 static void i915_irq_preinstall(struct drm_device * dev)
3815 {
3816         struct drm_i915_private *dev_priv = dev->dev_private;
3817         int pipe;
3818
3819         if (I915_HAS_HOTPLUG(dev)) {
3820                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3821                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3822         }
3823
3824         I915_WRITE16(HWSTAM, 0xeffe);
3825         for_each_pipe(dev_priv, pipe)
3826                 I915_WRITE(PIPESTAT(pipe), 0);
3827         I915_WRITE(IMR, 0xffffffff);
3828         I915_WRITE(IER, 0x0);
3829         POSTING_READ(IER);
3830 }
3831
3832 static int i915_irq_postinstall(struct drm_device *dev)
3833 {
3834         struct drm_i915_private *dev_priv = dev->dev_private;
3835         u32 enable_mask;
3836
3837         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3838
3839         /* Unmask the interrupts that we always want on. */
3840         dev_priv->irq_mask =
3841                 ~(I915_ASLE_INTERRUPT |
3842                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3843                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3844                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3845                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3846
3847         enable_mask =
3848                 I915_ASLE_INTERRUPT |
3849                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3850                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3851                 I915_USER_INTERRUPT;
3852
3853         if (I915_HAS_HOTPLUG(dev)) {
3854                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3855                 POSTING_READ(PORT_HOTPLUG_EN);
3856
3857                 /* Enable in IER... */
3858                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3859                 /* and unmask in IMR */
3860                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3861         }
3862
3863         I915_WRITE(IMR, dev_priv->irq_mask);
3864         I915_WRITE(IER, enable_mask);
3865         POSTING_READ(IER);
3866
3867         i915_enable_asle_pipestat(dev);
3868
3869         /* Interrupt setup is already guaranteed to be single-threaded, this is
3870          * just to make the assert_spin_locked check happy. */
3871         spin_lock_irq(&dev_priv->irq_lock);
3872         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3873         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3874         spin_unlock_irq(&dev_priv->irq_lock);
3875
3876         return 0;
3877 }
3878
3879 /*
3880  * Returns true when a page flip has completed.
3881  */
3882 static bool i915_handle_vblank(struct drm_device *dev,
3883                                int plane, int pipe, u32 iir)
3884 {
3885         struct drm_i915_private *dev_priv = dev->dev_private;
3886         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3887
3888         if (!intel_pipe_handle_vblank(dev, pipe))
3889                 return false;
3890
3891         if ((iir & flip_pending) == 0)
3892                 goto check_page_flip;
3893
3894         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3895          * to '0' on the following vblank, i.e. IIR has the Pendingflip
3896          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3897          * the flip is completed (no longer pending). Since this doesn't raise
3898          * an interrupt per se, we watch for the change at vblank.
3899          */
3900         if (I915_READ(ISR) & flip_pending)
3901                 goto check_page_flip;
3902
3903         intel_prepare_page_flip(dev, plane);
3904         intel_finish_page_flip(dev, pipe);
3905         return true;
3906
3907 check_page_flip:
3908         intel_check_page_flip(dev, pipe);
3909         return false;
3910 }
3911
3912 static irqreturn_t i915_irq_handler(int irq, void *arg)
3913 {
3914         struct drm_device *dev = arg;
3915         struct drm_i915_private *dev_priv = dev->dev_private;
3916         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3917         u32 flip_mask =
3918                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3919                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3920         int pipe, ret = IRQ_NONE;
3921
3922         if (!intel_irqs_enabled(dev_priv))
3923                 return IRQ_NONE;
3924
3925         iir = I915_READ(IIR);
3926         do {
3927                 bool irq_received = (iir & ~flip_mask) != 0;
3928                 bool blc_event = false;
3929
3930                 /* Can't rely on pipestat interrupt bit in iir as it might
3931                  * have been cleared after the pipestat interrupt was received.
3932                  * It doesn't set the bit in iir again, but it still produces
3933                  * interrupts (for non-MSI).
3934                  */
3935                 spin_lock(&dev_priv->irq_lock);
3936                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3937                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3938
3939                 for_each_pipe(dev_priv, pipe) {
3940                         int reg = PIPESTAT(pipe);
3941                         pipe_stats[pipe] = I915_READ(reg);
3942
3943                         /* Clear the PIPE*STAT regs before the IIR */
3944                         if (pipe_stats[pipe] & 0x8000ffff) {
3945                                 I915_WRITE(reg, pipe_stats[pipe]);
3946                                 irq_received = true;
3947                         }
3948                 }
3949                 spin_unlock(&dev_priv->irq_lock);
3950
3951                 if (!irq_received)
3952                         break;
3953
3954                 /* Consume port.  Then clear IIR or we'll miss events */
3955                 if (I915_HAS_HOTPLUG(dev) &&
3956                     iir & I915_DISPLAY_PORT_INTERRUPT)
3957                         i9xx_hpd_irq_handler(dev);
3958
3959                 I915_WRITE(IIR, iir & ~flip_mask);
3960                 new_iir = I915_READ(IIR); /* Flush posted writes */
3961
3962                 if (iir & I915_USER_INTERRUPT)
3963                         notify_ring(&dev_priv->ring[RCS]);
3964
3965                 for_each_pipe(dev_priv, pipe) {
3966                         int plane = pipe;
3967                         if (HAS_FBC(dev))
3968                                 plane = !plane;
3969
3970                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3971                             i915_handle_vblank(dev, plane, pipe, iir))
3972                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3973
3974                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3975                                 blc_event = true;
3976
3977                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3978                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3979
3980                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3981                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3982                                                                     pipe);
3983                 }
3984
3985                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3986                         intel_opregion_asle_intr(dev);
3987
3988                 /* With MSI, interrupts are only generated when iir
3989                  * transitions from zero to nonzero.  If another bit got
3990                  * set while we were handling the existing iir bits, then
3991                  * we would never get another interrupt.
3992                  *
3993                  * This is fine on non-MSI as well, as if we hit this path
3994                  * we avoid exiting the interrupt handler only to generate
3995                  * another one.
3996                  *
3997                  * Note that for MSI this could cause a stray interrupt report
3998                  * if an interrupt landed in the time between writing IIR and
3999                  * the posting read.  This should be rare enough to never
4000                  * trigger the 99% of 100,000 interrupts test for disabling
4001                  * stray interrupts.
4002                  */
4003                 ret = IRQ_HANDLED;
4004                 iir = new_iir;
4005         } while (iir & ~flip_mask);
4006
4007         return ret;
4008 }
4009
4010 static void i915_irq_uninstall(struct drm_device * dev)
4011 {
4012         struct drm_i915_private *dev_priv = dev->dev_private;
4013         int pipe;
4014
4015         if (I915_HAS_HOTPLUG(dev)) {
4016                 I915_WRITE(PORT_HOTPLUG_EN, 0);
4017                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4018         }
4019
4020         I915_WRITE16(HWSTAM, 0xffff);
4021         for_each_pipe(dev_priv, pipe) {
4022                 /* Clear enable bits; then clear status bits */
4023                 I915_WRITE(PIPESTAT(pipe), 0);
4024                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4025         }
4026         I915_WRITE(IMR, 0xffffffff);
4027         I915_WRITE(IER, 0x0);
4028
4029         I915_WRITE(IIR, I915_READ(IIR));
4030 }
4031
4032 static void i965_irq_preinstall(struct drm_device * dev)
4033 {
4034         struct drm_i915_private *dev_priv = dev->dev_private;
4035         int pipe;
4036
4037         I915_WRITE(PORT_HOTPLUG_EN, 0);
4038         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4039
4040         I915_WRITE(HWSTAM, 0xeffe);
4041         for_each_pipe(dev_priv, pipe)
4042                 I915_WRITE(PIPESTAT(pipe), 0);
4043         I915_WRITE(IMR, 0xffffffff);
4044         I915_WRITE(IER, 0x0);
4045         POSTING_READ(IER);
4046 }
4047
4048 static int i965_irq_postinstall(struct drm_device *dev)
4049 {
4050         struct drm_i915_private *dev_priv = dev->dev_private;
4051         u32 enable_mask;
4052         u32 error_mask;
4053
4054         /* Unmask the interrupts that we always want on. */
4055         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4056                                I915_DISPLAY_PORT_INTERRUPT |
4057                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4058                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4059                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4060                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4061                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4062
4063         enable_mask = ~dev_priv->irq_mask;
4064         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4065                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4066         enable_mask |= I915_USER_INTERRUPT;
4067
4068         if (IS_G4X(dev))
4069                 enable_mask |= I915_BSD_USER_INTERRUPT;
4070
4071         /* Interrupt setup is already guaranteed to be single-threaded, this is
4072          * just to make the assert_spin_locked check happy. */
4073         spin_lock_irq(&dev_priv->irq_lock);
4074         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4075         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4076         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4077         spin_unlock_irq(&dev_priv->irq_lock);
4078
4079         /*
4080          * Enable some error detection; note that the instruction error
4081          * mask bit is reserved, so we leave it masked.
4082          */
4083         if (IS_G4X(dev)) {
4084                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4085                                GM45_ERROR_MEM_PRIV |
4086                                GM45_ERROR_CP_PRIV |
4087                                I915_ERROR_MEMORY_REFRESH);
4088         } else {
4089                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4090                                I915_ERROR_MEMORY_REFRESH);
4091         }
4092         I915_WRITE(EMR, error_mask);
4093
4094         I915_WRITE(IMR, dev_priv->irq_mask);
4095         I915_WRITE(IER, enable_mask);
4096         POSTING_READ(IER);
4097
4098         I915_WRITE(PORT_HOTPLUG_EN, 0);
4099         POSTING_READ(PORT_HOTPLUG_EN);
4100
4101         i915_enable_asle_pipestat(dev);
4102
4103         return 0;
4104 }
4105
4106 static void i915_hpd_irq_setup(struct drm_device *dev)
4107 {
4108         struct drm_i915_private *dev_priv = dev->dev_private;
4109         struct intel_encoder *intel_encoder;
4110         u32 hotplug_en;
4111
4112         assert_spin_locked(&dev_priv->irq_lock);
4113
4114         hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4115         hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4116         /* Note HDMI and DP share hotplug bits */
4117         /* enable bits are the same for all generations */
4118         for_each_intel_encoder(dev, intel_encoder)
4119                 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4120                         hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4121         /* Programming the CRT detection parameters tends
4122            to generate a spurious hotplug event about three
4123            seconds later.  So just do it once.
4124         */
4125         if (IS_G4X(dev))
4126                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4127         hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4128         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4129
4130         /* Ignore TV since it's buggy */
4131         I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4132 }
4133
4134 static irqreturn_t i965_irq_handler(int irq, void *arg)
4135 {
4136         struct drm_device *dev = arg;
4137         struct drm_i915_private *dev_priv = dev->dev_private;
4138         u32 iir, new_iir;
4139         u32 pipe_stats[I915_MAX_PIPES];
4140         int ret = IRQ_NONE, pipe;
4141         u32 flip_mask =
4142                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4143                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4144
4145         if (!intel_irqs_enabled(dev_priv))
4146                 return IRQ_NONE;
4147
4148         iir = I915_READ(IIR);
4149
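             /*
              * Keep handling events, re-reading IIR after each ack, until no new
              * bits show up; see the note on MSI at the bottom of the loop.
              */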
4150         for (;;) {
4151                 bool irq_received = (iir & ~flip_mask) != 0;
4152                 bool blc_event = false;
4153
4154                 /* Can't rely on pipestat interrupt bit in iir as it might
4155                  * have been cleared after the pipestat interrupt was received.
4156                  * It doesn't set the bit in iir again, but it still produces
4157                  * interrupts (for non-MSI).
4158                  */
4159                 spin_lock(&dev_priv->irq_lock);
4160                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4161                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4162
4163                 for_each_pipe(dev_priv, pipe) {
4164                         int reg = PIPESTAT(pipe);
4165                         pipe_stats[pipe] = I915_READ(reg);
4166
4167                         /*
4168                          * Clear the PIPE*STAT regs before the IIR
4169                          */
4170                         if (pipe_stats[pipe] & 0x8000ffff) {
4171                                 I915_WRITE(reg, pipe_stats[pipe]);
4172                                 irq_received = true;
4173                         }
4174                 }
4175                 spin_unlock(&dev_priv->irq_lock);
4176
4177                 if (!irq_received)
4178                         break;
4179
4180                 ret = IRQ_HANDLED;
4181
4182                 /* Consume port.  Then clear IIR or we'll miss events */
4183                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4184                         i9xx_hpd_irq_handler(dev);
4185
4186                 I915_WRITE(IIR, iir & ~flip_mask);
4187                 new_iir = I915_READ(IIR); /* Flush posted writes */
4188
4189                 if (iir & I915_USER_INTERRUPT)
4190                         notify_ring(&dev_priv->ring[RCS]);
4191                 if (iir & I915_BSD_USER_INTERRUPT)
4192                         notify_ring(&dev_priv->ring[VCS]);
4193
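                     /*
                      * On i965/g4x the primary planes have a fixed 1:1 mapping to
                      * the pipes, hence the pipe is also passed as the plane below.
                      */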
4194                 for_each_pipe(dev_priv, pipe) {
4195                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4196                             i915_handle_vblank(dev, pipe, pipe, iir))
4197                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4198
4199                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4200                                 blc_event = true;
4201
4202                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4203                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4204
4205                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4206                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4207                 }
4208
4209                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4210                         intel_opregion_asle_intr(dev);
4211
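                     /*
                      * The GMBUS status bit is only enabled on pipe A's PIPESTAT
                      * (see i965_irq_postinstall()), so only pipe A is checked.
                      */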
4212                 if (pipe_stats[PIPE_A] & PIPE_GMBUS_INTERRUPT_STATUS)
4213                         gmbus_irq_handler(dev);
4214
4215                 /* With MSI, interrupts are only generated when iir
4216                  * transitions from zero to nonzero.  If another bit got
4217                  * set while we were handling the existing iir bits, then
4218                  * we would never get another interrupt.
4219                  *
4220                  * This is fine on non-MSI as well, as if we hit this path
4221                  * we avoid exiting the interrupt handler only to generate
4222                  * another one.
4223                  *
4224                  * Note that for MSI this could cause a stray interrupt report
4225                  * if an interrupt landed in the time between writing IIR and
4226                  * the posting read.  This should be rare enough to never
4227                  * trigger the 99% of 100,000 interrupts test for disabling
4228                  * stray interrupts.
4229                  */
4230                 iir = new_iir;
4231         }
4232
4233         return ret;
4234 }
4235
4236 static void i965_irq_uninstall(struct drm_device *dev)
4237 {
4238         struct drm_i915_private *dev_priv = dev->dev_private;
4239         int pipe;
4240
4241         if (!dev_priv)
4242                 return;
4243
4244         I915_WRITE(PORT_HOTPLUG_EN, 0);
4245         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4246
4247         I915_WRITE(HWSTAM, 0xffffffff);
4248         for_each_pipe(dev_priv, pipe)
4249                 I915_WRITE(PIPESTAT(pipe), 0);
4250         I915_WRITE(IMR, 0xffffffff);
4251         I915_WRITE(IER, 0x0);
4252
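             /* Ack any pipe status and IIR bits that are still pending. */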
4253         for_each_pipe(dev_priv, pipe)
4254                 I915_WRITE(PIPESTAT(pipe),
4255                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4256         I915_WRITE(IIR, I915_READ(IIR));
4257 }
4258
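     /*
      * Delayed work that re-enables HPD pins previously marked HPD_DISABLED
      * (typically by the interrupt-storm handling elsewhere in this file) and
      * restores HPD-based detection on the affected connectors.
      */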
4259 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4260 {
4261         struct drm_i915_private *dev_priv =
4262                 container_of(work, typeof(*dev_priv),
4263                              hotplug_reenable_work.work);
4264         struct drm_device *dev = dev_priv->dev;
4265         struct drm_mode_config *mode_config = &dev->mode_config;
4266         int i;
4267
4268         intel_runtime_pm_get(dev_priv);
4269
4270         spin_lock_irq(&dev_priv->irq_lock);
4271         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4272                 struct drm_connector *connector;
4273
4274                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4275                         continue;
4276
4277                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4278
4279                 list_for_each_entry(connector, &mode_config->connector_list, head) {
4280                         struct intel_connector *intel_connector = to_intel_connector(connector);
4281
4282                         if (intel_connector->encoder->hpd_pin == i) {
4283                                 if (connector->polled != intel_connector->polled)
4284                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4285                                                          connector->name);
4286                                 connector->polled = intel_connector->polled;
4287                                 if (!connector->polled)
4288                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4289                         }
4290                 }
4291         }
4292         if (dev_priv->display.hpd_irq_setup)
4293                 dev_priv->display.hpd_irq_setup(dev);
4294         spin_unlock_irq(&dev_priv->irq_lock);
4295
4296         intel_runtime_pm_put(dev_priv);
4297 }
4298
4299 /**
4300  * intel_irq_init - initializes irq support
4301  * @dev_priv: i915 device instance
4302  *
4303  * This function initializes all the irq support including work items, timers
4304  * and all the vtables. It does not set up the interrupt itself, though.
4305  */
4306 void intel_irq_init(struct drm_i915_private *dev_priv)
4307 {
4308         struct drm_device *dev = dev_priv->dev;
4309
4310         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4311         INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4312         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4313         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4314
4315         /* Let's track the enabled rps events */
4316         if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4317                 /* WaGsvRC0ResidencyMethod:vlv */
4318                 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4319         else
4320                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4321
4322         INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4323                           i915_hangcheck_elapsed);
4324         INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4325                           intel_hpd_irq_reenable_work);
4326
4327         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4328
4329         if (IS_GEN2(dev_priv)) {
4330                 dev->max_vblank_count = 0;
4331                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4332         } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4333                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4334                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4335         } else {
4336                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4337                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4338         }
4339
4340         /*
4341          * Opt out of the vblank disable timer on everything except gen2.
4342          * Gen2 doesn't have a hardware frame counter and so depends on
4343          * vblank interrupts to produce sane vblank sequence numbers.
4344          */
4345         if (!IS_GEN2(dev_priv))
4346                 dev->vblank_disable_immediate = true;
4347
4348         dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4349         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4350
4351         if (IS_CHERRYVIEW(dev_priv)) {
4352                 dev->driver->irq_handler = cherryview_irq_handler;
4353                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4354                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4355                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4356                 dev->driver->enable_vblank = valleyview_enable_vblank;
4357                 dev->driver->disable_vblank = valleyview_disable_vblank;
4358                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4359         } else if (IS_VALLEYVIEW(dev_priv)) {
4360                 dev->driver->irq_handler = valleyview_irq_handler;
4361                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4362                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4363                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4364                 dev->driver->enable_vblank = valleyview_enable_vblank;
4365                 dev->driver->disable_vblank = valleyview_disable_vblank;
4366                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4367         } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4368                 dev->driver->irq_handler = gen8_irq_handler;
4369                 dev->driver->irq_preinstall = gen8_irq_reset;
4370                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4371                 dev->driver->irq_uninstall = gen8_irq_uninstall;
4372                 dev->driver->enable_vblank = gen8_enable_vblank;
4373                 dev->driver->disable_vblank = gen8_disable_vblank;
4374                 if (HAS_PCH_SPLIT(dev))
4375                         dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4376                 else
4377                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4378         } else if (HAS_PCH_SPLIT(dev)) {
4379                 dev->driver->irq_handler = ironlake_irq_handler;
4380                 dev->driver->irq_preinstall = ironlake_irq_reset;
4381                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4382                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4383                 dev->driver->enable_vblank = ironlake_enable_vblank;
4384                 dev->driver->disable_vblank = ironlake_disable_vblank;
4385                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4386         } else {
4387                 if (INTEL_INFO(dev_priv)->gen == 2) {
4388                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
4389                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4390                         dev->driver->irq_handler = i8xx_irq_handler;
4391                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
4392                 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4393                         dev->driver->irq_preinstall = i915_irq_preinstall;
4394                         dev->driver->irq_postinstall = i915_irq_postinstall;
4395                         dev->driver->irq_uninstall = i915_irq_uninstall;
4396                         dev->driver->irq_handler = i915_irq_handler;
4397                 } else {
4398                         dev->driver->irq_preinstall = i965_irq_preinstall;
4399                         dev->driver->irq_postinstall = i965_irq_postinstall;
4400                         dev->driver->irq_uninstall = i965_irq_uninstall;
4401                         dev->driver->irq_handler = i965_irq_handler;
4402                 }
4403                 if (I915_HAS_HOTPLUG(dev_priv))
4404                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4405                 dev->driver->enable_vblank = i915_enable_vblank;
4406                 dev->driver->disable_vblank = i915_disable_vblank;
4407         }
4408 }
4409
4410 /**
4411  * intel_hpd_init - initializes and enables hpd support
4412  * @dev_priv: i915 device instance
4413  *
4414  * This function enables the hotplug support. It requires that interrupts have
4415  * already been enabled with intel_irq_install(). From this point on hotplug and
4416  * poll requests can run concurrently with other code, so locking rules must be
4417  * obeyed.
4418  *
4419  * This is a separate step from interrupt enabling to simplify the locking rules
4420  * in the driver load and resume code.
4421  */
4422 void intel_hpd_init(struct drm_i915_private *dev_priv)
4423 {
4424         struct drm_device *dev = dev_priv->dev;
4425         struct drm_mode_config *mode_config = &dev->mode_config;
4426         struct drm_connector *connector;
4427         int i;
4428
4429         for (i = 1; i < HPD_NUM_PINS; i++) {
4430                 dev_priv->hpd_stats[i].hpd_cnt = 0;
4431                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4432         }
4433         list_for_each_entry(connector, &mode_config->connector_list, head) {
4434                 struct intel_connector *intel_connector = to_intel_connector(connector);
4435                 connector->polled = intel_connector->polled;
4436                 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4437                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4438                 if (intel_connector->mst_port)
4439                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4440         }
4441
4442         /* Interrupt setup is already guaranteed to be single-threaded; this is
4443          * just to make the assert_spin_locked checks happy. */
4444         spin_lock_irq(&dev_priv->irq_lock);
4445         if (dev_priv->display.hpd_irq_setup)
4446                 dev_priv->display.hpd_irq_setup(dev);
4447         spin_unlock_irq(&dev_priv->irq_lock);
4448 }
4449
4450 /**
4451  * intel_irq_install - enables the hardware interrupt
4452  * @dev_priv: i915 device instance
4453  *
4454  * This function enables the hardware interrupt handling, but leaves hotplug
4455  * handling disabled. It is called after intel_irq_init().
4456  *
4457  * In the driver load and resume code we need working interrupts in a few places
4458  * but don't want to deal with the hassle of concurrent probe and hotplug
4459  * workers. Hence the split into this two-stage approach.
4460  */
4461 int intel_irq_install(struct drm_i915_private *dev_priv)
4462 {
4463         /*
4464          * We enable some interrupt sources in our postinstall hooks, so mark
4465          * interrupts as enabled _before_ actually enabling them to avoid
4466          * special cases in our ordering checks.
4467          */
4468         dev_priv->pm.irqs_enabled = true;
4469
4470         return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4471 }
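     /*
      * Rough sketch of the init ordering implied by the kernel-doc above; the
      * actual call sites live outside this file and may differ in detail:
      *
      *   intel_irq_init(dev_priv);     - vtables, work items, timers
      *   intel_irq_install(dev_priv);  - request and enable the hw interrupt
      *   intel_hpd_init(dev_priv);     - finally enable hotplug handling
      */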
4472
4473 /**
4474  * intel_irq_uninstall - finalizes all irq handling
4475  * @dev_priv: i915 device instance
4476  *
4477  * This stops interrupt and hotplug handling and unregisters and frees all
4478  * resources acquired in the init functions.
4479  */
4480 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4481 {
4482         drm_irq_uninstall(dev_priv->dev);
4483         intel_hpd_cancel_work(dev_priv);
4484         dev_priv->pm.irqs_enabled = false;
4485 }
4486
4487 /**
4488  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4489  * @dev_priv: i915 device instance
4490  *
4491  * This function is used to disable interrupts at runtime, both in the runtime
4492  * pm and the system suspend/resume code.
4493  */
4494 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4495 {
4496         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4497         dev_priv->pm.irqs_enabled = false;
4498         synchronize_irq(dev_priv->dev->irq);
4499 }
4500
4501 /**
4502  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4503  * @dev_priv: i915 device instance
4504  *
4505  * This function is used to enable interrupts at runtime, both in the runtime
4506  * pm and the system suspend/resume code.
4507  */
4508 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4509 {
4510         dev_priv->pm.irqs_enabled = true;
4511         dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4512         dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4513 }