/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
        u32 val = I915_READ(reg); \
        if (val) { \
                WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
                     (reg), val); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
        } \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)

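/*
 * Sketch of typical usage at irq postinstall time (hypothetical caller,
 * shown for illustration only):
 *
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_enables);
 *
 * i.e. first assert that GTIIR is already clear, then program GTIER and
 * GTIMR, finishing with a posting read to flush the writes.
 */
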
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

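        /*
         * IMR semantics: a set bit masks (disables) that interrupt. Clear
         * every bit selected by interrupt_mask, then re-set the ones that
         * are not in enabled_irq_mask, so only the requested interrupts
         * end up unmasked.
         */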
        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
                                  uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg = gen6_pm_iir(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
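        /*
         * IIR can queue up to two identical events, so write it twice to
         * make sure it is really clear (cf. the GEN*_IRQ_RESET macros above).
         */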
        I915_WRITE(reg, dev_priv->pm_rps_events);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        POSTING_READ(reg);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);

        WARN_ON(dev_priv->rps.pm_iir);
        WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                                dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
        /*
         * SNB and IVB can hard hang, and VLV/CHV may hard hang, on a
         * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
                mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

        if (INTEL_INFO(dev_priv)->gen >= 8)
                mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

        return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->rps.work);

        spin_lock_irq(&dev_priv->irq_lock);

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
                                ~dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

        dev_priv->rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
        u32 enable_mask = status_mask << 16;
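        /*
         * PIPESTAT packs the interrupt enable bits in the upper 16 bits
         * and the corresponding status bits in the lower 16, so for most
         * events the enable bit is simply the status bit shifted up.
         */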

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

        return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                     u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                      u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct intel_crtc *intel_crtc =
                        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                const struct drm_display_mode *mode =
                        &intel_crtc->config->base.adjusted_mode;

                htotal = mode->crtc_htotal;
                hsync_start = mode->crtc_hsync_start;
                vbl_start = mode->crtc_vblank_start;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        vbl_start = DIV_ROUND_UP(vbl_start, 2);
        } else {
                enum transcoder cpu_transcoder = (enum transcoder) pipe;

                htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
                hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
                vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
                if ((I915_READ(PIPECONF(cpu_transcoder)) &
                     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
                        vbl_start = DIV_ROUND_UP(vbl_start, 2);
        }

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;
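
        /*
         * Worked example (hypothetical 1920x1080 mode): with htotal=2200,
         * hsync_start=2008 and vblank_start=1080, vbl_start becomes
         * 1080 * 2200 - (2200 - 2008) = 2375808, i.e. the pixel counter
         * value at the hsync immediately preceding the first vblank line.
         */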

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev))
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (!intel_crtc->active) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc,
                                                     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
                                struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        if (old_status == connector->status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      connector->name,
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->status));

        return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, dig_port_work);
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i;
        u32 old_bits = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->long_hpd_port_mask;
        dev_priv->long_hpd_port_mask = 0;
        short_port_mask = dev_priv->short_hpd_port_mask;
        dev_priv->short_hpd_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
                bool long_hpd = false;
                intel_dig_port = dev_priv->hpd_irq_port[i];
                if (!intel_dig_port || !intel_dig_port->hpd_pulse)
                        continue;

                if (long_port_mask & (1 << i)) {
                        valid = true;
                        long_hpd = true;
                } else if (short_port_mask & (1 << i))
                        valid = true;

                if (valid) {
                        enum irqreturn ret;

                        ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
                        if (ret == IRQ_NONE) {
                                /* fall back to old school hpd */
                                old_bits |= (1 << intel_dig_port->base.hpd_pin);
                        }
                }
        }

        if (old_bits) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hpd_event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
                schedule_work(&dev_priv->hotplug_work);
        }
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                connector->name);
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      connector->name, intel_encoder->hpd_pin);
                }
        }
        /*
         * If there were no outputs to poll, polling was disabled, so make
         * sure it is re-enabled now that HPD is being disabled on some
         * connectors.
         */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
                                 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irq(&dev_priv->irq_lock);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void notify_ring(struct drm_device *dev,
                        struct intel_engine_cs *ring)
{
        if (!intel_ring_initialized(ring))
                return;

        trace_i915_gem_request_notify(ring);

        wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
                            struct intel_rps_ei *rps_ei)
{
        u32 cz_ts, cz_freq_khz;
        u32 render_count, media_count;
        u32 elapsed_render, elapsed_media, elapsed_time;
        u32 residency = 0;

        cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

        render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
        media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

        if (rps_ei->cz_clock == 0) {
                rps_ei->cz_clock = cz_ts;
                rps_ei->render_c0 = render_count;
                rps_ei->media_c0 = media_count;

                return dev_priv->rps.cur_freq;
        }

        elapsed_time = cz_ts - rps_ei->cz_clock;
        rps_ei->cz_clock = cz_ts;

        elapsed_render = render_count - rps_ei->render_c0;
        rps_ei->render_c0 = render_count;

        elapsed_media = media_count - rps_ei->media_c0;
        rps_ei->media_c0 = media_count;

        /* Convert all the counters into a common unit of milliseconds */
        elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
        elapsed_render /= cz_freq_khz;
        elapsed_media /= cz_freq_khz;

        /*
         * Calculate overall C0 residency percentage
         * only if elapsed time is non zero
         */
        if (elapsed_time) {
                residency =
                        ((max(elapsed_render, elapsed_media) * 100)
                                / elapsed_time);
        }

        return residency;
}

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busyness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
        u32 residency_C0_up = 0, residency_C0_down = 0;
        int new_delay, adj;

        dev_priv->rps.ei_interrupt_count++;

        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (dev_priv->rps.up_ei.cz_clock == 0) {
                vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
                vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
                return dev_priv->rps.cur_freq;
        }

        /*
         * To throttle down, C0 residency should be less than the down
         * threshold for continuous EI intervals. So sample the down EI
         * counters only once every VLV_INT_COUNT_FOR_DOWN_EI interrupts.
         */
        if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

                dev_priv->rps.ei_interrupt_count = 0;

                residency_C0_down = vlv_c0_residency(dev_priv,
                                                     &dev_priv->rps.down_ei);
        } else {
                residency_C0_up = vlv_c0_residency(dev_priv,
                                                   &dev_priv->rps.up_ei);
        }

        new_delay = dev_priv->rps.cur_freq;

        adj = dev_priv->rps.last_adj;
        /* C0 residency is greater than UP threshold. Increase Frequency */
        if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else
                        adj = 1;

                if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;

        } else if (!dev_priv->rps.ei_interrupt_count &&
                        (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
                /*
                 * This means, C0 residency is less than down threshold over
                 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
                 */
                if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;
        }

        return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, rps.work);
        u32 pm_iir;
        int new_delay, adj;

        spin_lock_irq(&dev_priv->irq_lock);
        /* Speed up work cancelation during disabling rps interrupts. */
        if (!dev_priv->rps.interrupts_enabled) {
                spin_unlock_irq(&dev_priv->irq_lock);
                return;
        }
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

        if ((pm_iir & dev_priv->pm_rps_events) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        adj = dev_priv->rps.last_adj;
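        /*
         * The step size adapts: repeated adjustments in the same direction
         * double the previous step, while a direction change (or an unknown
         * event) resets it, giving an exponential ramp towards the new
         * operating point.
         */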
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else {
                        /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
                }
                new_delay = dev_priv->rps.cur_freq + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else {
                        /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
                }
                new_delay = dev_priv->rps.cur_freq + adj;
        } else { /* unknown event */
                new_delay = dev_priv->rps.cur_freq;
        }

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay = clamp_t(int, new_delay,
                            dev_priv->rps.min_freq_softlimit,
                            dev_priv->rps.max_freq_softlimit);

        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

        if (IS_VALLEYVIEW(dev_priv->dev))
                valleyview_set_rps(dev_priv->dev, new_delay);
        else
                gen6_set_rps(dev_priv->dev, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1 + (slice * 0x200);
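                /* Each slice's L3 error status register is spaced 0x200 apart. */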

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irq(&dev_priv->irq_lock);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_L3_DPF(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                                       struct drm_i915_private *dev_priv,
                                       u32 master_ctl)
{
        struct intel_engine_cs *ring;
        u32 rcs, bcs, vcs;
        uint32_t tmp = 0;
        irqreturn_t ret = IRQ_NONE;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                tmp = I915_READ(GEN8_GT_IIR(0));
                if (tmp) {
                        I915_WRITE(GEN8_GT_IIR(0), tmp);
                        ret = IRQ_HANDLED;

1405                         rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1406                         ring = &dev_priv->ring[RCS];
1407                         if (rcs & GT_RENDER_USER_INTERRUPT)
1408                                 notify_ring(dev, ring);
1409                         if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1410                                 intel_lrc_irq_handler(ring);
1411
1412                         bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1413                         ring = &dev_priv->ring[BCS];
1414                         if (bcs & GT_RENDER_USER_INTERRUPT)
1415                                 notify_ring(dev, ring);
1416                         if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1417                                 intel_lrc_irq_handler(ring);
1418                 } else
1419                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
1420         }
1421
1422         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1423                 tmp = I915_READ(GEN8_GT_IIR(1));
1424                 if (tmp) {
1425                         I915_WRITE(GEN8_GT_IIR(1), tmp);
1426                         ret = IRQ_HANDLED;
1427
1428                         vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1429                         ring = &dev_priv->ring[VCS];
1430                         if (vcs & GT_RENDER_USER_INTERRUPT)
1431                                 notify_ring(dev, ring);
1432                         if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1433                                 intel_lrc_irq_handler(ring);
1434
1435                         vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1436                         ring = &dev_priv->ring[VCS2];
1437                         if (vcs & GT_RENDER_USER_INTERRUPT)
1438                                 notify_ring(dev, ring);
1439                         if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1440                                 intel_lrc_irq_handler(ring);
1441                 } else
1442                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
1443         }
1444
1445         if (master_ctl & GEN8_GT_PM_IRQ) {
1446                 tmp = I915_READ(GEN8_GT_IIR(2));
1447                 if (tmp & dev_priv->pm_rps_events) {
1448                         I915_WRITE(GEN8_GT_IIR(2),
1449                                    tmp & dev_priv->pm_rps_events);
1450                         ret = IRQ_HANDLED;
1451                         gen6_rps_irq_handler(dev_priv, tmp);
1452                 } else
1453                         DRM_ERROR("The master control interrupt lied (PM)!\n");
1454         }
1455
1456         if (master_ctl & GEN8_GT_VECS_IRQ) {
1457                 tmp = I915_READ(GEN8_GT_IIR(3));
1458                 if (tmp) {
1459                         I915_WRITE(GEN8_GT_IIR(3), tmp);
1460                         ret = IRQ_HANDLED;
1461
1462                         vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1463                         ring = &dev_priv->ring[VECS];
1464                         if (vcs & GT_RENDER_USER_INTERRUPT)
1465                                 notify_ring(dev, ring);
1466                         if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1467                                 intel_lrc_irq_handler(ring);
1468                 } else
1469                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
1470         }
1471
1472         return ret;
1473 }
1474
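     /*
      * More than HPD_STORM_THRESHOLD hotplug interrupts on the same pin
      * within HPD_STORM_DETECT_PERIOD msecs is treated as an interrupt
      * storm, and hotplug detection on that pin gets masked until it is
      * re-enabled again later.
      */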
1475 #define HPD_STORM_DETECT_PERIOD 1000
1476 #define HPD_STORM_THRESHOLD 5
1477
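     /*
      * These helpers return the bit position of the given port's hotplug
      * status field (in PCH_PORT_HOTPLUG for PCH platforms, in the hotplug
      * trigger bits otherwise), so that the caller can test the long/short
      * pulse bit (PORTB_HOTPLUG_LONG_DETECT) for that port.
      */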
1478 static int pch_port_to_hotplug_shift(enum port port)
1479 {
1480         switch (port) {
1481         case PORT_A:
1482         case PORT_E:
1483         default:
1484                 return -1;
1485         case PORT_B:
1486                 return 0;
1487         case PORT_C:
1488                 return 8;
1489         case PORT_D:
1490                 return 16;
1491         }
1492 }
1493
1494 static int i915_port_to_hotplug_shift(enum port port)
1495 {
1496         switch (port) {
1497         case PORT_A:
1498         case PORT_E:
1499         default:
1500                 return -1;
1501         case PORT_B:
1502                 return 17;
1503         case PORT_C:
1504                 return 19;
1505         case PORT_D:
1506                 return 21;
1507         }
1508 }
1509
1510 static inline enum port get_port_from_pin(enum hpd_pin pin)
1511 {
1512         switch (pin) {
1513         case HPD_PORT_B:
1514                 return PORT_B;
1515         case HPD_PORT_C:
1516                 return PORT_C;
1517         case HPD_PORT_D:
1518                 return PORT_D;
1519         default:
1520                 return PORT_A; /* no hpd */
1521         }
1522 }
1523
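     /*
      * Main hotplug irq handler: walks the triggered pins twice, first to
      * route digital port pulses, then for per-pin bookkeeping and irq
      * storm detection (see the two loops below).
      */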
1524 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1525                                          u32 hotplug_trigger,
1526                                          u32 dig_hotplug_reg,
1527                                          const u32 hpd[HPD_NUM_PINS])
1528 {
1529         struct drm_i915_private *dev_priv = dev->dev_private;
1530         int i;
1531         enum port port;
1532         bool storm_detected = false;
1533         bool queue_dig = false, queue_hp = false;
1534         u32 dig_shift;
1535         u32 dig_port_mask = 0;
1536
1537         if (!hotplug_trigger)
1538                 return;
1539
1540         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1541                          hotplug_trigger, dig_hotplug_reg);
1542
1543         spin_lock(&dev_priv->irq_lock);
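             /* First pass: sort digital port pulses into long vs. short */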
1544         for (i = 1; i < HPD_NUM_PINS; i++) {
1545                 if (!(hpd[i] & hotplug_trigger))
1546                         continue;
1547
1548                 port = get_port_from_pin(i);
1549                 if (port && dev_priv->hpd_irq_port[port]) {
1550                         bool long_hpd;
1551
1552                         if (HAS_PCH_SPLIT(dev)) {
1553                                 dig_shift = pch_port_to_hotplug_shift(port);
1554                                 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1555                         } else {
1556                                 dig_shift = i915_port_to_hotplug_shift(port);
1557                                 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1558                         }
1559
1560                         DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1561                                          port_name(port),
1562                                          long_hpd ? "long" : "short");
1563                         /* For long HPD pulses we want the digital-port work to run,
1564                          * but we still want HPD storm detection to function. */
1565                         if (long_hpd) {
1566                                 dev_priv->long_hpd_port_mask |= (1 << port);
1567                                 dig_port_mask |= hpd[i];
1568                         } else {
1569                                 /* for short HPD just trigger the digital queue */
1570                                 dev_priv->short_hpd_port_mask |= (1 << port);
1571                                 hotplug_trigger &= ~hpd[i];
1572                         }
1573                         queue_dig = true;
1574                 }
1575         }
1576
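             /* Second pass: per-pin event accounting and storm detection */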
1577         for (i = 1; i < HPD_NUM_PINS; i++) {
1578                 if (hpd[i] & hotplug_trigger &&
1579                     dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1580                         /*
1581                          * On GMCH platforms the interrupt mask bits only
1582                          * prevent irq generation, not the setting of the
1583                  * hotplug bits themselves. So only WARN about unexpected
1584                          * interrupts on saner platforms.
1585                          */
1586                         WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1587                                   "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1588                                   hotplug_trigger, i, hpd[i]);
1589
1590                         continue;
1591                 }
1592
1593                 if (!(hpd[i] & hotplug_trigger) ||
1594                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1595                         continue;
1596
1597                 if (!(dig_port_mask & hpd[i])) {
1598                         dev_priv->hpd_event_bits |= (1 << i);
1599                         queue_hp = true;
1600                 }
1601
1602                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1603                                    dev_priv->hpd_stats[i].hpd_last_jiffies
1604                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1605                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1606                         dev_priv->hpd_stats[i].hpd_cnt = 0;
1607                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1608                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1609                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1610                         dev_priv->hpd_event_bits &= ~(1 << i);
1611                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1612                         storm_detected = true;
1613                 } else {
1614                         dev_priv->hpd_stats[i].hpd_cnt++;
1615                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1616                                       dev_priv->hpd_stats[i].hpd_cnt);
1617                 }
1618         }
1619
1620         if (storm_detected)
1621                 dev_priv->display.hpd_irq_setup(dev);
1622         spin_unlock(&dev_priv->irq_lock);
1623
1624         /*
1625          * Our hotplug handler can grab modeset locks (by calling down into the
1626          * fb helpers). Hence it must not be run on our own dev-priv->wq work
1627          * queue for otherwise the flush_work in the pageflip code will
1628          * deadlock.
1629          */
1630         if (queue_dig)
1631                 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1632         if (queue_hp)
1633                 schedule_work(&dev_priv->hotplug_work);
1634 }
1635
1636 static void gmbus_irq_handler(struct drm_device *dev)
1637 {
1638         struct drm_i915_private *dev_priv = dev->dev_private;
1639
1640         wake_up_all(&dev_priv->gmbus_wait_queue);
1641 }
1642
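     /*
      * DP AUX transfers wait on the same gmbus_wait_queue as GMBUS
      * transfers (each waiter re-checks its own completion status), so a
      * plain wake_up_all() is all that's needed here as well.
      */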
1643 static void dp_aux_irq_handler(struct drm_device *dev)
1644 {
1645         struct drm_i915_private *dev_priv = dev->dev_private;
1646
1647         wake_up_all(&dev_priv->gmbus_wait_queue);
1648 }
1649
1650 #if defined(CONFIG_DEBUG_FS)
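     /*
      * CRC results are pushed into a ring buffer protected by
      * pipe_crc->lock and drained by the debugfs reader; when the reader
      * can't keep up the sample is dropped and an overflow error logged.
      */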
1651 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1652                                          uint32_t crc0, uint32_t crc1,
1653                                          uint32_t crc2, uint32_t crc3,
1654                                          uint32_t crc4)
1655 {
1656         struct drm_i915_private *dev_priv = dev->dev_private;
1657         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1658         struct intel_pipe_crc_entry *entry;
1659         int head, tail;
1660
1661         spin_lock(&pipe_crc->lock);
1662
1663         if (!pipe_crc->entries) {
1664                 spin_unlock(&pipe_crc->lock);
1665                 DRM_DEBUG_KMS("spurious interrupt\n");
1666                 return;
1667         }
1668
1669         head = pipe_crc->head;
1670         tail = pipe_crc->tail;
1671
1672         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1673                 spin_unlock(&pipe_crc->lock);
1674                 DRM_ERROR("CRC buffer overflowing\n");
1675                 return;
1676         }
1677
1678         entry = &pipe_crc->entries[head];
1679
1680         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1681         entry->crc[0] = crc0;
1682         entry->crc[1] = crc1;
1683         entry->crc[2] = crc2;
1684         entry->crc[3] = crc3;
1685         entry->crc[4] = crc4;
1686
1687         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1688         pipe_crc->head = head;
1689
1690         spin_unlock(&pipe_crc->lock);
1691
1692         wake_up_interruptible(&pipe_crc->wq);
1693 }
1694 #else
1695 static inline void
1696 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1697                              uint32_t crc0, uint32_t crc1,
1698                              uint32_t crc2, uint32_t crc3,
1699                              uint32_t crc4) {}
1700 #endif
1701
1703 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1704 {
1705         struct drm_i915_private *dev_priv = dev->dev_private;
1706
1707         display_pipe_crc_irq_handler(dev, pipe,
1708                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1709                                      0, 0, 0, 0);
1710 }
1711
1712 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1713 {
1714         struct drm_i915_private *dev_priv = dev->dev_private;
1715
1716         display_pipe_crc_irq_handler(dev, pipe,
1717                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1718                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1719                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1720                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1721                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1722 }
1723
1724 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1725 {
1726         struct drm_i915_private *dev_priv = dev->dev_private;
1727         uint32_t res1, res2;
1728
1729         if (INTEL_INFO(dev)->gen >= 3)
1730                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1731         else
1732                 res1 = 0;
1733
1734         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1735                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1736         else
1737                 res2 = 0;
1738
1739         display_pipe_crc_irq_handler(dev, pipe,
1740                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1741                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1742                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1743                                      res1, res2);
1744 }
1745
1746 /* The RPS events need forcewake, so we add them to a work queue and mask their
1747  * IMR bits until the work is done. Other interrupts can be processed without
1748  * the work queue. */
1749 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1750 {
1751         /* TODO: RPS on GEN9+ is not supported yet. */
1752         if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
1753                       "GEN9+: unexpected RPS IRQ\n"))
1754                 return;
1755
1756         if (pm_iir & dev_priv->pm_rps_events) {
1757                 spin_lock(&dev_priv->irq_lock);
1758                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1759                 if (dev_priv->rps.interrupts_enabled) {
1760                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1761                         queue_work(dev_priv->wq, &dev_priv->rps.work);
1762                 }
1763                 spin_unlock(&dev_priv->irq_lock);
1764         }
1765
1766         if (INTEL_INFO(dev_priv)->gen >= 8)
1767                 return;
1768
1769         if (HAS_VEBOX(dev_priv->dev)) {
1770                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1771                         notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1772
1773                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1774                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1775         }
1776 }
1777
1778 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1779 {
1780         return drm_handle_vblank(dev, pipe);
1784 }
1785
1786 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1787 {
1788         struct drm_i915_private *dev_priv = dev->dev_private;
1789         u32 pipe_stats[I915_MAX_PIPES] = { };
1790         int pipe;
1791
1792         spin_lock(&dev_priv->irq_lock);
1793         for_each_pipe(dev_priv, pipe) {
1794                 int reg;
1795                 u32 mask, iir_bit = 0;
1796
1797                 /*
1798                  * PIPESTAT bits get signalled even when the interrupt is
1799                  * disabled with the mask bits, and some of the status bits do
1800                  * not generate interrupts at all (like the underrun bit). Hence
1801                  * we need to be careful that we only handle what we want to
1802                  * handle.
1803                  */
1804
1805                 /* fifo underruns are filtered in the underrun handler. */
1806                 mask = PIPE_FIFO_UNDERRUN_STATUS;
1807
1808                 switch (pipe) {
1809                 case PIPE_A:
1810                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1811                         break;
1812                 case PIPE_B:
1813                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1814                         break;
1815                 case PIPE_C:
1816                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1817                         break;
1818                 }
1819                 if (iir & iir_bit)
1820                         mask |= dev_priv->pipestat_irq_mask[pipe];
1821
1822                 if (!mask)
1823                         continue;
1824
1825                 reg = PIPESTAT(pipe);
1826                 mask |= PIPESTAT_INT_ENABLE_MASK;
1827                 pipe_stats[pipe] = I915_READ(reg) & mask;
1828
1829                 /*
1830                  * Clear the PIPE*STAT regs before the IIR
1831                  */
1832                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1833                                         PIPESTAT_INT_STATUS_MASK))
1834                         I915_WRITE(reg, pipe_stats[pipe]);
1835         }
1836         spin_unlock(&dev_priv->irq_lock);
1837
1838         for_each_pipe(dev_priv, pipe) {
1839                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1840                     intel_pipe_handle_vblank(dev, pipe))
1841                         intel_check_page_flip(dev, pipe);
1842
1843                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1844                         intel_prepare_page_flip(dev, pipe);
1845                         intel_finish_page_flip(dev, pipe);
1846                 }
1847
1848                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1849                         i9xx_pipe_crc_irq_handler(dev, pipe);
1850
1851                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1852                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1853         }
1854
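             /* The GMBUS status bit is reported in pipe A's PIPESTAT */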
1855         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1856                 gmbus_irq_handler(dev);
1857 }
1858
1859 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1860 {
1861         struct drm_i915_private *dev_priv = dev->dev_private;
1862         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1863
1864         if (hotplug_status) {
1865                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1866                 /*
1867                  * Make sure hotplug status is cleared before we clear IIR, or else we
1868                  * may miss hotplug events.
1869                  */
1870                 POSTING_READ(PORT_HOTPLUG_STAT);
1871
1872                 if (IS_G4X(dev)) {
1873                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1874
1875                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1876                 } else {
1877                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1878
1879                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1880                 }
1881
1882                 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1883                     hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1884                         dp_aux_irq_handler(dev);
1885         }
1886 }
1887
1888 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1889 {
1890         struct drm_device *dev = arg;
1891         struct drm_i915_private *dev_priv = dev->dev_private;
1892         u32 iir, gt_iir, pm_iir;
1893         irqreturn_t ret = IRQ_NONE;
1894
1895         while (true) {
1896                 /* Find, clear, then process each source of interrupt */
1897
1898                 gt_iir = I915_READ(GTIIR);
1899                 if (gt_iir)
1900                         I915_WRITE(GTIIR, gt_iir);
1901
1902                 pm_iir = I915_READ(GEN6_PMIIR);
1903                 if (pm_iir)
1904                         I915_WRITE(GEN6_PMIIR, pm_iir);
1905
1906                 iir = I915_READ(VLV_IIR);
1907                 if (iir) {
1908                         /* Consume port before clearing IIR or we'll miss events */
1909                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1910                                 i9xx_hpd_irq_handler(dev);
1911                         I915_WRITE(VLV_IIR, iir);
1912                 }
1913
1914                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1915                         goto out;
1916
1917                 ret = IRQ_HANDLED;
1918
1919                 if (gt_iir)
1920                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1921                 if (pm_iir)
1922                         gen6_rps_irq_handler(dev_priv, pm_iir);
1923                 /* Call regardless, as some status bits might not be
1924                  * signalled in iir */
1925                 valleyview_pipestat_irq_handler(dev, iir);
1926         }
1927
1928 out:
1929         return ret;
1930 }
1931
1932 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1933 {
1934         struct drm_device *dev = arg;
1935         struct drm_i915_private *dev_priv = dev->dev_private;
1936         u32 master_ctl, iir;
1937         irqreturn_t ret = IRQ_NONE;
1938
1939         for (;;) {
1940                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1941                 iir = I915_READ(VLV_IIR);
1942
1943                 if (master_ctl == 0 && iir == 0)
1944                         break;
1945
1946                 ret = IRQ_HANDLED;
1947
1948                 I915_WRITE(GEN8_MASTER_IRQ, 0);
1949
1950                 /* Find, clear, then process each source of interrupt */
1951
1952                 if (iir) {
1953                         /* Consume port before clearing IIR or we'll miss events */
1954                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1955                                 i9xx_hpd_irq_handler(dev);
1956                         I915_WRITE(VLV_IIR, iir);
1957                 }
1958
1959                 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1960
1961                 /* Call regardless, as some status bits might not be
1962                  * signalled in iir */
1963                 valleyview_pipestat_irq_handler(dev, iir);
1964
1965                 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1966                 POSTING_READ(GEN8_MASTER_IRQ);
1967         }
1968
1969         return ret;
1970 }
1971
1972 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1973 {
1974         struct drm_i915_private *dev_priv = dev->dev_private;
1975         int pipe;
1976         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1977         u32 dig_hotplug_reg;
1978
1979         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1980         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1981
1982         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1983
1984         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1985                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1986                                SDE_AUDIO_POWER_SHIFT);
1987                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1988                                  port_name(port));
1989         }
1990
1991         if (pch_iir & SDE_AUX_MASK)
1992                 dp_aux_irq_handler(dev);
1993
1994         if (pch_iir & SDE_GMBUS)
1995                 gmbus_irq_handler(dev);
1996
1997         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1998                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1999
2000         if (pch_iir & SDE_AUDIO_TRANS_MASK)
2001                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2002
2003         if (pch_iir & SDE_POISON)
2004                 DRM_ERROR("PCH poison interrupt\n");
2005
2006         if (pch_iir & SDE_FDI_MASK)
2007                 for_each_pipe(dev_priv, pipe)
2008                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2009                                          pipe_name(pipe),
2010                                          I915_READ(FDI_RX_IIR(pipe)));
2011
2012         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2013                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2014
2015         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2016                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2017
2018         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2019                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2020
2021         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2022                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2023 }
2024
2025 static void ivb_err_int_handler(struct drm_device *dev)
2026 {
2027         struct drm_i915_private *dev_priv = dev->dev_private;
2028         u32 err_int = I915_READ(GEN7_ERR_INT);
2029         enum pipe pipe;
2030
2031         if (err_int & ERR_INT_POISON)
2032                 DRM_ERROR("Poison interrupt\n");
2033
2034         for_each_pipe(dev_priv, pipe) {
2035                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2036                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2037
2038                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2039                         if (IS_IVYBRIDGE(dev))
2040                                 ivb_pipe_crc_irq_handler(dev, pipe);
2041                         else
2042                                 hsw_pipe_crc_irq_handler(dev, pipe);
2043                 }
2044         }
2045
2046         I915_WRITE(GEN7_ERR_INT, err_int);
2047 }
2048
2049 static void cpt_serr_int_handler(struct drm_device *dev)
2050 {
2051         struct drm_i915_private *dev_priv = dev->dev_private;
2052         u32 serr_int = I915_READ(SERR_INT);
2053
2054         if (serr_int & SERR_INT_POISON)
2055                 DRM_ERROR("PCH poison interrupt\n");
2056
2057         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2058                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2059
2060         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2061                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2062
2063         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2064                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2065
2066         I915_WRITE(SERR_INT, serr_int);
2067 }
2068
2069 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2070 {
2071         struct drm_i915_private *dev_priv = dev->dev_private;
2072         int pipe;
2073         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2074         u32 dig_hotplug_reg;
2075
2076         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2077         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2078
2079         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2080
2081         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2082                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2083                                SDE_AUDIO_POWER_SHIFT_CPT);
2084                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2085                                  port_name(port));
2086         }
2087
2088         if (pch_iir & SDE_AUX_MASK_CPT)
2089                 dp_aux_irq_handler(dev);
2090
2091         if (pch_iir & SDE_GMBUS_CPT)
2092                 gmbus_irq_handler(dev);
2093
2094         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2095                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2096
2097         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2098                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2099
2100         if (pch_iir & SDE_FDI_MASK_CPT)
2101                 for_each_pipe(dev_priv, pipe)
2102                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2103                                          pipe_name(pipe),
2104                                          I915_READ(FDI_RX_IIR(pipe)));
2105
2106         if (pch_iir & SDE_ERROR_CPT)
2107                 cpt_serr_int_handler(dev);
2108 }
2109
2110 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2111 {
2112         struct drm_i915_private *dev_priv = dev->dev_private;
2113         enum pipe pipe;
2114
2115         if (de_iir & DE_AUX_CHANNEL_A)
2116                 dp_aux_irq_handler(dev);
2117
2118         if (de_iir & DE_GSE)
2119                 intel_opregion_asle_intr(dev);
2120
2121         if (de_iir & DE_POISON)
2122                 DRM_ERROR("Poison interrupt\n");
2123
2124         for_each_pipe(dev_priv, pipe) {
2125                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2126                     intel_pipe_handle_vblank(dev, pipe))
2127                         intel_check_page_flip(dev, pipe);
2128
2129                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2130                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2131
2132                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2133                         i9xx_pipe_crc_irq_handler(dev, pipe);
2134
2135                 /* plane/pipes map 1:1 on ilk+ */
2136                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2137                         intel_prepare_page_flip(dev, pipe);
2138                         intel_finish_page_flip_plane(dev, pipe);
2139                 }
2140         }
2141
2142         /* check event from PCH */
2143         if (de_iir & DE_PCH_EVENT) {
2144                 u32 pch_iir = I915_READ(SDEIIR);
2145
2146                 if (HAS_PCH_CPT(dev))
2147                         cpt_irq_handler(dev, pch_iir);
2148                 else
2149                         ibx_irq_handler(dev, pch_iir);
2150
2151                 /* should clear PCH hotplug event before clearing CPU irq */
2152                 I915_WRITE(SDEIIR, pch_iir);
2153         }
2154
2155         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2156                 ironlake_rps_change_irq_handler(dev);
2157 }
2158
2159 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2160 {
2161         struct drm_i915_private *dev_priv = dev->dev_private;
2162         enum pipe pipe;
2163
2164         if (de_iir & DE_ERR_INT_IVB)
2165                 ivb_err_int_handler(dev);
2166
2167         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2168                 dp_aux_irq_handler(dev);
2169
2170         if (de_iir & DE_GSE_IVB)
2171                 intel_opregion_asle_intr(dev);
2172
2173         for_each_pipe(dev_priv, pipe) {
2174                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2175                     intel_pipe_handle_vblank(dev, pipe))
2176                         intel_check_page_flip(dev, pipe);
2177
2178                 /* plane/pipes map 1:1 on ilk+ */
2179                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2180                         intel_prepare_page_flip(dev, pipe);
2181                         intel_finish_page_flip_plane(dev, pipe);
2182                 }
2183         }
2184
2185         /* check event from PCH */
2186         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2187                 u32 pch_iir = I915_READ(SDEIIR);
2188
2189                 cpt_irq_handler(dev, pch_iir);
2190
2191                 /* clear PCH hotplug event before clearing CPU irq */
2192                 I915_WRITE(SDEIIR, pch_iir);
2193         }
2194 }
2195
2196 /*
2197  * To handle irqs with the minimum potential races with fresh interrupts, we:
2198  * 1 - Disable Master Interrupt Control.
2199  * 2 - Find the source(s) of the interrupt.
2200  * 3 - Clear the Interrupt Identity bits (IIR).
2201  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2202  * 5 - Re-enable Master Interrupt Control.
2203  */
2204 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2205 {
2206         struct drm_device *dev = arg;
2207         struct drm_i915_private *dev_priv = dev->dev_private;
2208         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2209         irqreturn_t ret = IRQ_NONE;
2210
2211         /* We get interrupts on unclaimed registers, so check for this before we
2212          * do any I915_{READ,WRITE}. */
2213         intel_uncore_check_errors(dev);
2214
2215         /* disable master interrupt before clearing iir  */
2216         de_ier = I915_READ(DEIER);
2217         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2218         POSTING_READ(DEIER);
2219
2220         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2221          * interrupts will be stored on its back queue, and then we'll be
2222          * able to process them after we restore SDEIER (as soon as we restore
2223          * it, we'll get an interrupt if SDEIIR still has something to process
2224          * due to its back queue). */
2225         if (!HAS_PCH_NOP(dev)) {
2226                 sde_ier = I915_READ(SDEIER);
2227                 I915_WRITE(SDEIER, 0);
2228                 POSTING_READ(SDEIER);
2229         }
2230
2231         /* Find, clear, then process each source of interrupt */
2232
2233         gt_iir = I915_READ(GTIIR);
2234         if (gt_iir) {
2235                 I915_WRITE(GTIIR, gt_iir);
2236                 ret = IRQ_HANDLED;
2237                 if (INTEL_INFO(dev)->gen >= 6)
2238                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
2239                 else
2240                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2241         }
2242
2243         de_iir = I915_READ(DEIIR);
2244         if (de_iir) {
2245                 I915_WRITE(DEIIR, de_iir);
2246                 ret = IRQ_HANDLED;
2247                 if (INTEL_INFO(dev)->gen >= 7)
2248                         ivb_display_irq_handler(dev, de_iir);
2249                 else
2250                         ilk_display_irq_handler(dev, de_iir);
2251         }
2252
2253         if (INTEL_INFO(dev)->gen >= 6) {
2254                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2255                 if (pm_iir) {
2256                         I915_WRITE(GEN6_PMIIR, pm_iir);
2257                         ret = IRQ_HANDLED;
2258                         gen6_rps_irq_handler(dev_priv, pm_iir);
2259                 }
2260         }
2261
2262         I915_WRITE(DEIER, de_ier);
2263         POSTING_READ(DEIER);
2264         if (!HAS_PCH_NOP(dev)) {
2265                 I915_WRITE(SDEIER, sde_ier);
2266                 POSTING_READ(SDEIER);
2267         }
2268
2269         return ret;
2270 }
2271
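     /*
      * Gen8 follows the same disable-master/find/clear/process/re-enable
      * scheme described above ironlake_irq_handler(), with the GT sources
      * handled by gen8_gt_irq_handler() and the display sources split per
      * pipe.
      */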
2272 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2273 {
2274         struct drm_device *dev = arg;
2275         struct drm_i915_private *dev_priv = dev->dev_private;
2276         u32 master_ctl;
2277         irqreturn_t ret = IRQ_NONE;
2278         uint32_t tmp = 0;
2279         enum pipe pipe;
2280         u32 aux_mask = GEN8_AUX_CHANNEL_A;
2281
2282         if (IS_GEN9(dev))
2283                 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2284                         GEN9_AUX_CHANNEL_D;
2285
2286         master_ctl = I915_READ(GEN8_MASTER_IRQ);
2287         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2288         if (!master_ctl)
2289                 return IRQ_NONE;
2290
2291         I915_WRITE(GEN8_MASTER_IRQ, 0);
2292         POSTING_READ(GEN8_MASTER_IRQ);
2293
2294         /* Find, clear, then process each source of interrupt */
2295
2296         ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2297
2298         if (master_ctl & GEN8_DE_MISC_IRQ) {
2299                 tmp = I915_READ(GEN8_DE_MISC_IIR);
2300                 if (tmp) {
2301                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2302                         ret = IRQ_HANDLED;
2303                         if (tmp & GEN8_DE_MISC_GSE)
2304                                 intel_opregion_asle_intr(dev);
2305                         else
2306                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2307                 } else
2309                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2310         }
2311
2312         if (master_ctl & GEN8_DE_PORT_IRQ) {
2313                 tmp = I915_READ(GEN8_DE_PORT_IIR);
2314                 if (tmp) {
2315                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2316                         ret = IRQ_HANDLED;
2317
2318                         if (tmp & aux_mask)
2319                                 dp_aux_irq_handler(dev);
2320                         else
2321                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2322                 } else
2324                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2325         }
2326
2327         for_each_pipe(dev_priv, pipe) {
2328                 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2329
2330                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2331                         continue;
2332
2333                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2334                 if (pipe_iir) {
2335                         ret = IRQ_HANDLED;
2336                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2337
2338                         if (pipe_iir & GEN8_PIPE_VBLANK &&
2339                             intel_pipe_handle_vblank(dev, pipe))
2340                                 intel_check_page_flip(dev, pipe);
2341
2342                         if (IS_GEN9(dev))
2343                                 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2344                         else
2345                                 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2346
2347                         if (flip_done) {
2348                                 intel_prepare_page_flip(dev, pipe);
2349                                 intel_finish_page_flip_plane(dev, pipe);
2350                         }
2351
2352                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2353                                 hsw_pipe_crc_irq_handler(dev, pipe);
2354
2355                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2356                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2357                                                                     pipe);
2358
2360                         if (IS_GEN9(dev))
2361                                 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2362                         else
2363                                 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2364
2365                         if (fault_errors)
2366                                 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2367                                           pipe_name(pipe),
2368                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2369                 } else
2370                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2371         }
2372
2373         if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2374                 /*
2375                  * FIXME(BDW): Assume for now that the new interrupt handling
2376                  * scheme also closed the SDE interrupt handling race we've seen
2377                  * on older pch-split platforms. But this needs testing.
2378                  */
2379                 u32 pch_iir = I915_READ(SDEIIR);
2380                 if (pch_iir) {
2381                         I915_WRITE(SDEIIR, pch_iir);
2382                         ret = IRQ_HANDLED;
2383                         cpt_irq_handler(dev, pch_iir);
2384                 } else
2385                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
2386
2387         }
2388
2389         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2390         POSTING_READ(GEN8_MASTER_IRQ);
2391
2392         return ret;
2393 }
2394
2395 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2396                                bool reset_completed)
2397 {
2398         struct intel_engine_cs *ring;
2399         int i;
2400
2401         /*
2402          * Notify all waiters for GPU completion events that reset state has
2403          * been changed, and that they need to restart their wait after
2404          * checking for potential errors (and bail out to drop locks if there is
2405          * a gpu reset pending so that i915_error_work_func can acquire them).
2406          */
2407
2408         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2409         for_each_ring(ring, dev_priv, i)
2410                 wake_up_all(&ring->irq_queue);
2411
2412         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2413         wake_up_all(&dev_priv->pending_flip_queue);
2414
2415         /*
2416          * Signal tasks blocked in i915_gem_wait_for_error that the pending
2417          * reset state is cleared.
2418          */
2419         if (reset_completed)
2420                 wake_up_all(&dev_priv->gpu_error.reset_queue);
2421 }
2422
2423 /**
2424  * i915_reset_and_wakeup - do process context error handling work
      * @dev: drm device
2425  *
2426  * Fire an error uevent so userspace can see that a hang or error
2427  * was detected.
2428  */
2429 static void i915_reset_and_wakeup(struct drm_device *dev)
2430 {
2431         struct drm_i915_private *dev_priv = to_i915(dev);
2432         struct i915_gpu_error *error = &dev_priv->gpu_error;
2433         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2434         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2435         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2436         int ret;
2437
2438         kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2439
2440         /*
2441          * Note that there's only one work item which does gpu resets, so we
2442          * need not worry about concurrent gpu resets potentially incrementing
2443          * error->reset_counter twice. We only need to take care of another
2444          * racing irq/hangcheck declaring the gpu dead for a second time. A
2445          * quick check for that is good enough: schedule_work ensures the
2446          * correct ordering between hang detection and this work item, and since
2447          * the reset in-progress bit is only ever set by code outside of this
2448          * work we don't need to worry about any other races.
2449          */
2450         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2451                 DRM_DEBUG_DRIVER("resetting chip\n");
2452                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2453                                    reset_event);
2454
2455                 /*
2456                  * In most cases it's guaranteed that we get here with an RPM
2457                  * reference held, for example because there is a pending GPU
2458                  * request that won't finish until the reset is done. This
2459                  * isn't the case at least when we get here by doing a
2460                  * simulated reset via debugfs, so get an RPM reference.
2461                  */
2462                 intel_runtime_pm_get(dev_priv);
2463
2464                 intel_prepare_reset(dev);
2465
2466                 /*
2467                  * All state reset _must_ be completed before we update the
2468                  * reset counter, for otherwise waiters might miss the reset
2469                  * pending state and not properly drop locks, resulting in
2470                  * deadlocks with the reset work.
2471                  */
2472                 ret = i915_reset(dev);
2473
2474                 intel_finish_reset(dev);
2475
2476                 intel_runtime_pm_put(dev_priv);
2477
2478                 if (ret == 0) {
2479                         /*
2480                          * After all the gem state is reset, increment the reset
2481                          * counter and wake up everyone waiting for the reset to
2482                          * complete.
2483                          *
2484                          * Since unlock operations are a one-sided barrier only,
2485                          * we need to insert a barrier here to order any seqno
2486                  * updates before the counter increment.
2488                          */
2489                         smp_mb__before_atomic();
2490                         atomic_inc(&dev_priv->gpu_error.reset_counter);
2491
2492                         kobject_uevent_env(&dev->primary->kdev->kobj,
2493                                            KOBJ_CHANGE, reset_done_event);
2494                 } else {
2495                         atomic_set_mask(I915_WEDGED, &error->reset_counter);
2496                 }
2497
2498                 /*
2499                  * Note: The wake_up also serves as a memory barrier so that
2500                  * waiters see the update value of the reset counter atomic_t.
2501                  */
2502                 i915_error_wake_up(dev_priv, true);
2503         }
2504 }
2505
2506 static void i915_report_and_clear_eir(struct drm_device *dev)
2507 {
2508         struct drm_i915_private *dev_priv = dev->dev_private;
2509         uint32_t instdone[I915_NUM_INSTDONE_REG];
2510         u32 eir = I915_READ(EIR);
2511         int pipe, i;
2512
2513         if (!eir)
2514                 return;
2515
2516         pr_err("render error detected, EIR: 0x%08x\n", eir);
2517
2518         i915_get_extra_instdone(dev, instdone);
2519
2520         if (IS_G4X(dev)) {
2521                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2522                         u32 ipeir = I915_READ(IPEIR_I965);
2523
2524                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2525                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2526                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
2527                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2528                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2529                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2530                         I915_WRITE(IPEIR_I965, ipeir);
2531                         POSTING_READ(IPEIR_I965);
2532                 }
2533                 if (eir & GM45_ERROR_PAGE_TABLE) {
2534                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2535                         pr_err("page table error\n");
2536                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2537                         I915_WRITE(PGTBL_ER, pgtbl_err);
2538                         POSTING_READ(PGTBL_ER);
2539                 }
2540         }
2541
2542         if (!IS_GEN2(dev)) {
2543                 if (eir & I915_ERROR_PAGE_TABLE) {
2544                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2545                         pr_err("page table error\n");
2546                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2547                         I915_WRITE(PGTBL_ER, pgtbl_err);
2548                         POSTING_READ(PGTBL_ER);
2549                 }
2550         }
2551
2552         if (eir & I915_ERROR_MEMORY_REFRESH) {
2553                 pr_err("memory refresh error:\n");
2554                 for_each_pipe(dev_priv, pipe)
2555                         pr_err("pipe %c stat: 0x%08x\n",
2556                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2557                 /* pipestat has already been acked */
2558         }
2559         if (eir & I915_ERROR_INSTRUCTION) {
2560                 pr_err("instruction error\n");
2561                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2562                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2563                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2564                 if (INTEL_INFO(dev)->gen < 4) {
2565                         u32 ipeir = I915_READ(IPEIR);
2566
2567                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2568                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2569                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2570                         I915_WRITE(IPEIR, ipeir);
2571                         POSTING_READ(IPEIR);
2572                 } else {
2573                         u32 ipeir = I915_READ(IPEIR_I965);
2574
2575                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2576                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2577                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2578                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2579                         I915_WRITE(IPEIR_I965, ipeir);
2580                         POSTING_READ(IPEIR_I965);
2581                 }
2582         }
2583
2584         I915_WRITE(EIR, eir);
2585         POSTING_READ(EIR);
2586         eir = I915_READ(EIR);
2587         if (eir) {
2588                 /*
2589                  * some errors might have become stuck,
2590                  * mask them.
2591                  */
2592                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2593                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2594                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2595         }
2596 }
2597
2598 /**
2599  * i915_handle_error - handle a gpu error
2600  * @dev: drm device
      * @wedged: if true, treat the GPU as hung and kick off a reset
      * @fmt: printf-style format string describing the error
2601  *
2602  * Do some basic checking of register state at error time and
2603  * dump it to the syslog.  Also call i915_capture_error_state() to make
2604  * sure we get a record and make it available in debugfs.  Fire a uevent
2605  * so userspace knows something bad happened (should trigger collection
2606  * of a ring dump etc.).
2607  */
2608 void i915_handle_error(struct drm_device *dev, bool wedged,
2609                        const char *fmt, ...)
2610 {
2611         struct drm_i915_private *dev_priv = dev->dev_private;
2612         va_list args;
2613         char error_msg[80];
2614
2615         va_start(args, fmt);
2616         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2617         va_end(args);
2618
2619         i915_capture_error_state(dev, wedged, error_msg);
2620         i915_report_and_clear_eir(dev);
2621
2622         if (wedged) {
2623                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2624                                 &dev_priv->gpu_error.reset_counter);
2625
2626                 /*
2627                  * Wakeup waiting processes so that the reset function
2628                  * i915_reset_and_wakeup doesn't deadlock trying to grab
2629                  * various locks. By bumping the reset counter first, the woken
2630                  * processes will see a reset in progress and back off,
2631                  * releasing their locks and then wait for the reset completion.
2632                  * We must do this for _all_ gpu waiters that might hold locks
2633                  * that the reset work needs to acquire.
2634                  *
2635                  * Note: The wake_up serves as the required memory barrier to
2636                  * ensure that the waiters see the updated value of the reset
2637                  * counter atomic_t.
2638                  */
2639                 i915_error_wake_up(dev_priv, false);
2640         }
2641
2642         i915_reset_and_wakeup(dev);
2643 }
2644
2645 /* Called from drm generic code, passed 'crtc' which
2646  * we use as a pipe index
2647  */
2648 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2649 {
2650         struct drm_i915_private *dev_priv = dev->dev_private;
2651         unsigned long irqflags;
2652
2653         if (!i915_pipe_enabled(dev, pipe))
2654                 return -EINVAL;
2655
2656         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2657         if (INTEL_INFO(dev)->gen >= 4)
2658                 i915_enable_pipestat(dev_priv, pipe,
2659                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
2660         else
2661                 i915_enable_pipestat(dev_priv, pipe,
2662                                      PIPE_VBLANK_INTERRUPT_STATUS);
2663         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2664
2665         return 0;
2666 }
2667
2668 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2669 {
2670         struct drm_i915_private *dev_priv = dev->dev_private;
2671         unsigned long irqflags;
2672         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2673                                                      DE_PIPE_VBLANK(pipe);
2674
2675         if (!i915_pipe_enabled(dev, pipe))
2676                 return -EINVAL;
2677
2678         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2679         ironlake_enable_display_irq(dev_priv, bit);
2680         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2681
2682         return 0;
2683 }
2684
2685 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2686 {
2687         struct drm_i915_private *dev_priv = dev->dev_private;
2688         unsigned long irqflags;
2689
2690         if (!i915_pipe_enabled(dev, pipe))
2691                 return -EINVAL;
2692
2693         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2694         i915_enable_pipestat(dev_priv, pipe,
2695                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2696         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2697
2698         return 0;
2699 }
2700
2701 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2702 {
2703         struct drm_i915_private *dev_priv = dev->dev_private;
2704         unsigned long irqflags;
2705
2706         if (!i915_pipe_enabled(dev, pipe))
2707                 return -EINVAL;
2708
2709         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2710         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2711         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2712         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2713         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2714         return 0;
2715 }
2716
2717 /* Called from drm generic code, passed 'crtc' which
2718  * we use as a pipe index
2719  */
2720 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2721 {
2722         struct drm_i915_private *dev_priv = dev->dev_private;
2723         unsigned long irqflags;
2724
2725         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2726         i915_disable_pipestat(dev_priv, pipe,
2727                               PIPE_VBLANK_INTERRUPT_STATUS |
2728                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2729         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2730 }
2731
2732 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2733 {
2734         struct drm_i915_private *dev_priv = dev->dev_private;
2735         unsigned long irqflags;
2736         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2737                                                      DE_PIPE_VBLANK(pipe);
2738
2739         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2740         ironlake_disable_display_irq(dev_priv, bit);
2741         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2742 }
2743
2744 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2745 {
2746         struct drm_i915_private *dev_priv = dev->dev_private;
2747         unsigned long irqflags;
2748
2749         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2750         i915_disable_pipestat(dev_priv, pipe,
2751                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2752         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2753 }
2754
2755 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2756 {
2757         struct drm_i915_private *dev_priv = dev->dev_private;
2758         unsigned long irqflags;
2759
2760         if (!i915_pipe_enabled(dev, pipe))
2761                 return;
2762
2763         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2764         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2765         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2766         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2767         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2768 }
2769
2770 static struct drm_i915_gem_request *
2771 ring_last_request(struct intel_engine_cs *ring)
2772 {
2773         return list_entry(ring->request_list.prev,
2774                           struct drm_i915_gem_request, list);
2775 }
2776
2777 static bool
2778 ring_idle(struct intel_engine_cs *ring)
2779 {
2780         return (list_empty(&ring->request_list) ||
2781                 i915_gem_request_completed(ring_last_request(ring), false));
2782 }
2783
2784 static bool
2785 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2786 {
2787         if (INTEL_INFO(dev)->gen >= 8) {
2788                 return (ipehr >> 23) == 0x1c;
2789         } else {
2790                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2791                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2792                                  MI_SEMAPHORE_REGISTER);
2793         }
2794 }
2795
2796 static struct intel_engine_cs *
2797 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2798 {
2799         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2800         struct intel_engine_cs *signaller;
2801         int i;
2802
2803         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2804                 for_each_ring(signaller, dev_priv, i) {
2805                         if (ring == signaller)
2806                                 continue;
2807
2808                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
2809                                 return signaller;
2810                 }
2811         } else {
2812                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2813
2814                 for_each_ring(signaller, dev_priv, i) {
2815                         if (ring == signaller)
2816                                 continue;
2817
2818                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2819                                 return signaller;
2820                 }
2821         }
2822
2823         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2824                   ring->id, ipehr, offset);
2825
2826         return NULL;
2827 }
2828
2829 static struct intel_engine_cs *
2830 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2831 {
2832         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2833         u32 cmd, ipehr, head;
2834         u64 offset = 0;
2835         int i, backwards;
2836
2837         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2838         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2839                 return NULL;
2840
2841         /*
2842          * HEAD is likely pointing to the dword after the actual command,
2843          * so scan backwards until we find the MBOX. But limit the scan to
2844          * the semaphore wait command size plus one dword (4 or 5 in total).
2845          * Note that we don't care about ACTHD here since that might
2846          * point at a batch, and semaphores are always emitted into the
2847          * ringbuffer itself.
2848          */
2849         head = I915_READ_HEAD(ring) & HEAD_ADDR;
2850         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2851
2852         for (i = backwards; i; --i) {
2853                 /*
2854                  * Be paranoid and presume the hw has gone off into the wild -
2855                  * our ring is smaller than what the hardware (and hence
2856                  * HEAD_ADDR) allows. Also handles wrap-around.
2857                  */
2858                 head &= ring->buffer->size - 1;
2859
2860                 /* This here seems to blow up */
2861                 cmd = ioread32(ring->buffer->virtual_start + head);
2862                 if (cmd == ipehr)
2863                         break;
2864
2865                 head -= 4;
2866         }
2867
2868         if (!i)
2869                 return NULL;
2870
2871         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2872         if (INTEL_INFO(ring->dev)->gen >= 8) {
2873                 offset = ioread32(ring->buffer->virtual_start + head + 12);
2874                 offset <<= 32;
2875                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2876         }
2877         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2878 }
2879
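/*
 * Illustrative sketch (not driver code): the backwards walk above
 * depends on the ring size being a power of two, so "head &= size - 1"
 * both clamps a runaway HEAD and handles the wrap when head underflows
 * past zero. A self-contained version with hypothetical names, using
 * plain memory in place of ioread32():
 */
static inline u32 *example_scan_ring_backwards(u32 *ring, u32 size, u32 head,
                                               u32 needle, int max_dwords)
{
        int i;

        for (i = max_dwords; i; --i) {
                head &= size - 1;               /* clamp + wrap-around */
                if (ring[head / 4] == needle)   /* head is a byte offset */
                        return &ring[head / 4];
                head -= 4;                      /* one dword back */
        }
        return NULL;
}
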
2880 static int semaphore_passed(struct intel_engine_cs *ring)
2881 {
2882         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2883         struct intel_engine_cs *signaller;
2884         u32 seqno;
2885
2886         ring->hangcheck.deadlock++;
2887
2888         signaller = semaphore_waits_for(ring, &seqno);
2889         if (signaller == NULL)
2890                 return -1;
2891
2892         /* Prevent pathological recursion due to driver bugs */
2893         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2894                 return -1;
2895
2896         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2897                 return 1;
2898
2899         /* cursory check for an unkickable deadlock */
2900         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2901             semaphore_passed(signaller) < 0)
2902                 return -1;
2903
2904         return 0;
2905 }
2906
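/*
 * Illustrative sketch (not driver code): semaphore_passed() recurses
 * along the chain of waiting rings, and a genuine dependency chain can
 * visit each ring at most once. A per-node counter that reaches the
 * total number of nodes therefore proves a cycle (or a driver bug),
 * which is all the deadlock counter above checks. Hypothetical model:
 */
struct example_waiter {
        struct example_waiter *waits_on;
        int visits;
};

static inline bool example_wait_chain_has_cycle(struct example_waiter *w,
                                                int num_nodes)
{
        while (w) {
                if (++w->visits >= num_nodes)
                        return true;    /* longer than any acyclic chain */
                w = w->waits_on;
        }
        return false;
}
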
2907 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2908 {
2909         struct intel_engine_cs *ring;
2910         int i;
2911
2912         for_each_ring(ring, dev_priv, i)
2913                 ring->hangcheck.deadlock = 0;
2914 }
2915
2916 static enum intel_ring_hangcheck_action
2917 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2918 {
2919         struct drm_device *dev = ring->dev;
2920         struct drm_i915_private *dev_priv = dev->dev_private;
2921         u32 tmp;
2922
2923         if (acthd != ring->hangcheck.acthd) {
2924                 if (acthd > ring->hangcheck.max_acthd) {
2925                         ring->hangcheck.max_acthd = acthd;
2926                         return HANGCHECK_ACTIVE;
2927                 }
2928
2929                 return HANGCHECK_ACTIVE_LOOP;
2930         }
2931
2932         if (IS_GEN2(dev))
2933                 return HANGCHECK_HUNG;
2934
2935         /* Is the chip hanging on a WAIT_FOR_EVENT?
2936          * If so we can simply poke the RB_WAIT bit
2937          * and break the hang. This should work on
2938          * all but the second generation chipsets.
2939          */
2940         tmp = I915_READ_CTL(ring);
2941         if (tmp & RING_WAIT) {
2942                 i915_handle_error(dev, false,
2943                                   "Kicking stuck wait on %s",
2944                                   ring->name);
2945                 I915_WRITE_CTL(ring, tmp);
2946                 return HANGCHECK_KICK;
2947         }
2948
2949         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2950                 switch (semaphore_passed(ring)) {
2951                 default:
2952                         return HANGCHECK_HUNG;
2953                 case 1:
2954                         i915_handle_error(dev, false,
2955                                           "Kicking stuck semaphore on %s",
2956                                           ring->name);
2957                         I915_WRITE_CTL(ring, tmp);
2958                         return HANGCHECK_KICK;
2959                 case 0:
2960                         return HANGCHECK_WAIT;
2961                 }
2962         }
2963
2964         return HANGCHECK_HUNG;
2965 }
2966
2967 /*
2968  * This is called when the chip hasn't reported back with completed
2969  * batchbuffers in a long time. We keep track of seqno progress per ring,
2970  * and if there is no progress the hangcheck score for that ring is increased.
2971  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2972  * we kick the ring. If we see no progress on three subsequent calls we assume
2973  * the chip is wedged and try to fix it by resetting the chip.
2974  */
2975 static void i915_hangcheck_elapsed(struct work_struct *work)
2976 {
2977         struct drm_i915_private *dev_priv =
2978                 container_of(work, typeof(*dev_priv),
2979                              gpu_error.hangcheck_work.work);
2980         struct drm_device *dev = dev_priv->dev;
2981         struct intel_engine_cs *ring;
2982         int i;
2983         int busy_count = 0, rings_hung = 0;
2984         bool stuck[I915_NUM_RINGS] = { 0 };
2985 #define BUSY 1
2986 #define KICK 5
2987 #define HUNG 20
2988
2989         if (!i915.enable_hangcheck)
2990                 return;
2991
2992         for_each_ring(ring, dev_priv, i) {
2993                 u64 acthd;
2994                 u32 seqno;
2995                 bool busy = true;
2996
2997                 semaphore_clear_deadlocks(dev_priv);
2998
2999                 seqno = ring->get_seqno(ring, false);
3000                 acthd = intel_ring_get_active_head(ring);
3001
3002                 if (ring->hangcheck.seqno == seqno) {
3003                         if (ring_idle(ring)) {
3004                                 ring->hangcheck.action = HANGCHECK_IDLE;
3005
3006                                 if (waitqueue_active(&ring->irq_queue)) {
3007                                         /* Issue a wake-up to catch stuck h/w. */
3008                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3009                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3010                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3011                                                                   ring->name);
3012                                                 else
3013                                                         DRM_INFO("Fake missed irq on %s\n",
3014                                                                  ring->name);
3015                                                 wake_up_all(&ring->irq_queue);
3016                                         }
3017                                         /* Safeguard against driver failure */
3018                                         ring->hangcheck.score += BUSY;
3019                                 } else
3020                                         busy = false;
3021                         } else {
3022                                 /* We always increment the hangcheck score
3023                                  * if the ring is busy and still processing
3024                                  * the same request, so that no single request
3025                                  * can run indefinitely (such as a chain of
3026                                  * batches). The only time we do not increment
3027                                  * the hangcheck score on this ring is if this
3028                                  * ring is in a legitimate wait for another
3029                                  * ring. In that case the waiting ring is a
3030                                  * victim and we want to be sure we catch the
3031                                  * right culprit. Then every time we do kick
3032                                  * the ring, add a small increment to the
3033                                  * score so that we can catch a batch that is
3034                                  * being repeatedly kicked and so responsible
3035                                  * for stalling the machine.
3036                                  */
3037                                 ring->hangcheck.action = ring_stuck(ring,
3038                                                                     acthd);
3039
3040                                 switch (ring->hangcheck.action) {
3041                                 case HANGCHECK_IDLE:
3042                                 case HANGCHECK_WAIT:
3043                                 case HANGCHECK_ACTIVE:
3044                                         break;
3045                                 case HANGCHECK_ACTIVE_LOOP:
3046                                         ring->hangcheck.score += BUSY;
3047                                         break;
3048                                 case HANGCHECK_KICK:
3049                                         ring->hangcheck.score += KICK;
3050                                         break;
3051                                 case HANGCHECK_HUNG:
3052                                         ring->hangcheck.score += HUNG;
3053                                         stuck[i] = true;
3054                                         break;
3055                                 }
3056                         }
3057                 } else {
3058                         ring->hangcheck.action = HANGCHECK_ACTIVE;
3059
3060                         /* Gradually reduce the count so that we catch DoS
3061                          * attempts across multiple batches.
3062                          */
3063                         if (ring->hangcheck.score > 0)
3064                                 ring->hangcheck.score--;
3065
3066                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3067                 }
3068
3069                 ring->hangcheck.seqno = seqno;
3070                 ring->hangcheck.acthd = acthd;
3071                 busy_count += busy;
3072         }
3073
3074         for_each_ring(ring, dev_priv, i) {
3075                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3076                         DRM_INFO("%s on %s\n",
3077                                  stuck[i] ? "stuck" : "no progress",
3078                                  ring->name);
3079                         rings_hung++;
3080                 }
3081         }
3082
3083         if (rings_hung)
3084                 return i915_handle_error(dev, true, "Ring hung");
3085
3086         if (busy_count)
3087                 /* Reset the timer in case the chip hangs without another
3088                  * request being added */
3089                 i915_queue_hangcheck(dev);
3090 }
3091
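/*
 * Illustrative sketch (not driver code): the hangcheck above is a leaky
 * bucket. Every pass adds BUSY, KICK or HUNG according to how stuck the
 * ring looks and subtracts one whenever the seqno advances, so only a
 * sustained stall can reach HANGCHECK_SCORE_RING_HUNG. Condensed into a
 * single hypothetical update step:
 */
static inline int example_hangcheck_update(int score, bool made_progress,
                                           int increment)
{
        if (made_progress)
                return score > 0 ? score - 1 : 0;       /* gradual decay */
        return score + increment;       /* BUSY, KICK or HUNG from above */
}
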
3092 void i915_queue_hangcheck(struct drm_device *dev)
3093 {
3094         struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3095
3096         if (!i915.enable_hangcheck)
3097                 return;
3098
3099         /* Don't continually defer the hangcheck so that it is always run at
3100          * least once after work has been scheduled on any ring. Otherwise,
3101          * we will ignore a hung ring if a second ring is kept busy.
3102          */
3103
3104         queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3105                            round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3106 }
3107
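/*
 * Illustrative note (editor's sketch): queue_delayed_work() changes
 * nothing and returns false while the work item is still pending, so
 * repeated calls cannot keep deferring the expiry -- exactly the
 * property the comment above relies on (mod_delayed_work() would re-arm
 * instead). Hypothetical wrapper making the return value explicit:
 */
static inline bool example_queue_hangcheck_once(struct workqueue_struct *wq,
                                                struct delayed_work *work,
                                                unsigned long delay)
{
        /* false means "already pending": the existing deadline stands */
        return queue_delayed_work(wq, work, delay);
}
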
3108 static void ibx_irq_reset(struct drm_device *dev)
3109 {
3110         struct drm_i915_private *dev_priv = dev->dev_private;
3111
3112         if (HAS_PCH_NOP(dev))
3113                 return;
3114
3115         GEN5_IRQ_RESET(SDE);
3116
3117         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3118                 I915_WRITE(SERR_INT, 0xffffffff);
3119 }
3120
3121 /*
3122  * SDEIER is also touched by the interrupt handler to work around missed PCH
3123  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3124  * instead we unconditionally enable all PCH interrupt sources here, but then
3125  * only unmask them as needed with SDEIMR.
3126  *
3127  * This function needs to be called before interrupts are enabled.
3128  */
3129 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3130 {
3131         struct drm_i915_private *dev_priv = dev->dev_private;
3132
3133         if (HAS_PCH_NOP(dev))
3134                 return;
3135
3136         WARN_ON(I915_READ(SDEIER) != 0);
3137         I915_WRITE(SDEIER, 0xffffffff);
3138         POSTING_READ(SDEIER);
3139 }
3140
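/*
 * Illustrative sketch (not driver code): the workaround described above
 * splits the two control registers by role -- IER is opened fully once,
 * before any interrupt can fire, and all runtime gating then happens
 * through IMR alone. In the abstract, with hypothetical cached state and
 * the hardware writes elided:
 */
struct example_pch_irq_state {
        u32 ier;        /* written once at init, never touched again */
        u32 imr;        /* the only runtime control */
};

static inline void example_pch_irq_init(struct example_pch_irq_state *s)
{
        s->ier = 0xffffffff;    /* enable every source up front... */
        s->imr = 0xffffffff;    /* ...but leave them all masked */
}

static inline void example_pch_irq_unmask(struct example_pch_irq_state *s,
                                          u32 bits)
{
        s->imr &= ~bits;
}
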
3141 static void gen5_gt_irq_reset(struct drm_device *dev)
3142 {
3143         struct drm_i915_private *dev_priv = dev->dev_private;
3144
3145         GEN5_IRQ_RESET(GT);
3146         if (INTEL_INFO(dev)->gen >= 6)
3147                 GEN5_IRQ_RESET(GEN6_PM);
3148 }
3149
3150 /* drm_dma.h hooks */
3151
3152 static void ironlake_irq_reset(struct drm_device *dev)
3153 {
3154         struct drm_i915_private *dev_priv = dev->dev_private;
3155
3156         I915_WRITE(HWSTAM, 0xffffffff);
3157
3158         GEN5_IRQ_RESET(DE);
3159         if (IS_GEN7(dev))
3160                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3161
3162         gen5_gt_irq_reset(dev);
3163
3164         ibx_irq_reset(dev);
3165 }
3166
3167 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3168 {
3169         enum pipe pipe;
3170
3171         I915_WRITE(PORT_HOTPLUG_EN, 0);
3172         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3173
3174         for_each_pipe(dev_priv, pipe)
3175                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3176
3177         GEN5_IRQ_RESET(VLV_);
3178 }
3179
3180 static void valleyview_irq_preinstall(struct drm_device *dev)
3181 {
3182         struct drm_i915_private *dev_priv = dev->dev_private;
3183
3184         /* VLV magic */
3185         I915_WRITE(VLV_IMR, 0);
3186         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3187         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3188         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3189
3190         gen5_gt_irq_reset(dev);
3191
3192         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3193
3194         vlv_display_irq_reset(dev_priv);
3195 }
3196
3197 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3198 {
3199         GEN8_IRQ_RESET_NDX(GT, 0);
3200         GEN8_IRQ_RESET_NDX(GT, 1);
3201         GEN8_IRQ_RESET_NDX(GT, 2);
3202         GEN8_IRQ_RESET_NDX(GT, 3);
3203 }
3204
3205 static void gen8_irq_reset(struct drm_device *dev)
3206 {
3207         struct drm_i915_private *dev_priv = dev->dev_private;
3208         int pipe;
3209
3210         I915_WRITE(GEN8_MASTER_IRQ, 0);
3211         POSTING_READ(GEN8_MASTER_IRQ);
3212
3213         gen8_gt_irq_reset(dev_priv);
3214
3215         for_each_pipe(dev_priv, pipe)
3216                 if (intel_display_power_is_enabled(dev_priv,
3217                                                    POWER_DOMAIN_PIPE(pipe)))
3218                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3219
3220         GEN5_IRQ_RESET(GEN8_DE_PORT_);
3221         GEN5_IRQ_RESET(GEN8_DE_MISC_);
3222         GEN5_IRQ_RESET(GEN8_PCU_);
3223
3224         ibx_irq_reset(dev);
3225 }
3226
3227 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3228 {
3229         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3230
3231         spin_lock_irq(&dev_priv->irq_lock);
3232         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3233                           ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3234         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3235                           ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3236         spin_unlock_irq(&dev_priv->irq_lock);
3237 }
3238
3239 static void cherryview_irq_preinstall(struct drm_device *dev)
3240 {
3241         struct drm_i915_private *dev_priv = dev->dev_private;
3242
3243         I915_WRITE(GEN8_MASTER_IRQ, 0);
3244         POSTING_READ(GEN8_MASTER_IRQ);
3245
3246         gen8_gt_irq_reset(dev_priv);
3247
3248         GEN5_IRQ_RESET(GEN8_PCU_);
3249
3250         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3251
3252         vlv_display_irq_reset(dev_priv);
3253 }
3254
3255 static void ibx_hpd_irq_setup(struct drm_device *dev)
3256 {
3257         struct drm_i915_private *dev_priv = dev->dev_private;
3258         struct intel_encoder *intel_encoder;
3259         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3260
3261         if (HAS_PCH_IBX(dev)) {
3262                 hotplug_irqs = SDE_HOTPLUG_MASK;
3263                 for_each_intel_encoder(dev, intel_encoder)
3264                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3265                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3266         } else {
3267                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3268                 for_each_intel_encoder(dev, intel_encoder)
3269                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3270                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3271         }
3272
3273         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3274
3275         /*
3276          * Enable digital hotplug on the PCH, and configure the DP short pulse
3277          * duration to 2ms (which is the minimum in the Display Port spec)
3278          *
3279          * This register is the same on all known PCH chips.
3280          */
3281         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3282         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3283         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3284         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3285         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3286         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3287 }
3288
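/*
 * Illustrative sketch (not driver code): the hotplug programming above
 * is a plain read-modify-write -- clear each multi-bit pulse-duration
 * field through its mask, then OR in the enable bit and the 2ms code.
 * Generic form of the field update, with hypothetical parameters:
 */
static inline u32 example_set_reg_field(u32 reg, u32 field_mask, u32 field_val)
{
        return (reg & ~field_mask) | (field_val & field_mask);
}
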
3289 static void ibx_irq_postinstall(struct drm_device *dev)
3290 {
3291         struct drm_i915_private *dev_priv = dev->dev_private;
3292         u32 mask;
3293
3294         if (HAS_PCH_NOP(dev))
3295                 return;
3296
3297         if (HAS_PCH_IBX(dev))
3298                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3299         else
3300                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3301
3302         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3303         I915_WRITE(SDEIMR, ~mask);
3304 }
3305
3306 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3307 {
3308         struct drm_i915_private *dev_priv = dev->dev_private;
3309         u32 pm_irqs, gt_irqs;
3310
3311         pm_irqs = gt_irqs = 0;
3312
3313         dev_priv->gt_irq_mask = ~0;
3314         if (HAS_L3_DPF(dev)) {
3315                 /* L3 parity interrupt is always unmasked. */
3316                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3317                 gt_irqs |= GT_PARITY_ERROR(dev);
3318         }
3319
3320         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3321         if (IS_GEN5(dev)) {
3322                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3323                            ILK_BSD_USER_INTERRUPT;
3324         } else {
3325                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3326         }
3327
3328         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3329
3330         if (INTEL_INFO(dev)->gen >= 6) {
3331                 /*
3332                  * RPS interrupts will get enabled/disabled on demand when RPS
3333                  * itself is enabled/disabled.
3334                  */
3335                 if (HAS_VEBOX(dev))
3336                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3337
3338                 dev_priv->pm_irq_mask = 0xffffffff;
3339                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3340         }
3341 }
3342
3343 static int ironlake_irq_postinstall(struct drm_device *dev)
3344 {
3345         struct drm_i915_private *dev_priv = dev->dev_private;
3346         u32 display_mask, extra_mask;
3347
3348         if (INTEL_INFO(dev)->gen >= 7) {
3349                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3350                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3351                                 DE_PLANEB_FLIP_DONE_IVB |
3352                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3353                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3354                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3355         } else {
3356                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3357                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3358                                 DE_AUX_CHANNEL_A |
3359                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3360                                 DE_POISON);
3361                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3362                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3363         }
3364
3365         dev_priv->irq_mask = ~display_mask;
3366
3367         I915_WRITE(HWSTAM, 0xeffe);
3368
3369         ibx_irq_pre_postinstall(dev);
3370
3371         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3372
3373         gen5_gt_irq_postinstall(dev);
3374
3375         ibx_irq_postinstall(dev);
3376
3377         if (IS_IRONLAKE_M(dev)) {
3378                 /* Enable PCU event interrupts
3379                  *
3380                  * spinlocking not required here for correctness since interrupt
3381                  * setup is guaranteed to run in single-threaded context. But we
3382                  * need it to make the assert_spin_locked happy. */
3383                 spin_lock_irq(&dev_priv->irq_lock);
3384                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3385                 spin_unlock_irq(&dev_priv->irq_lock);
3386         }
3387
3388         return 0;
3389 }
3390
3391 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3392 {
3393         u32 pipestat_mask;
3394         u32 iir_mask;
3395         enum pipe pipe;
3396
3397         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3398                         PIPE_FIFO_UNDERRUN_STATUS;
3399
3400         for_each_pipe(dev_priv, pipe)
3401                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3402         POSTING_READ(PIPESTAT(PIPE_A));
3403
3404         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3405                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3406
3407         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3408         for_each_pipe(dev_priv, pipe)
3409                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3410
3411         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3412                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3413                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3414         if (IS_CHERRYVIEW(dev_priv))
3415                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3416         dev_priv->irq_mask &= ~iir_mask;
3417
3418         I915_WRITE(VLV_IIR, iir_mask);
3419         I915_WRITE(VLV_IIR, iir_mask);
3420         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3421         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3422         POSTING_READ(VLV_IMR);
3423 }
3424
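/*
 * Editor's reading, not documented hardware behaviour: both the install
 * above and the uninstall below write VLV_IIR twice before touching
 * IER/IMR, apparently so that a status bit which re-latches after the
 * first ack is caught by the second and nothing stale fires the moment
 * the mask opens. As a hypothetical pattern (the real code goes through
 * I915_WRITE, not raw pointers):
 */
static inline void example_ack_twice_then_unmask(volatile u32 *iir,
                                                 volatile u32 *imr, u32 bits)
{
        *iir = bits;            /* first ack */
        *iir = bits;            /* re-ack anything that re-latched */
        *imr &= ~bits;          /* only now expose the sources */
}
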
3425 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3426 {
3427         u32 pipestat_mask;
3428         u32 iir_mask;
3429         enum pipe pipe;
3430
3431         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3432                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3433                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3434         if (IS_CHERRYVIEW(dev_priv))
3435                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3436
3437         dev_priv->irq_mask |= iir_mask;
3438         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3439         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3440         I915_WRITE(VLV_IIR, iir_mask);
3441         I915_WRITE(VLV_IIR, iir_mask);
3442         POSTING_READ(VLV_IIR);
3443
3444         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3445                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3446
3447         i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3448         for_each_pipe(dev_priv, pipe)
3449                 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3450
3451         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3452                         PIPE_FIFO_UNDERRUN_STATUS;
3453
3454         for_each_pipe(dev_priv, pipe)
3455                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3456         POSTING_READ(PIPESTAT(PIPE_A));
3457 }
3458
3459 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3460 {
3461         assert_spin_locked(&dev_priv->irq_lock);
3462
3463         if (dev_priv->display_irqs_enabled)
3464                 return;
3465
3466         dev_priv->display_irqs_enabled = true;
3467
3468         if (intel_irqs_enabled(dev_priv))
3469                 valleyview_display_irqs_install(dev_priv);
3470 }
3471
3472 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3473 {
3474         assert_spin_locked(&dev_priv->irq_lock);
3475
3476         if (!dev_priv->display_irqs_enabled)
3477                 return;
3478
3479         dev_priv->display_irqs_enabled = false;
3480
3481         if (intel_irqs_enabled(dev_priv))
3482                 valleyview_display_irqs_uninstall(dev_priv);
3483 }
3484
3485 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3486 {
3487         dev_priv->irq_mask = ~0;
3488
3489         I915_WRITE(PORT_HOTPLUG_EN, 0);
3490         POSTING_READ(PORT_HOTPLUG_EN);
3491
3492         I915_WRITE(VLV_IIR, 0xffffffff);
3493         I915_WRITE(VLV_IIR, 0xffffffff);
3494         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3495         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3496         POSTING_READ(VLV_IMR);
3497
3498         /* Interrupt setup is already guaranteed to be single-threaded, this is
3499          * just to make the assert_spin_locked check happy. */
3500         spin_lock_irq(&dev_priv->irq_lock);
3501         if (dev_priv->display_irqs_enabled)
3502                 valleyview_display_irqs_install(dev_priv);
3503         spin_unlock_irq(&dev_priv->irq_lock);
3504 }
3505
3506 static int valleyview_irq_postinstall(struct drm_device *dev)
3507 {
3508         struct drm_i915_private *dev_priv = dev->dev_private;
3509
3510         vlv_display_irq_postinstall(dev_priv);
3511
3512         gen5_gt_irq_postinstall(dev);
3513
3514         /* ack & enable invalid PTE error interrupts */
3515 #if 0 /* FIXME: add support to irq handler for checking these bits */
3516         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3517         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3518 #endif
3519
3520         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3521
3522         return 0;
3523 }
3524
3525 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3526 {
3527         /* These are interrupts we'll toggle with the ring mask register */
3528         uint32_t gt_interrupts[] = {
3529                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3530                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3531                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3532                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3533                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3534                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3535                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3536                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3537                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3538                 0,
3539                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3540                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3541                 };
3542
3543         dev_priv->pm_irq_mask = 0xffffffff;
3544         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3545         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3546         /*
3547          * RPS interrupts will get enabled/disabled on demand when RPS itself
3548          * is enabled/disabled.
3549          */
3550         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3551         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3552 }
3553
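/*
 * Illustrative sketch (not driver code): gen8 packs two engines into
 * each GT interrupt register, so gt_interrupts[] above shifts every
 * per-engine event mask by that engine's *_IRQ_SHIFT before ORing them
 * together. Hypothetical form for one register covering two engines:
 */
static inline u32 example_pack_engine_irqs(u32 events_a, int shift_a,
                                           u32 events_b, int shift_b)
{
        return (events_a << shift_a) | (events_b << shift_b);
}
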
3554 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3555 {
3556         u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3557         u32 de_pipe_enables;
3558         int pipe;
3559         u32 aux_en = GEN8_AUX_CHANNEL_A;
3560
3561         if (IS_GEN9(dev_priv)) {
3562                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3563                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3564                 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3565                         GEN9_AUX_CHANNEL_D;
3566         } else {
3567                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3568                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3569         }

3570         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3571                                            GEN8_PIPE_FIFO_UNDERRUN;
3572
3573         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3574         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3575         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3576
3577         for_each_pipe(dev_priv, pipe)
3578                 if (intel_display_power_is_enabled(dev_priv,
3579                                 POWER_DOMAIN_PIPE(pipe)))
3580                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3581                                           dev_priv->de_irq_mask[pipe],
3582                                           de_pipe_enables);
3583
3584         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3585 }
3586
3587 static int gen8_irq_postinstall(struct drm_device *dev)
3588 {
3589         struct drm_i915_private *dev_priv = dev->dev_private;
3590
3591         ibx_irq_pre_postinstall(dev);
3592
3593         gen8_gt_irq_postinstall(dev_priv);
3594         gen8_de_irq_postinstall(dev_priv);
3595
3596         ibx_irq_postinstall(dev);
3597
3598         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3599         POSTING_READ(GEN8_MASTER_IRQ);
3600
3601         return 0;
3602 }
3603
3604 static int cherryview_irq_postinstall(struct drm_device *dev)
3605 {
3606         struct drm_i915_private *dev_priv = dev->dev_private;
3607
3608         vlv_display_irq_postinstall(dev_priv);
3609
3610         gen8_gt_irq_postinstall(dev_priv);
3611
3612         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3613         POSTING_READ(GEN8_MASTER_IRQ);
3614
3615         return 0;
3616 }
3617
3618 static void gen8_irq_uninstall(struct drm_device *dev)
3619 {
3620         struct drm_i915_private *dev_priv = dev->dev_private;
3621
3622         if (!dev_priv)
3623                 return;
3624
3625         gen8_irq_reset(dev);
3626 }
3627
3628 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3629 {
3630         /* Interrupt setup is already guaranteed to be single-threaded, this is
3631          * just to make the assert_spin_locked check happy. */
3632         spin_lock_irq(&dev_priv->irq_lock);
3633         if (dev_priv->display_irqs_enabled)
3634                 valleyview_display_irqs_uninstall(dev_priv);
3635         spin_unlock_irq(&dev_priv->irq_lock);
3636
3637         vlv_display_irq_reset(dev_priv);
3638
3639         dev_priv->irq_mask = ~0;
3640 }
3641
3642 static void valleyview_irq_uninstall(struct drm_device *dev)
3643 {
3644         struct drm_i915_private *dev_priv = dev->dev_private;
3645
3646         if (!dev_priv)
3647                 return;
3648
3649         I915_WRITE(VLV_MASTER_IER, 0);
3650
3651         gen5_gt_irq_reset(dev);
3652
3653         I915_WRITE(HWSTAM, 0xffffffff);
3654
3655         vlv_display_irq_uninstall(dev_priv);
3656 }
3657
3658 static void cherryview_irq_uninstall(struct drm_device *dev)
3659 {
3660         struct drm_i915_private *dev_priv = dev->dev_private;
3661
3662         if (!dev_priv)
3663                 return;
3664
3665         I915_WRITE(GEN8_MASTER_IRQ, 0);
3666         POSTING_READ(GEN8_MASTER_IRQ);
3667
3668         gen8_gt_irq_reset(dev_priv);
3669
3670         GEN5_IRQ_RESET(GEN8_PCU_);
3671
3672         vlv_display_irq_uninstall(dev_priv);
3673 }
3674
3675 static void ironlake_irq_uninstall(struct drm_device *dev)
3676 {
3677         struct drm_i915_private *dev_priv = dev->dev_private;
3678
3679         if (!dev_priv)
3680                 return;
3681
3682         ironlake_irq_reset(dev);
3683 }
3684
3685 static void i8xx_irq_preinstall(struct drm_device *dev)
3686 {
3687         struct drm_i915_private *dev_priv = dev->dev_private;
3688         int pipe;
3689
3690         for_each_pipe(dev_priv, pipe)
3691                 I915_WRITE(PIPESTAT(pipe), 0);
3692         I915_WRITE16(IMR, 0xffff);
3693         I915_WRITE16(IER, 0x0);
3694         POSTING_READ16(IER);
3695 }
3696
3697 static int i8xx_irq_postinstall(struct drm_device *dev)
3698 {
3699         struct drm_i915_private *dev_priv = dev->dev_private;
3700
3701         I915_WRITE16(EMR,
3702                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3703
3704         /* Unmask the interrupts that we always want on. */
3705         dev_priv->irq_mask =
3706                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3707                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3708                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3709                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3710                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3711         I915_WRITE16(IMR, dev_priv->irq_mask);
3712
3713         I915_WRITE16(IER,
3714                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3715                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3716                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3717                      I915_USER_INTERRUPT);
3718         POSTING_READ16(IER);
3719
3720         /* Interrupt setup is already guaranteed to be single-threaded, this is
3721          * just to make the assert_spin_locked check happy. */
3722         spin_lock_irq(&dev_priv->irq_lock);
3723         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3724         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3725         spin_unlock_irq(&dev_priv->irq_lock);
3726
3727         return 0;
3728 }
3729
3730 /*
3731  * Returns true when a page flip has completed.
3732  */
3733 static bool i8xx_handle_vblank(struct drm_device *dev,
3734                                int plane, int pipe, u32 iir)
3735 {
3736         struct drm_i915_private *dev_priv = dev->dev_private;
3737         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3738
3739         if (!intel_pipe_handle_vblank(dev, pipe))
3740                 return false;
3741
3742         if ((iir & flip_pending) == 0)
3743                 goto check_page_flip;
3744
3745         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3746          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3747          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3748          * the flip is completed (no longer pending). Since this doesn't raise
3749          * an interrupt per se, we watch for the change at vblank.
3750          */
3751         if (I915_READ16(ISR) & flip_pending)
3752                 goto check_page_flip;
3753
3754         intel_prepare_page_flip(dev, plane);
3755         intel_finish_page_flip(dev, pipe);
3756         return true;
3757
3758 check_page_flip:
3759         intel_check_page_flip(dev, pipe);
3760         return false;
3761 }
3762
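/*
 * Illustrative sketch (not driver code): the flip-done test above keys
 * off the IIR/ISR pair -- IIR latches that PendingFlip was asserted
 * after MI_DISPLAY_FLIP, while ISR reflects its live level, so "latched
 * but no longer live" means the flip finished. Since that 1->0 edge
 * raises no interrupt of its own, it is sampled at vblank. As a
 * predicate with hypothetical parameters:
 */
static inline bool example_flip_completed(u32 iir, u32 isr, u32 flip_pending)
{
        return (iir & flip_pending) && !(isr & flip_pending);
}
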
3763 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3764 {
3765         struct drm_device *dev = arg;
3766         struct drm_i915_private *dev_priv = dev->dev_private;
3767         u16 iir, new_iir;
3768         u32 pipe_stats[2];
3769         int pipe;
3770         u16 flip_mask =
3771                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3772                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3773
3774         iir = I915_READ16(IIR);
3775         if (iir == 0)
3776                 return IRQ_NONE;
3777
3778         while (iir & ~flip_mask) {
3779                 /* Can't rely on pipestat interrupt bit in iir as it might
3780                  * have been cleared after the pipestat interrupt was received.
3781                  * It doesn't set the bit in iir again, but it still produces
3782                  * interrupts (for non-MSI).
3783                  */
3784                 spin_lock(&dev_priv->irq_lock);
3785                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3786                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3787
3788                 for_each_pipe(dev_priv, pipe) {
3789                         int reg = PIPESTAT(pipe);
3790                         pipe_stats[pipe] = I915_READ(reg);
3791
3792                         /*
3793                          * Clear the PIPE*STAT regs before the IIR
3794                          */
3795                         if (pipe_stats[pipe] & 0x8000ffff)
3796                                 I915_WRITE(reg, pipe_stats[pipe]);
3797                 }
3798                 spin_unlock(&dev_priv->irq_lock);
3799
3800                 I915_WRITE16(IIR, iir & ~flip_mask);
3801                 new_iir = I915_READ16(IIR); /* Flush posted writes */
3802
3803                 if (iir & I915_USER_INTERRUPT)
3804                         notify_ring(dev, &dev_priv->ring[RCS]);
3805
3806                 for_each_pipe(dev_priv, pipe) {
3807                         int plane = pipe;
3808                         if (HAS_FBC(dev))
3809                                 plane = !plane;
3810
3811                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3812                             i8xx_handle_vblank(dev, plane, pipe, iir))
3813                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3814
3815                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3816                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3817
3818                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3819                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3820                                                                     pipe);
3821                 }
3822
3823                 iir = new_iir;
3824         }
3825
3826         return IRQ_HANDLED;
3827 }
3828
3829 static void i8xx_irq_uninstall(struct drm_device *dev)
3830 {
3831         struct drm_i915_private *dev_priv = dev->dev_private;
3832         int pipe;
3833
3834         for_each_pipe(dev_priv, pipe) {
3835                 /* Clear enable bits; then clear status bits */
3836                 I915_WRITE(PIPESTAT(pipe), 0);
3837                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3838         }
3839         I915_WRITE16(IMR, 0xffff);
3840         I915_WRITE16(IER, 0x0);
3841         I915_WRITE16(IIR, I915_READ16(IIR));
3842 }
3843
3844 static void i915_irq_preinstall(struct drm_device *dev)
3845 {
3846         struct drm_i915_private *dev_priv = dev->dev_private;
3847         int pipe;
3848
3849         if (I915_HAS_HOTPLUG(dev)) {
3850                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3851                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3852         }
3853
3854         I915_WRITE16(HWSTAM, 0xeffe);
3855         for_each_pipe(dev_priv, pipe)
3856                 I915_WRITE(PIPESTAT(pipe), 0);
3857         I915_WRITE(IMR, 0xffffffff);
3858         I915_WRITE(IER, 0x0);
3859         POSTING_READ(IER);
3860 }
3861
3862 static int i915_irq_postinstall(struct drm_device *dev)
3863 {
3864         struct drm_i915_private *dev_priv = dev->dev_private;
3865         u32 enable_mask;
3866
3867         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3868
3869         /* Unmask the interrupts that we always want on. */
3870         dev_priv->irq_mask =
3871                 ~(I915_ASLE_INTERRUPT |
3872                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3873                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3874                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3875                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3876                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3877
3878         enable_mask =
3879                 I915_ASLE_INTERRUPT |
3880                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3881                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3882                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3883                 I915_USER_INTERRUPT;
3884
3885         if (I915_HAS_HOTPLUG(dev)) {
3886                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3887                 POSTING_READ(PORT_HOTPLUG_EN);
3888
3889                 /* Enable in IER... */
3890                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3891                 /* and unmask in IMR */
3892                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3893         }
3894
3895         I915_WRITE(IMR, dev_priv->irq_mask);
3896         I915_WRITE(IER, enable_mask);
3897         POSTING_READ(IER);
3898
3899         i915_enable_asle_pipestat(dev);
3900
3901         /* Interrupt setup is already guaranteed to be single-threaded, this is
3902          * just to make the assert_spin_locked check happy. */
3903         spin_lock_irq(&dev_priv->irq_lock);
3904         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3905         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3906         spin_unlock_irq(&dev_priv->irq_lock);
3907
3908         return 0;
3909 }
3910
3911 /*
3912  * Returns true when a page flip has completed.
3913  */
3914 static bool i915_handle_vblank(struct drm_device *dev,
3915                                int plane, int pipe, u32 iir)
3916 {
3917         struct drm_i915_private *dev_priv = dev->dev_private;
3918         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3919
3920         if (!intel_pipe_handle_vblank(dev, pipe))
3921                 return false;
3922
3923         if ((iir & flip_pending) == 0)
3924                 goto check_page_flip;
3925
3926         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3927          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3928          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3929          * the flip is completed (no longer pending). Since this doesn't raise
3930          * an interrupt per se, we watch for the change at vblank.
3931          */
3932         if (I915_READ(ISR) & flip_pending)
3933                 goto check_page_flip;
3934
3935         intel_prepare_page_flip(dev, plane);
3936         intel_finish_page_flip(dev, pipe);
3937         return true;
3938
3939 check_page_flip:
3940         intel_check_page_flip(dev, pipe);
3941         return false;
3942 }
3943
3944 static irqreturn_t i915_irq_handler(int irq, void *arg)
3945 {
3946         struct drm_device *dev = arg;
3947         struct drm_i915_private *dev_priv = dev->dev_private;
3948         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3949         u32 flip_mask =
3950                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3951                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3952         int pipe, ret = IRQ_NONE;
3953
3954         iir = I915_READ(IIR);
3955         do {
3956                 bool irq_received = (iir & ~flip_mask) != 0;
3957                 bool blc_event = false;
3958
3959                 /* Can't rely on pipestat interrupt bit in iir as it might
3960                  * have been cleared after the pipestat interrupt was received.
3961                  * It doesn't set the bit in iir again, but it still produces
3962                  * interrupts (for non-MSI).
3963                  */
3964                 spin_lock(&dev_priv->irq_lock);
3965                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3966                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3967
3968                 for_each_pipe(dev_priv, pipe) {
3969                         int reg = PIPESTAT(pipe);
3970                         pipe_stats[pipe] = I915_READ(reg);
3971
3972                         /* Clear the PIPE*STAT regs before the IIR */
3973                         if (pipe_stats[pipe] & 0x8000ffff) {
3974                                 I915_WRITE(reg, pipe_stats[pipe]);
3975                                 irq_received = true;
3976                         }
3977                 }
3978                 spin_unlock(&dev_priv->irq_lock);
3979
3980                 if (!irq_received)
3981                         break;
3982
3983                 /* Consume port.  Then clear IIR or we'll miss events */
3984                 if (I915_HAS_HOTPLUG(dev) &&
3985                     iir & I915_DISPLAY_PORT_INTERRUPT)
3986                         i9xx_hpd_irq_handler(dev);
3987
3988                 I915_WRITE(IIR, iir & ~flip_mask);
3989                 new_iir = I915_READ(IIR); /* Flush posted writes */
3990
3991                 if (iir & I915_USER_INTERRUPT)
3992                         notify_ring(dev, &dev_priv->ring[RCS]);
3993
3994                 for_each_pipe(dev_priv, pipe) {
3995                         int plane = pipe;
3996                         if (HAS_FBC(dev))
3997                                 plane = !plane;
3998
3999                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4000                             i915_handle_vblank(dev, plane, pipe, iir))
4001                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4002
4003                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4004                                 blc_event = true;
4005
4006                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4007                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4008
4009                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4010                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4011                                                                     pipe);
4012                 }
4013
4014                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4015                         intel_opregion_asle_intr(dev);
4016
4017                 /* With MSI, interrupts are only generated when iir
4018                  * transitions from zero to nonzero.  If another bit got
4019                  * set while we were handling the existing iir bits, then
4020                  * we would never get another interrupt.
4021                  *
4022                  * This is fine on non-MSI as well, as if we hit this path
4023                  * we avoid exiting the interrupt handler only to generate
4024                  * another one.
4025                  *
4026                  * Note that for MSI this could cause a stray interrupt report
4027                  * if an interrupt landed in the time between writing IIR and
4028                  * the posting read.  This should be rare enough to never
4029                  * trigger the 99% of 100,000 interrupts test for disabling
4030                  * stray interrupts.
4031                  */
4032                 ret = IRQ_HANDLED;
4033                 iir = new_iir;
4034         } while (iir & ~flip_mask);
4035
4036         return ret;
4037 }
4038
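/*
 * Illustrative sketch (not driver code): the i8xx, i915 and i965
 * handlers all share one loop shape -- snapshot IIR, ack the bits being
 * handled, re-read IIR to flush the posted write and pick up anything
 * that arrived meanwhile, and loop until it reads back zero. That final
 * zero also restores the 0 -> nonzero transition MSI needs to signal the
 * next interrupt. Skeleton with hypothetical accessors:
 */
static inline void example_iir_loop(u32 (*read_iir)(void),
                                    void (*write_iir)(u32))
{
        u32 iir = read_iir();

        while (iir) {
                write_iir(iir);         /* ack what we are about to handle */
                /* ... dispatch on the bits in iir here ... */
                iir = read_iir();       /* flush posted write, fetch new bits */
        }
}
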
4039 static void i915_irq_uninstall(struct drm_device *dev)
4040 {
4041         struct drm_i915_private *dev_priv = dev->dev_private;
4042         int pipe;
4043
4044         if (I915_HAS_HOTPLUG(dev)) {
4045                 I915_WRITE(PORT_HOTPLUG_EN, 0);
4046                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4047         }
4048
4049         I915_WRITE16(HWSTAM, 0xffff);
4050         for_each_pipe(dev_priv, pipe) {
4051                 /* Clear enable bits; then clear status bits */
4052                 I915_WRITE(PIPESTAT(pipe), 0);
4053                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4054         }
4055         I915_WRITE(IMR, 0xffffffff);
4056         I915_WRITE(IER, 0x0);
4057
4058         I915_WRITE(IIR, I915_READ(IIR));
4059 }
4060
4061 static void i965_irq_preinstall(struct drm_device *dev)
4062 {
4063         struct drm_i915_private *dev_priv = dev->dev_private;
4064         int pipe;
4065
4066         I915_WRITE(PORT_HOTPLUG_EN, 0);
4067         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4068
4069         I915_WRITE(HWSTAM, 0xeffe);
4070         for_each_pipe(dev_priv, pipe)
4071                 I915_WRITE(PIPESTAT(pipe), 0);
4072         I915_WRITE(IMR, 0xffffffff);
4073         I915_WRITE(IER, 0x0);
4074         POSTING_READ(IER);
4075 }
4076
4077 static int i965_irq_postinstall(struct drm_device *dev)
4078 {
4079         struct drm_i915_private *dev_priv = dev->dev_private;
4080         u32 enable_mask;
4081         u32 error_mask;
4082
4083         /* Unmask the interrupts that we always want on. */
4084         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4085                                I915_DISPLAY_PORT_INTERRUPT |
4086                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4087                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4088                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4089                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4090                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4091
4092         enable_mask = ~dev_priv->irq_mask;
4093         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4094                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4095         enable_mask |= I915_USER_INTERRUPT;
4096
4097         if (IS_G4X(dev))
4098                 enable_mask |= I915_BSD_USER_INTERRUPT;
4099
4100         /* Interrupt setup is already guaranteed to be single-threaded, this is
4101          * just to make the assert_spin_locked check happy. */
4102         spin_lock_irq(&dev_priv->irq_lock);
4103         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4104         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4105         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4106         spin_unlock_irq(&dev_priv->irq_lock);
4107
4108         /*
4109          * Enable some error detection, note the instruction error mask
4110          * bit is reserved, so we leave it masked.
4111          */
4112         if (IS_G4X(dev)) {
4113                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4114                                GM45_ERROR_MEM_PRIV |
4115                                GM45_ERROR_CP_PRIV |
4116                                I915_ERROR_MEMORY_REFRESH);
4117         } else {
4118                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4119                                I915_ERROR_MEMORY_REFRESH);
4120         }
4121         I915_WRITE(EMR, error_mask);
4122
4123         I915_WRITE(IMR, dev_priv->irq_mask);
4124         I915_WRITE(IER, enable_mask);
4125         POSTING_READ(IER);
4126
4127         I915_WRITE(PORT_HOTPLUG_EN, 0);
4128         POSTING_READ(PORT_HOTPLUG_EN);
4129
4130         i915_enable_asle_pipestat(dev);
4131
4132         return 0;
4133 }
4134
4135 static void i915_hpd_irq_setup(struct drm_device *dev)
4136 {
4137         struct drm_i915_private *dev_priv = dev->dev_private;
4138         struct intel_encoder *intel_encoder;
4139         u32 hotplug_en;
4140
4141         assert_spin_locked(&dev_priv->irq_lock);
4142
4143         hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4144         hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4145         /* Note HDMI and DP share hotplug bits */
4146         /* enable bits are the same for all generations */
4147         for_each_intel_encoder(dev, intel_encoder)
4148                 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4149                         hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4150         /*
4151          * Programming the CRT detection parameters tends to generate a
4152          * spurious hotplug event about three seconds later, so just do it once.
4153          */
4154         if (IS_G4X(dev))
4155                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4156         hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4157         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4158
4159         /* Ignore TV since it's buggy */
4160         I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4161 }
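/*
 * The setup above is a read-modify-write of PORT_HOTPLUG_EN: only the
 * per-pin enable bits and the CRT detection fields are rewritten, and
 * everything else in the register is preserved. Condensed sketch of the
 * pattern, with stand-in names rather than the driver's accessors:
 *
 *	u32 v = read_reg(PORT_HOTPLUG_EN);
 *	v &= ~(PIN_ENABLE_BITS | CRT_VOLTAGE_COMPARE_FIELD);
 *	v |= enabled_pins | CRT_COMPARE_50;	// plus ACTIVATION_PERIOD_64 on G4X
 *	write_reg(PORT_HOTPLUG_EN, v);
 */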
4162
4163 static irqreturn_t i965_irq_handler(int irq, void *arg)
4164 {
4165         struct drm_device *dev = arg;
4166         struct drm_i915_private *dev_priv = dev->dev_private;
4167         u32 iir, new_iir;
4168         u32 pipe_stats[I915_MAX_PIPES];
4169         int ret = IRQ_NONE, pipe;
4170         u32 flip_mask =
4171                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4172                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4173
4174         iir = I915_READ(IIR);
4175
4176         for (;;) {
4177                 bool irq_received = (iir & ~flip_mask) != 0;
4178                 bool blc_event = false;
4179
4180                 /* Can't rely on the pipestat interrupt bit in iir, as it might
4181                  * have been cleared after the pipestat interrupt was received.
4182                  * It doesn't set the bit in iir again, but it still produces
4183                  * interrupts (for non-MSI).
4184                  */
4185                 spin_lock(&dev_priv->irq_lock);
4186                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4187                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4188
4189                 for_each_pipe(dev_priv, pipe) {
4190                         int reg = PIPESTAT(pipe);
4191                         pipe_stats[pipe] = I915_READ(reg);
4192
4193                         /*
4194                          * Clear the PIPE*STAT regs before the IIR
4195                          */
4196                         if (pipe_stats[pipe] & 0x8000ffff) {
4197                                 I915_WRITE(reg, pipe_stats[pipe]);
4198                                 irq_received = true;
4199                         }
4200                 }
4201                 spin_unlock(&dev_priv->irq_lock);
4202
4203                 if (!irq_received)
4204                         break;
4205
4206                 ret = IRQ_HANDLED;
4207
4208                 /* Consume port.  Then clear IIR or we'll miss events */
4209                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4210                         i9xx_hpd_irq_handler(dev);
4211
4212                 I915_WRITE(IIR, iir & ~flip_mask);
4213                 new_iir = I915_READ(IIR); /* Flush posted writes */
4214
4215                 if (iir & I915_USER_INTERRUPT)
4216                         notify_ring(dev, &dev_priv->ring[RCS]);
4217                 if (iir & I915_BSD_USER_INTERRUPT)
4218                         notify_ring(dev, &dev_priv->ring[VCS]);
4219
4220                 for_each_pipe(dev_priv, pipe) {
4221                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4222                             i915_handle_vblank(dev, pipe, pipe, iir))
4223                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4224
4225                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4226                                 blc_event = true;
4227
4228                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4229                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4230
4231                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4232                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4233                 }
4234
4235                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4236                         intel_opregion_asle_intr(dev);
4237
4238                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4239                         gmbus_irq_handler(dev);
4240
4241                 /* With MSI, interrupts are only generated when iir
4242                  * transitions from zero to nonzero.  If another bit got
4243                  * set while we were handling the existing iir bits, then
4244                  * we would never get another interrupt.
4245                  *
4246                  * This is fine on non-MSI as well, as if we hit this path
4247                  * we avoid exiting the interrupt handler only to generate
4248                  * another one.
4249                  *
4250                  * Note that for MSI this could cause a stray interrupt report
4251                  * if an interrupt landed in the time between writing IIR and
4252                  * the posting read.  This should be rare enough to never
4253                  * trigger the 99% of 100,000 interrupts test for disabling
4254                  * stray interrupts.
4255                  */
4256                 iir = new_iir;
4257         }
4258
4259         return ret;
4260 }
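/*
 * A compressed model of the ack-then-reread loop in the handler above,
 * with hypothetical read_iir()/write_iir() helpers standing in for the
 * MMIO accessors. Writing back the observed bits acks them; re-reading
 * IIR right away both flushes the posted write and picks up any edge that
 * fired in between, so the loop only exits once IIR is truly quiescent,
 * which is what keeps edge-triggered MSI from wedging:
 *
 *	u32 iir = read_iir();
 *	while (iir) {
 *		write_iir(iir);			// ack what we saw
 *		u32 new_iir = read_iir();	// flush + catch late arrivals
 *		handle_bits(iir);
 *		iir = new_iir;
 *	}
 */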
4261
4262 static void i965_irq_uninstall(struct drm_device *dev)
4263 {
4264         struct drm_i915_private *dev_priv = dev->dev_private;
4265         int pipe;
4266
4267         if (!dev_priv)
4268                 return;
4269
4270         I915_WRITE(PORT_HOTPLUG_EN, 0);
4271         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4272
4273         I915_WRITE(HWSTAM, 0xffffffff);
4274         for_each_pipe(dev_priv, pipe)
4275                 I915_WRITE(PIPESTAT(pipe), 0);
4276         I915_WRITE(IMR, 0xffffffff);
4277         I915_WRITE(IER, 0x0);
4278
4279         for_each_pipe(dev_priv, pipe)
4280                 I915_WRITE(PIPESTAT(pipe),
4281                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4282         I915_WRITE(IIR, I915_READ(IIR));
4283 }
4284
4285 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4286 {
4287         struct drm_i915_private *dev_priv =
4288                 container_of(work, typeof(*dev_priv),
4289                              hotplug_reenable_work.work);
4290         struct drm_device *dev = dev_priv->dev;
4291         struct drm_mode_config *mode_config = &dev->mode_config;
4292         int i;
4293
4294         intel_runtime_pm_get(dev_priv);
4295
4296         spin_lock_irq(&dev_priv->irq_lock);
4297         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4298                 struct drm_connector *connector;
4299
4300                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4301                         continue;
4302
4303                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4304
4305                 list_for_each_entry(connector, &mode_config->connector_list, head) {
4306                         struct intel_connector *intel_connector = to_intel_connector(connector);
4307
4308                         if (intel_connector->encoder->hpd_pin == i) {
4309                                 if (connector->polled != intel_connector->polled)
4310                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4311                                                          connector->name);
4312                                 connector->polled = intel_connector->polled;
4313                                 if (!connector->polled)
4314                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4315                         }
4316                 }
4317         }
4318         if (dev_priv->display.hpd_irq_setup)
4319                 dev_priv->display.hpd_irq_setup(dev);
4320         spin_unlock_irq(&dev_priv->irq_lock);
4321
4322         intel_runtime_pm_put(dev_priv);
4323 }
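/*
 * Summary of the state transition the work item above performs, using the
 * hpd_mark values from this file: the storm detection in the interrupt
 * path demotes a noisy pin to HPD_DISABLED and leaves its connectors on
 * timer-based polling; once the cool-down delay expires, this work flips
 * the pin back and restores hotplug-driven probing:
 *
 *	HPD_ENABLED  --(irq storm)-->     HPD_DISABLED, timer polling
 *	HPD_DISABLED --(reenable work)--> HPD_ENABLED,  POLL_HPD again
 */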
4324
4325 /**
4326  * intel_irq_init - initializes irq support
4327  * @dev_priv: i915 device instance
4328  *
4329  * This function initializes all the irq support including work items, timers
4330  * and all the vtables. It does not set up the interrupt itself, though.
4331  */
4332 void intel_irq_init(struct drm_i915_private *dev_priv)
4333 {
4334         struct drm_device *dev = dev_priv->dev;
4335
4336         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4337         INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4338         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4339         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4340
4341         /* Let's track the enabled rps events */
4342         if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4343                 /* WaGsvRC0ResidencyMethod:vlv */
4344                 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4345         else
4346                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4347
4348         INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4349                           i915_hangcheck_elapsed);
4350         INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4351                           intel_hpd_irq_reenable_work);
4352
4353         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4354
4355         if (IS_GEN2(dev_priv)) {
4356                 dev->max_vblank_count = 0;
4357                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4358         } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4359                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4360                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4361         } else {
4362                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4363                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4364         }
4365
4366         /*
4367          * Opt out of the vblank disable timer on everything except gen2.
4368          * Gen2 doesn't have a hardware frame counter and so depends on
4369  * vblank interrupts to produce sane vblank sequence numbers.
4370          */
4371         if (!IS_GEN2(dev_priv))
4372                 dev->vblank_disable_immediate = true;
4373
4374         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4375                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4376                 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4377         }
4378
4379         if (IS_CHERRYVIEW(dev_priv)) {
4380                 dev->driver->irq_handler = cherryview_irq_handler;
4381                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4382                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4383                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4384                 dev->driver->enable_vblank = valleyview_enable_vblank;
4385                 dev->driver->disable_vblank = valleyview_disable_vblank;
4386                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4387         } else if (IS_VALLEYVIEW(dev_priv)) {
4388                 dev->driver->irq_handler = valleyview_irq_handler;
4389                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4390                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4391                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4392                 dev->driver->enable_vblank = valleyview_enable_vblank;
4393                 dev->driver->disable_vblank = valleyview_disable_vblank;
4394                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4395         } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4396                 dev->driver->irq_handler = gen8_irq_handler;
4397                 dev->driver->irq_preinstall = gen8_irq_reset;
4398                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4399                 dev->driver->irq_uninstall = gen8_irq_uninstall;
4400                 dev->driver->enable_vblank = gen8_enable_vblank;
4401                 dev->driver->disable_vblank = gen8_disable_vblank;
4402                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4403         } else if (HAS_PCH_SPLIT(dev)) {
4404                 dev->driver->irq_handler = ironlake_irq_handler;
4405                 dev->driver->irq_preinstall = ironlake_irq_reset;
4406                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4407                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4408                 dev->driver->enable_vblank = ironlake_enable_vblank;
4409                 dev->driver->disable_vblank = ironlake_disable_vblank;
4410                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4411         } else {
4412                 if (INTEL_INFO(dev_priv)->gen == 2) {
4413                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
4414                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4415                         dev->driver->irq_handler = i8xx_irq_handler;
4416                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
4417                 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4418                         dev->driver->irq_preinstall = i915_irq_preinstall;
4419                         dev->driver->irq_postinstall = i915_irq_postinstall;
4420                         dev->driver->irq_uninstall = i915_irq_uninstall;
4421                         dev->driver->irq_handler = i915_irq_handler;
4422                 } else {
4423                         dev->driver->irq_preinstall = i965_irq_preinstall;
4424                         dev->driver->irq_postinstall = i965_irq_postinstall;
4425                         dev->driver->irq_uninstall = i965_irq_uninstall;
4426                         dev->driver->irq_handler = i965_irq_handler;
4427                 }
4428                 if (I915_HAS_HOTPLUG(dev_priv))
4429                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4430                 dev->driver->enable_vblank = i915_enable_vblank;
4431                 dev->driver->disable_vblank = i915_disable_vblank;
4432         }
4433 }
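/*
 * The max_vblank_count choices above matter because the drm core extends
 * the hardware frame counter with modular arithmetic, so the advertised
 * width has to match what the register really provides. A plain-C sketch
 * of the wrap handling this enables (illustrative, not the drm core's
 * exact code):
 *
 *	u32 vblank_diff(u32 now, u32 last, u32 max_vblank_count)
 *	{
 *		// works because max_vblank_count is 2^n - 1
 *		return (now - last) & max_vblank_count;
 *	}
 *
 * Gen2 has no hardware frame counter at all, hence max_vblank_count = 0
 * and purely interrupt-driven accounting; pre-G4X counters are 24 bits
 * wide; G4X and gen5+ expose the full 32 bits.
 */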
4434
4435 /**
4436  * intel_hpd_init - initializes and enables hpd support
4437  * @dev_priv: i915 device instance
4438  *
4439  * This function enables the hotplug support. It requires that interrupts have
4440  * already been enabled with intel_irq_init_hw(). From this point on hotplug and
4441  * poll request can run concurrently to other code, so locking rules must be
4442  * obeyed.
4443  *
4444  * This is a separate step from interrupt enabling to simplify the locking rules
4445  * in the driver load and resume code.
4446  */
4447 void intel_hpd_init(struct drm_i915_private *dev_priv)
4448 {
4449         struct drm_device *dev = dev_priv->dev;
4450         struct drm_mode_config *mode_config = &dev->mode_config;
4451         struct drm_connector *connector;
4452         int i;
4453
4454         for (i = 1; i < HPD_NUM_PINS; i++) {
4455                 dev_priv->hpd_stats[i].hpd_cnt = 0;
4456                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4457         }
4458         list_for_each_entry(connector, &mode_config->connector_list, head) {
4459                 struct intel_connector *intel_connector = to_intel_connector(connector);
4460                 connector->polled = intel_connector->polled;
4461                 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4462                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4463                 if (intel_connector->mst_port)
4464                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4465         }
4466
4467         /* Interrupt setup is already guaranteed to be single-threaded; this is
4468          * just to make the assert_spin_locked checks happy. */
4469         spin_lock_irq(&dev_priv->irq_lock);
4470         if (dev_priv->display.hpd_irq_setup)
4471                 dev_priv->display.hpd_irq_setup(dev);
4472         spin_unlock_irq(&dev_priv->irq_lock);
4473 }
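/*
 * The connector->polled policy chosen above, summarized (decision order
 * as implemented, later rules win):
 *
 *	connector's own flags                      -> kept by default
 *	no polling requested + hpd pin + hotplug   -> DRM_CONNECTOR_POLL_HPD
 *	mst port                                   -> DRM_CONNECTOR_POLL_HPD
 */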
4474
4475 /**
4476  * intel_irq_install - enables the hardware interrupt
4477  * @dev_priv: i915 device instance
4478  *
4479  * This function enables the hardware interrupt handling, but leaves hotplug
4480  * handling disabled. It is called after intel_irq_init().
4481  *
4482  * In the driver load and resume code we need working interrupts in a few places
4483  * but don't want to deal with the hassle of concurrent probe and hotplug
4484  * workers. Hence the split into this two-stage approach.
4485  */
4486 int intel_irq_install(struct drm_i915_private *dev_priv)
4487 {
4488         /*
4489          * We enable some interrupt sources in our postinstall hooks, so mark
4490          * interrupts as enabled _before_ actually enabling them to avoid
4491          * special cases in our ordering checks.
4492          */
4493         dev_priv->pm.irqs_enabled = true;
4494
4495         return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4496 }
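/*
 * Bring-up ordering implied by the kerneldoc in this file, sketched as a
 * load path (illustrative, not a verbatim quote of the load code):
 *
 *	intel_irq_init(dev_priv);	// vtables + work items, no hw access
 *	intel_irq_install(dev_priv);	// mark irqs enabled, then request irq
 *	intel_hpd_init(dev_priv);	// hotplug last, once interrupts work
 */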
4497
4498 /**
4499  * intel_irq_uninstall - finalizes all irq handling
4500  * @dev_priv: i915 device instance
4501  *
4502  * This stops interrupt and hotplug handling and unregisters and frees all
4503  * resources acquired in the init functions.
4504  */
4505 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4506 {
4507         drm_irq_uninstall(dev_priv->dev);
4508         intel_hpd_cancel_work(dev_priv);
4509         dev_priv->pm.irqs_enabled = false;
4510 }
4511
4512 /**
4513  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4514  * @dev_priv: i915 device instance
4515  *
4516  * This function is used to disable interrupts at runtime, both in the runtime
4517  * pm and the system suspend/resume code.
4518  */
4519 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4520 {
4521         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4522         dev_priv->pm.irqs_enabled = false;
4523 }
4524
4525 /**
4526  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4527  * @dev_priv: i915 device instance
4528  *
4529  * This function is used to enable interrupts at runtime, both in the runtime
4530  * pm and the system suspend/resume code.
4531  */
4532 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4533 {
4534         dev_priv->pm.irqs_enabled = true;
4535         dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4536         dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4537 }
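/*
 * Pairing sketch for the two runtime-pm helpers above, as the
 * suspend/resume paths would use them (illustrative ordering, not a
 * verbatim call site):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// before power-down
 *	// ... device in a low-power state ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// reset + postinstall
 */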