OSDN Git Service

drm/i915/dp: return number of bytes written for short aux/i2c writes
[uclinux-h8/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
/* Timeout for DP link status checks: 10 * 1000 (presumably ms — confirm at use site). */
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
43
/* Pairing of a DP link bandwidth code with the DPLL dividers that produce it. */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code */
	struct dpll dpll;	/* divider values to program into the DPLL */
};
48
/* DPLL settings for gen4 platforms, one entry per supported DP link rate. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
55
/* DPLL settings for PCH platforms, one entry per supported DP link rate. */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
62
/* DPLL settings for Valleyview, one entry per supported DP link rate. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
69
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/*
 * Link rate tables, in kHz (162000 == 1.62 GHz link). Skylake supports
 * the following rates:
 */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* Cherryview supports additional intermediate rates. */
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
/* All other platforms: the three standard DP rates (RBR/HBR/HBR2). */
static const int default_rates[] = { 162000, 270000, 540000 };
94
95 /**
96  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97  * @intel_dp: DP struct
98  *
99  * If a CPU or PCH DP output is attached to an eDP panel, this function
100  * will return true, and false otherwise.
101  */
102 static bool is_edp(struct intel_dp *intel_dp)
103 {
104         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
107 }
108
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
110 {
111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113         return intel_dig_port->base.base.dev;
114 }
115
116 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117 {
118         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
119 }
120
121 static void intel_dp_link_down(struct intel_dp *intel_dp);
122 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
123 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
124 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
125 static void vlv_steal_power_sequencer(struct drm_device *dev,
126                                       enum pipe pipe);
127
128 static int
129 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
130 {
131         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
132
133         switch (max_link_bw) {
134         case DP_LINK_BW_1_62:
135         case DP_LINK_BW_2_7:
136         case DP_LINK_BW_5_4:
137                 break;
138         default:
139                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140                      max_link_bw);
141                 max_link_bw = DP_LINK_BW_1_62;
142                 break;
143         }
144         return max_link_bw;
145 }
146
147 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148 {
149         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150         struct drm_device *dev = intel_dig_port->base.base.dev;
151         u8 source_max, sink_max;
152
153         source_max = 4;
154         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156                 source_max = 2;
157
158         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160         return min(source_max, sink_max);
161 }
162
163 /*
164  * The units on the numbers in the next two are... bizarre.  Examples will
165  * make it clearer; this one parallels an example in the eDP spec.
166  *
167  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168  *
169  *     270000 * 1 * 8 / 10 == 216000
170  *
171  * The actual data capacity of that configuration is 2.16Gbit/s, so the
172  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
173  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174  * 119000.  At 18bpp that's 2142000 kilobits per second.
175  *
176  * Thus the strange-looking division by 10 in intel_dp_link_required, to
177  * get the result in decakilobits instead of kilobits.
178  */
179
/*
 * Bandwidth needed by a mode, in decakilobits/s: pixel clock (kHz)
 * times bits per pixel, rounded up to the next multiple of ten and
 * divided by ten (see the unit discussion above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbps = pixel_clock * bpp;

	return (kbps + 9) / 10;
}
185
/*
 * Usable payload capacity of the link in decakilobits/s: raw symbol
 * rate times lane count, scaled by 8/10 for 8b/10b channel coding.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
191
192 static enum drm_mode_status
193 intel_dp_mode_valid(struct drm_connector *connector,
194                     struct drm_display_mode *mode)
195 {
196         struct intel_dp *intel_dp = intel_attached_dp(connector);
197         struct intel_connector *intel_connector = to_intel_connector(connector);
198         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
199         int target_clock = mode->clock;
200         int max_rate, mode_rate, max_lanes, max_link_clock;
201
202         if (is_edp(intel_dp) && fixed_mode) {
203                 if (mode->hdisplay > fixed_mode->hdisplay)
204                         return MODE_PANEL;
205
206                 if (mode->vdisplay > fixed_mode->vdisplay)
207                         return MODE_PANEL;
208
209                 target_clock = fixed_mode->clock;
210         }
211
212         max_link_clock = intel_dp_max_link_rate(intel_dp);
213         max_lanes = intel_dp_max_lane_count(intel_dp);
214
215         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216         mode_rate = intel_dp_link_required(target_clock, 18);
217
218         if (mode_rate > max_rate)
219                 return MODE_CLOCK_HIGH;
220
221         if (mode->clock < 10000)
222                 return MODE_CLOCK_LOW;
223
224         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225                 return MODE_H_ILLEGAL;
226
227         return MODE_OK;
228 }
229
230 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
231 {
232         int     i;
233         uint32_t v = 0;
234
235         if (src_bytes > 4)
236                 src_bytes = 4;
237         for (i = 0; i < src_bytes; i++)
238                 v |= ((uint32_t) src[i]) << ((3-i) * 8);
239         return v;
240 }
241
242 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
243 {
244         int i;
245         if (dst_bytes > 4)
246                 dst_bytes = 4;
247         for (i = 0; i < dst_bytes; i++)
248                 dst[i] = src >> ((3-i) * 8);
249 }
250
/*
 * hrawclock is 1/4 the FSB frequency.
 *
 * Returns the raw clock in MHz, decoded from the CLKCFG FSB field.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* unknown encoding: fall back to a common value */
		return 133;
	}
}
284
285 static void
286 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
287                                     struct intel_dp *intel_dp);
288 static void
289 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290                                               struct intel_dp *intel_dp);
291
/*
 * Acquire pps_mutex, taking a power domain reference for the port first.
 * The reference must be grabbed outside the mutex — see the comment in
 * vlv_power_sequencer_reset() for the lock-ordering rationale.
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
309
/*
 * Release pps_mutex and drop the power domain reference taken by
 * pps_lock(), in that order (mirror image of pps_lock()).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
323
/*
 * Make the pipe's power sequencer lock onto this port by briefly
 * enabling and then disabling the DP port with a minimal configuration.
 * Without this trick even the VDD force bit has no effect. Bails out
 * (with a warning) if the port is already enabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}
383
/*
 * Return the pipe whose panel power sequencer this eDP port uses,
 * claiming and initializing one that no other eDP port has taken if
 * none is assigned yet. Newly claimed sequencers are "kicked" so they
 * lock onto the port. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
447
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
450
/* True when the pipe's power sequencer reports panel power on. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
456
/* True when the pipe's power sequencer has VDD forced on. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
462
/* Accepts any pipe; the "no extra condition" vlv_pipe_check. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
468
469 static enum pipe
470 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471                      enum port port,
472                      vlv_pipe_check pipe_check)
473 {
474         enum pipe pipe;
475
476         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478                         PANEL_PORT_SELECT_MASK;
479
480                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481                         continue;
482
483                 if (!pipe_check(dev_priv, pipe))
484                         continue;
485
486                 return pipe;
487         }
488
489         return INVALID_PIPE;
490 }
491
/*
 * At init time, recover the pipe power sequencer (if any) the BIOS left
 * attached to this port: prefer one actively powering the panel, then
 * one with VDD forced on, then any with matching port select. If none
 * matches, leave pps_pipe invalid and let vlv_power_sequencer_pipe()
 * allocate one on first use. Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
528
/*
 * Forget every eDP port's cached power sequencer pipe assignment; each
 * will be re-picked by vlv_power_sequencer_pipe() on next use. VLV only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
557
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559 {
560         struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562         if (HAS_PCH_SPLIT(dev))
563                 return PCH_PP_CONTROL;
564         else
565                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566 }
567
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569 {
570         struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572         if (HAS_PCH_SPLIT(dev))
573                 return PCH_PP_STATUS;
574         else
575                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576 }
577
/*
 * Reboot notifier handler to shut down panel power so the T12 power-off
 * timing is guaranteed across the reboot. Only applicable when panel PM
 * state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* only act on eDP ports, and only for an actual restart */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* wait out the panel power cycle before the machine resets */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
613
/*
 * Read back whether the power sequencer reports panel power on.
 * On VLV a port with no power sequencer assigned cannot have panel
 * power. Caller must hold pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
627
/*
 * Read back whether the power sequencer has VDD forced on. On VLV a
 * port with no power sequencer assigned cannot have VDD on. Caller
 * must hold pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
641
/*
 * Sanity check before AUX transactions on eDP: warn if neither panel
 * power nor forced VDD is on, and dump the power sequencer registers
 * to aid debugging. No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
658
/*
 * Wait for the AUX channel to go idle (SEND_BUSY cleared), either via
 * the AUX-done interrupt or by polling. Returns the last value read
 * from the channel control register, including any error/status bits,
 * even on timeout.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C reads the control reg and tests SEND_BUSY; also updates 'status' */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
682
683 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
684 {
685         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686         struct drm_device *dev = intel_dig_port->base.base.dev;
687
688         /*
689          * The clock divider is based off the hrawclk, and would like to run at
690          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
691          */
692         return index ? 0 : intel_hrawclk(dev) / 2;
693 }
694
695 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696 {
697         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698         struct drm_device *dev = intel_dig_port->base.base.dev;
699
700         if (index)
701                 return 0;
702
703         if (intel_dig_port->port == PORT_A) {
704                 if (IS_GEN6(dev) || IS_GEN7(dev))
705                         return 200; /* SNB & IVB eDP input clock at 400Mhz */
706                 else
707                         return 225; /* eDP input clock at 450Mhz */
708         } else {
709                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
710         }
711 }
712
/*
 * AUX clock divider for HSW/BDW. Port A derives it from cdclk; other
 * ports use fixed values on non-ULT LPT (two candidate dividers) or
 * the PCH raw clock otherwise.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
734
735 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
736 {
737         return index ? 0 : 100;
738 }
739
740 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741 {
742         /*
743          * SKL doesn't need us to program the AUX clock divider (Hardware will
744          * derive the clock from CDCLK automatically). We still implement the
745          * get_aux_clock_divider vfunc to plug-in into the existing code.
746          */
747         return index ? 0 : 1;
748 }
749
/*
 * Build the AUX_CH_CTL value that starts a gen4+ AUX transaction:
 * busy/done/error bits, optional interrupt enable, timeout selection,
 * message size, precharge time and the 2x bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW eDP (port A) uses a longer 600us timeout; others use 400us */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
779
/*
 * Build the AUX_CH_CTL value for SKL: no clock divider field (the
 * divider argument is unused), a fixed 1600us timeout, and an explicit
 * 32-cycle sync pulse count.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
794
/*
 * intel_dp_aux_ch - perform one raw DP AUX channel transaction.
 * @intel_dp:	the DP port to talk through
 * @send:	request bytes to transmit (header plus any payload)
 * @send_bytes:	length of @send; must be <= 20 (5 data registers x 4 bytes)
 * @recv:	buffer receiving the raw reply bytes
 * @recv_size:	capacity of @recv; must be <= 20
 *
 * Returns the number of reply bytes copied into @recv, or a negative
 * errno: -EBUSY (channel never went idle, or the transfer never
 * completed), -E2BIG (buffer limits exceeded), -EIO (receive error),
 * -ETIMEDOUT (sink did not respond, e.g. nothing connected).
 *
 * Runs with the pps mutex held and panel VDD forced on for the
 * duration of the transfer.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/*
	 * Retry the whole transfer once per available AUX clock divider;
	 * the callback returns 0 once the dividers are exhausted.
	 */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	/* Clamp to the caller's buffer; any extra reply bytes are dropped. */
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	/* Drop VDD only if we turned it on ourselves above. */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
934
/* An AUX request header is 3 address/command bytes, plus one length
 * byte for transactions that carry a size. */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer hook: translate a drm_dp_aux_msg into a raw
 * intel_dp_aux_ch() transaction and decode the reply.
 *
 * Returns the payload size on success -- for short writes, the number
 * of bytes the sink reported as actually written -- or a negative
 * errno from the channel transfer.  msg->reply is filled with the
 * sink's reply code; callers must check it before trusting the data.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];	/* hardware limit: 5 data registers */
	size_t txsize, rxsize;
	int ret;

	/* Build the AUX header: request, 20-bit address, length minus one. */
	txbuf[0] = msg->request << 4;
	txbuf[1] = msg->address >> 8;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-sized write is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* +1 for the leading reply-code byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1004
/*
 * intel_dp_aux_init - set up and register the DP AUX channel.
 *
 * Picks the AUX_CH_CTL register for the port, registers the
 * drm_dp_aux (which creates the i2c-over-AUX adapter) and adds a
 * sysfs link from the connector to the resulting ddc device.
 * Registration failures are logged and swallowed -- no error is
 * propagated to the caller.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();	/* DP encoders only exist on ports A-D */
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the i2c-over-AUX adapter under the connector in sysfs. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1069
1070 static void
1071 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1072 {
1073         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1074
1075         if (!intel_connector->mst_port)
1076                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1077                                   intel_dp->aux.ddc.dev.kobj.name);
1078         intel_connector_unregister(intel_connector);
1079 }
1080
1081 static void
1082 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1083 {
1084         u32 ctrl1;
1085
1086         pipe_config->ddi_pll_sel = SKL_DPLL0;
1087         pipe_config->dpll_hw_state.cfgcr1 = 0;
1088         pipe_config->dpll_hw_state.cfgcr2 = 0;
1089
1090         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1091         switch (link_clock / 2) {
1092         case 81000:
1093                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1094                                               SKL_DPLL0);
1095                 break;
1096         case 135000:
1097                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1098                                               SKL_DPLL0);
1099                 break;
1100         case 270000:
1101                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1102                                               SKL_DPLL0);
1103                 break;
1104         case 162000:
1105                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1106                                               SKL_DPLL0);
1107                 break;
1108         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1109         results in CDCLK change. Need to handle the change of CDCLK by
1110         disabling pipes and re-enabling them */
1111         case 108000:
1112                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1113                                               SKL_DPLL0);
1114                 break;
1115         case 216000:
1116                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1117                                               SKL_DPLL0);
1118                 break;
1119
1120         }
1121         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1122 }
1123
1124 static void
1125 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1126 {
1127         switch (link_bw) {
1128         case DP_LINK_BW_1_62:
1129                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1130                 break;
1131         case DP_LINK_BW_2_7:
1132                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1133                 break;
1134         case DP_LINK_BW_5_4:
1135                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1136                 break;
1137         }
1138 }
1139
1140 static int
1141 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1142 {
1143         if (intel_dp->num_sink_rates) {
1144                 *sink_rates = intel_dp->sink_rates;
1145                 return intel_dp->num_sink_rates;
1146         }
1147
1148         *sink_rates = default_rates;
1149
1150         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1151 }
1152
/*
 * intel_dp_source_rates - get the link rates the source can drive.
 *
 * Gen9+ and Cherryview use explicit rate tables; everything else
 * returns a count into default_rates derived from the maximum link
 * bw code the platform supports.
 */
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	/*
	 * NOTE(review): Skylake is gen9, so it takes the early gen9_rates
	 * return above -- this WaDisableHBR2 branch looks unreachable.
	 * Confirm whether the workaround should instead be applied in the
	 * gen9 path (e.g. by trimming HBR2 from the gen9 rate table on
	 * pre-B0 steppings).
	 */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
1175
1176 static void
1177 intel_dp_set_clock(struct intel_encoder *encoder,
1178                    struct intel_crtc_state *pipe_config, int link_bw)
1179 {
1180         struct drm_device *dev = encoder->base.dev;
1181         const struct dp_link_dpll *divisor = NULL;
1182         int i, count = 0;
1183
1184         if (IS_G4X(dev)) {
1185                 divisor = gen4_dpll;
1186                 count = ARRAY_SIZE(gen4_dpll);
1187         } else if (HAS_PCH_SPLIT(dev)) {
1188                 divisor = pch_dpll;
1189                 count = ARRAY_SIZE(pch_dpll);
1190         } else if (IS_CHERRYVIEW(dev)) {
1191                 divisor = chv_dpll;
1192                 count = ARRAY_SIZE(chv_dpll);
1193         } else if (IS_VALLEYVIEW(dev)) {
1194                 divisor = vlv_dpll;
1195                 count = ARRAY_SIZE(vlv_dpll);
1196         }
1197
1198         if (divisor && count) {
1199                 for (i = 0; i < count; i++) {
1200                         if (link_bw == divisor[i].link_bw) {
1201                                 pipe_config->dpll = divisor[i].dpll;
1202                                 pipe_config->clock_set = true;
1203                                 break;
1204                         }
1205                 }
1206         }
1207 }
1208
1209 static int intersect_rates(const int *source_rates, int source_len,
1210                            const int *sink_rates, int sink_len,
1211                            int *common_rates)
1212 {
1213         int i = 0, j = 0, k = 0;
1214
1215         while (i < source_len && j < sink_len) {
1216                 if (source_rates[i] == sink_rates[j]) {
1217                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1218                                 return k;
1219                         common_rates[k] = source_rates[i];
1220                         ++k;
1221                         ++i;
1222                         ++j;
1223                 } else if (source_rates[i] < sink_rates[j]) {
1224                         ++i;
1225                 } else {
1226                         ++j;
1227                 }
1228         }
1229         return k;
1230 }
1231
/*
 * intel_dp_common_rates - link rates usable by both source and sink.
 *
 * Fills @common_rates and returns the number of entries written.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates;
	const int *sink_rates;
	int source_len;
	int sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len, common_rates);
}
1246
/*
 * snprintf_int_array - format an int array as "v0,v1,v2," into @str.
 *
 * Note the trailing comma after every element, including the last.
 * Output is truncated silently once @len runs out; @str is always
 * NUL-terminated (provided @len >= 1).
 *
 * The old code compared the signed snprintf() return directly against
 * the size_t @len, relying on implicit signed->unsigned conversion to
 * bail out on a (negative) snprintf error.  Make the error check
 * explicit instead; behavior is otherwise unchanged.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);

		/* Stop on output error or once the buffer is full. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1262
1263 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1264 {
1265         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1266         const int *source_rates, *sink_rates;
1267         int source_len, sink_len, common_len;
1268         int common_rates[DP_MAX_SUPPORTED_RATES];
1269         char str[128]; /* FIXME: too big for stack? */
1270
1271         if ((drm_debug & DRM_UT_KMS) == 0)
1272                 return;
1273
1274         source_len = intel_dp_source_rates(dev, &source_rates);
1275         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1276         DRM_DEBUG_KMS("source rates: %s\n", str);
1277
1278         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1279         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1280         DRM_DEBUG_KMS("sink rates: %s\n", str);
1281
1282         common_len = intel_dp_common_rates(intel_dp, common_rates);
1283         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1284         DRM_DEBUG_KMS("common rates: %s\n", str);
1285 }
1286
1287 static int rate_to_index(int find, const int *rates)
1288 {
1289         int i = 0;
1290
1291         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1292                 if (find == rates[i])
1293                         break;
1294
1295         return i;
1296 }
1297
1298 int
1299 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1300 {
1301         int rates[DP_MAX_SUPPORTED_RATES] = {};
1302         int len;
1303
1304         len = intel_dp_common_rates(intel_dp, rates);
1305         if (WARN_ON(len <= 0))
1306                 return 162000;
1307
1308         return rates[rate_to_index(0, rates) - 1];
1309 }
1310
/*
 * intel_dp_rate_select - convert a link rate (kHz) to the sink's
 * rate-table index (the value programmed via intel_dp->rate_select
 * for sinks that advertise an explicit rate table).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1315
/*
 * intel_dp_compute_config - compute the DP link config for a mode set.
 *
 * Walks bpp from the requested value downwards (in 2-bpc steps), and
 * for each bpp walks link rate and lane count upwards, accepting the
 * first combination whose bandwidth fits the mode.  On success fills
 * pipe_config (pipe_bpp, port_clock, m/n values, DRRS m2/n2, PLL
 * selection) and the intel_dp link fields (lane_count, link_bw or
 * rate_select), and returns true.  Returns false when the mode cannot
 * be driven (no fitting combination, or a double-clocked mode).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	/* eDP with a fixed panel mode: force the panel's native timings. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	/* Double-clocked modes are not supported on DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Prefer high bpp; step down only when no rate/lane combo fits. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	/* Sinks with a rate table use rate_select; legacy sinks use a bw code. */
	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second m/n set for the seamless downclock (DRRS) mode, if any. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Platform-specific PLL selection for the chosen link rate. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1474
/*
 * ironlake_set_pll_cpu_edp - program the CPU eDP PLL frequency.
 *
 * Selects the 160 MHz or 270 MHz eDP PLL in DP_A based on the
 * configured port clock, and mirrors the frequency bit into
 * intel_dp->DP for the later port enable.  The 500us delay after the
 * posting read presumably allows the PLL frequency change to settle
 * -- not documented here.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
1505
/*
 * intel_dp_prepare - compute the DP port register value for enabling.
 *
 * Builds intel_dp->DP -- the value later written to the port's DP
 * control register -- handling the differing bit layouts described
 * in the comment below (IBX PCH / CPU, IVB CPU port A, CPT PCH).
 * Only caches the value; nothing is written to hardware here except
 * via the initial DP_DETECTED read.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU port A: CPT-style layout for sync and training bits. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			/* CHV has a third pipe, needing a wider select field. */
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT PCH: most config lives in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1580
/*
 * Panel power sequencer status masks/values for wait_panel_status():
 * ON waits for powered-up and sequencing idle, OFF for powered-down,
 * CYCLE additionally requires the power-cycle delay to have elapsed.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1589
/*
 * wait_panel_status - poll the panel power sequencer status register.
 * @intel_dp: the eDP port whose PPS registers to poll
 * @mask, @value: wait until (PP_STATUS & mask) == value
 *
 * Gives up after 5 seconds with an error in the log; the failure is
 * not propagated to the caller.  Caller must hold the pps mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms timeout; see _wait_for() for the poll interval semantics. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1616
/* Wait until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1622
/* Wait until the power sequencer reports the panel fully off and idle. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1628
/*
 * Wait out the panel power cycle delay: first the software-tracked
 * portion measured from last_power_cycle, then whatever the power
 * sequencer hardware itself still reports as pending.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1640
/*
 * Enforce the required delay between panel power on (last_power_on)
 * and enabling the backlight.
 */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1646
/*
 * Enforce the required delay after backlight disable
 * (last_backlight_off) before further panel power changes.
 */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1652
1653 /* Read the current pp_control value, unlocking the register if it
1654  * is locked
1655  */
1656
1657 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1658 {
1659         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1660         struct drm_i915_private *dev_priv = dev->dev_private;
1661         u32 control;
1662
1663         lockdep_assert_held(&dev_priv->pps_mutex);
1664
1665         control = I915_READ(_pp_ctrl_reg(intel_dp));
1666         control &= ~PANEL_UNLOCK_MASK;
1667         control |= PANEL_UNLOCK_REGS;
1668         return control;
1669 }
1670
1671 /*
1672  * Must be paired with edp_panel_vdd_off().
1673  * Must hold pps_mutex around the whole on/off sequence.
1674  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1675  */
1676 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1677 {
1678         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1679         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1680         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1681         struct drm_i915_private *dev_priv = dev->dev_private;
1682         enum intel_display_power_domain power_domain;
1683         u32 pp;
1684         u32 pp_stat_reg, pp_ctrl_reg;
1685         bool need_to_disable = !intel_dp->want_panel_vdd;
1686
1687         lockdep_assert_held(&dev_priv->pps_mutex);
1688
1689         if (!is_edp(intel_dp))
1690                 return false;
1691
1692         cancel_delayed_work(&intel_dp->panel_vdd_work);
1693         intel_dp->want_panel_vdd = true;
1694
1695         if (edp_have_panel_vdd(intel_dp))
1696                 return need_to_disable;
1697
1698         power_domain = intel_display_port_power_domain(intel_encoder);
1699         intel_display_power_get(dev_priv, power_domain);
1700
1701         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1702                       port_name(intel_dig_port->port));
1703
1704         if (!edp_have_panel_power(intel_dp))
1705                 wait_panel_power_cycle(intel_dp);
1706
1707         pp = ironlake_get_pp_control(intel_dp);
1708         pp |= EDP_FORCE_VDD;
1709
1710         pp_stat_reg = _pp_stat_reg(intel_dp);
1711         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1712
1713         I915_WRITE(pp_ctrl_reg, pp);
1714         POSTING_READ(pp_ctrl_reg);
1715         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1716                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1717         /*
1718          * If the panel wasn't on, delay before accessing aux channel
1719          */
1720         if (!edp_have_panel_power(intel_dp)) {
1721                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1722                               port_name(intel_dig_port->port));
1723                 msleep(intel_dp->panel_power_up_delay);
1724         }
1725
1726         return need_to_disable;
1727 }
1728
1729 /*
1730  * Must be paired with intel_edp_panel_vdd_off() or
1731  * intel_edp_panel_off().
1732  * Nested calls to these functions are not allowed since
1733  * we drop the lock. Caller must use some higher level
1734  * locking to prevent nested calls from other threads.
1735  */
1736 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1737 {
1738         bool vdd;
1739
1740         if (!is_edp(intel_dp))
1741                 return;
1742
1743         pps_lock(intel_dp);
1744         vdd = edp_panel_vdd_on(intel_dp);
1745         pps_unlock(intel_dp);
1746
1747         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1748              port_name(dp_to_dig_port(intel_dp)->port));
1749 }
1750
/*
 * Immediately clear the VDD force bit in hardware and drop the power
 * domain reference taken in edp_panel_vdd_on().  Only legal once
 * nobody wants VDD any more (want_panel_vdd must be false).
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        /* Nothing to do if hardware already has VDD off. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /*
         * Panel power target is off too, so dropping VDD completes the
         * power cycle; start tracking the cycle delay from now.
         */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1791
/*
 * Delayed-work handler that turns VDD off, unless someone re-requested
 * VDD (want_panel_vdd) between scheduling and execution.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
1802
/* Schedule the delayed VDD off work; see edp_panel_vdd_work(). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
        unsigned long delay;

        /*
         * Queue the timer to fire a long time from now (relative to the power
         * down delay) to keep the panel power up across a sequence of
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1815
1816 /*
1817  * Must be paired with edp_panel_vdd_on().
1818  * Must hold pps_mutex around the whole on/off sequence.
1819  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1820  */
1821 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1822 {
1823         struct drm_i915_private *dev_priv =
1824                 intel_dp_to_dev(intel_dp)->dev_private;
1825
1826         lockdep_assert_held(&dev_priv->pps_mutex);
1827
1828         if (!is_edp(intel_dp))
1829                 return;
1830
1831         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1832              port_name(dp_to_dig_port(intel_dp)->port));
1833
1834         intel_dp->want_panel_vdd = false;
1835
1836         if (sync)
1837                 edp_panel_vdd_off_sync(intel_dp);
1838         else
1839                 edp_panel_vdd_schedule_off(intel_dp);
1840 }
1841
/*
 * Turn the eDP panel power on via the power sequencer and wait until
 * the sequencer reports the panel as on.  Caller must hold pps_mutex.
 * No-op (with a WARN) if panel power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        /* Respect the minimum off time before powering back on. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for the backlight-on delay in wait_backlight_on(). */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
1889
/* Locked wrapper around edp_panel_on(); no-op for non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
1899
1900
/*
 * Turn eDP panel power off via the power sequencer, clearing the VDD
 * force bit in the same register write, then wait for the sequencer to
 * report the panel off.  Drops the power domain reference taken when
 * VDD was enabled.  Caller must hold pps_mutex and have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start the power cycle delay tracking before waiting for off. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1942
/* Locked wrapper around edp_panel_off(); no-op for non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
1952
/*
 * Enable backlight in the panel power control.  Takes pps_mutex
 * internally; honours the power-on -> backlight-on delay first.
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
1982
/*
 * Enable backlight PWM and backlight PP control.
 * PWM first, then the panel power control enable bit.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
1994
/*
 * Disable backlight in the panel power control.  Takes pps_mutex
 * internally, then waits out the backlight-off delay before returning.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Timestamp + wait so subsequent panel ops honour the off delay. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
2021
/*
 * Disable backlight PP control and backlight PWM.
 * Panel power control bit first, then the PWM (reverse of enable).
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
2033
2034 /*
2035  * Hook for controlling the panel power control backlight through the bl_power
2036  * sysfs attribute. Take care to handle multiple calls.
2037  */
2038 static void intel_edp_backlight_power(struct intel_connector *connector,
2039                                       bool enable)
2040 {
2041         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2042         bool is_enabled;
2043
2044         pps_lock(intel_dp);
2045         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2046         pps_unlock(intel_dp);
2047
2048         if (is_enabled == enable)
2049                 return;
2050
2051         DRM_DEBUG_KMS("panel power control backlight %s\n",
2052                       enable ? "enable" : "disable");
2053
2054         if (enable)
2055                 _intel_edp_backlight_on(intel_dp);
2056         else
2057                 _intel_edp_backlight_off(intel_dp);
2058 }
2059
/*
 * Enable the eDP PLL via DP_A.  The pipe and port must both be
 * disabled; warns if the PLL or port is already on.  The 200us delay
 * gives the PLL time to lock before the port is enabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}
2085
/*
 * Disable the eDP PLL via DP_A.  The pipe and port must both be
 * disabled; warns if the PLL is already off or the port still on.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        udelay(200);
}
2110
2111 /* If the sink supports it, try to set the power state appropriately */
2112 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2113 {
2114         int ret, i;
2115
2116         /* Should have a valid DPCD by this point */
2117         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2118                 return;
2119
2120         if (mode != DRM_MODE_DPMS_ON) {
2121                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2122                                          DP_SET_POWER_D3);
2123         } else {
2124                 /*
2125                  * When turning on, we need to retry for 1ms to give the sink
2126                  * time to wake up.
2127                  */
2128                 for (i = 0; i < 3; i++) {
2129                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2130                                                  DP_SET_POWER_D0);
2131                         if (ret == 1)
2132                                 break;
2133                         msleep(1);
2134                 }
2135         }
2136
2137         if (ret != 1)
2138                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2139                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2140 }
2141
/*
 * Read back whether this DP encoder is enabled in hardware and, if so,
 * which pipe it is driving.  Returns false if the power domain or the
 * port is off.  Pipe selection is decoded per platform: gen7 PORT_A
 * uses the CPT-style field, CHV has its own field, non-CPT uses the
 * classic field, and CPT PCH ports are found by scanning TRANS_DP_CTL.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        /*
                         * NOTE(review): returns true without writing *pipe
                         * for an unexpected output_reg — callers see "enabled"
                         * with an unset pipe; confirm this is intended.
                         */
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                /* Port enabled but no transcoder claims it: *pipe untouched. */
                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
2200
/*
 * Read the hardware state of an enabled DP encoder back into
 * pipe_config: sync polarity flags, audio, limited color range,
 * link m/n values, port clock and the derived dotclock.  Also clamps
 * the VBT eDP bpp to what the BIOS actually programmed (see comment
 * below).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        /* Sync polarity lives in the port register, except on CPT PCH
         * ports where it is in the transcoder register. */
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* PORT_A (eDP) link rate is encoded in DP_A's PLL frequency field. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2284
/*
 * Encoder disable hook: audio off, PSR off (non-DDI), then the eDP
 * shutdown sequence (VDD on, backlight off, sink to D3, panel off).
 * The order here is deliberate — do not reorder.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2308
/*
 * Post-disable hook for ILK+: take the link down after the pipe is
 * off, and shut the eDP PLL down for port A.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;

        intel_dp_link_down(intel_dp);
        if (port == PORT_A)
                ironlake_edp_pll_off(intel_dp);
}
2318
/* Post-disable hook for VLV: just take the link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        intel_dp_link_down(intel_dp);
}
2325
/*
 * Post-disable hook for CHV: take the link down, then reset the PHY
 * data lanes through DPIO (assert the soft reset request on both PCS
 * register pairs, then de-assert the per-lane reset bits).
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_link_down(intel_dp);

        mutex_lock(&dev_priv->dpio_lock);

        /* Propagate soft reset to data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
2361
/*
 * Program the requested link training pattern (and scrambling state)
 * into the per-platform register layout:
 *  - DDI (HSW+): written directly to DP_TP_CTL here;
 *  - CPT PCH ports (and gen7 port A): *DP updated with the _CPT fields;
 *  - everything else: *DP updated with the classic (or CHV) fields.
 * In the non-DDI cases only *DP is modified; the caller is responsible
 * for writing it to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* CPT hardware has no pattern 3; fall back to 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV supports pattern 3 here; else use 2. */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2444
/*
 * Program the DP port register and enable the port.
 *
 * The port register is written twice: first with the full configuration
 * but DP_PORT_EN still clear, then again with DP_PORT_EN set.  The
 * two-step sequence is mandatory on VLV/CHV (see the comment below);
 * it is harmless on other platforms.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        /* First write: full config, port still disabled. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        /* Second write: same config with the port enable bit set. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2468
/*
 * Common DP encoder enable path: bring up the port, run the panel power
 * sequence (eDP), perform link training and finally enable audio.
 *
 * The panel power sequencer steps are done under pps_lock; on VLV the
 * power sequencer must first be (re)assigned to this port's pipe.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        /* The port must not already be enabled at this point. */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        /* VLV: claim/initialize the power sequencer for this pipe. */
        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* eDP panel power-on sequence (no-ops for external DP). */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev))
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));

        /* Wake the sink, then train the link. */
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2507
2508 static void g4x_enable_dp(struct intel_encoder *encoder)
2509 {
2510         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2511
2512         intel_enable_dp(encoder);
2513         intel_edp_backlight_on(intel_dp);
2514 }
2515
2516 static void vlv_enable_dp(struct intel_encoder *encoder)
2517 {
2518         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2519
2520         intel_edp_backlight_on(intel_dp);
2521         intel_psr_enable(intel_dp);
2522 }
2523
2524 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2525 {
2526         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2527         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2528
2529         intel_dp_prepare(encoder);
2530
2531         /* Only ilk+ has port A */
2532         if (dport->port == PORT_A) {
2533                 ironlake_set_pll_cpu_edp(intel_dp);
2534                 ironlake_edp_pll_on(intel_dp);
2535         }
2536 }
2537
/*
 * Logically disconnect the panel power sequencer currently assigned to
 * this port: sync off any pending VDD, clear the sequencer's port select
 * and mark the port as having no power sequencer.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* Make sure VDD is really off before we give up the sequencer. */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2563
/*
 * Detach the given pipe's power sequencer from whichever eDP port is
 * currently using it, so the caller can take it over.  Must be called
 * with pps_mutex held.
 *
 * @dev: drm device
 * @pipe: pipe whose power sequencer is about to be reassigned
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Only pipes A and B have power sequencers on VLV/CHV. */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        /* Find the eDP port currently holding this pipe's sequencer. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* Stealing from an active port indicates a driver bug. */
                WARN(encoder->connectors_active,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2600
/*
 * Assign the power sequencer of this port's current pipe to the port and
 * initialize it.  Detaches any sequencer the port previously used and
 * steals the target pipe's sequencer from any other port first.
 * Must be called with pps_mutex held; no-op for non-eDP ports.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* Already using the right sequencer -- nothing to do. */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2641
/*
 * VLV pre-enable hook: program the PHY PCS registers for this port's
 * DPIO channel, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->dpio_lock);

        /*
         * NOTE(review): the value read here is immediately discarded by
         * the "val = 0" below -- presumably the read itself is either a
         * leftover or intended for its side effects; TODO confirm before
         * removing it.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        /* Magic values below are hardware-tuned; no symbolic names exist. */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->dpio_lock);

        intel_enable_dp(encoder);
}
2670
/*
 * VLV pre-PLL-enable hook: prepare the port registers and reset the PHY
 * Tx lanes to a known default state before the PLL is turned on.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        /* Magic values below are hardware-tuned; no symbolic names exist. */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->dpio_lock);
}
2700
/*
 * CHV pre-enable hook: bring the PHY data lanes for both PCS groups out
 * of reset in the required order (FIFO reset source, soft-reset request,
 * lane reset deassert), program per-lane latency settings, then run the
 * common DP enable sequence.  The register write order is significant.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i;
        u32 val;

        mutex_lock(&dev_priv->dpio_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        /* Release both Tx lanes of each PCS group from reset. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < 4; i++) {
                /* Set the latency optimal bit */
                /* Lane 1 gets a different value than lanes 0/2/3. */
                data = (i == 1) ? 0x0 : 0x6;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
                                data << DPIO_FRC_LATENCY_SHFIT);

                /* Set the upar bit */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* FIXME: Fix up value only after power analysis */

        mutex_unlock(&dev_priv->dpio_lock);

        intel_enable_dp(encoder);
}
2762
/*
 * CHV pre-PLL-enable hook: prepare the port registers and program the
 * PHY clock routing -- left/right clock buffer distribution and which
 * clock channel each PCS group and common lane block uses, based on the
 * pipe driving this port.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_prepare(encoder);

        mutex_lock(&dev_priv->dpio_lock);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
2828
2829 /*
2830  * Native read with retry for link status and receiver capability reads for
2831  * cases where the sink may still be asleep.
2832  *
2833  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2834  * supposed to retry 3 times per the spec.
2835  */
2836 static ssize_t
2837 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2838                         void *buffer, size_t size)
2839 {
2840         ssize_t ret;
2841         int i;
2842
2843         /*
2844          * Sometime we just get the same incorrect byte repeated
2845          * over the entire buffer. Doing just one throw away read
2846          * initially seems to "solve" it.
2847          */
2848         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2849
2850         for (i = 0; i < 3; i++) {
2851                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2852                 if (ret == size)
2853                         return ret;
2854                 msleep(1);
2855         }
2856
2857         return ret;
2858 }
2859
2860 /*
2861  * Fetch AUX CH registers 0x202 - 0x207 which contain
2862  * link status information
2863  */
2864 static bool
2865 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2866 {
2867         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2868                                        DP_LANE0_1_STATUS,
2869                                        link_status,
2870                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2871 }
2872
2873 /* These are source-specific values. */
2874 static uint8_t
2875 intel_dp_voltage_max(struct intel_dp *intel_dp)
2876 {
2877         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2878         struct drm_i915_private *dev_priv = dev->dev_private;
2879         enum port port = dp_to_dig_port(intel_dp)->port;
2880
2881         if (INTEL_INFO(dev)->gen >= 9) {
2882                 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2883                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2884                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2885         } else if (IS_VALLEYVIEW(dev))
2886                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2887         else if (IS_GEN7(dev) && port == PORT_A)
2888                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2889         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2890                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2891         else
2892                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2893 }
2894
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage swing, per platform.  These tables are hardware-validated
 * source capabilities; do not "simplify" without checking the platform
 * documentation.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (INTEL_INFO(dev)->gen >= 9) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
}
2962
/*
 * Program the VLV DPIO PHY for the requested voltage swing /
 * pre-emphasis combination from train_set[0].  The register values are
 * hardware-tuned magic constants; invalid combinations return 0 without
 * touching the hardware.  Always returns 0 (no signal-level bits are
 * needed in the port register on VLV).
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        /* Map (pre-emphasis, voltage swing) to the three PHY values. */
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        /*
         * Write the PHY registers: disable Tx (DW5 = 0), program the
         * values, then re-enable Tx (DW5 bit 31).
         */
        mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);

        return 0;
}
3062
3063 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3064 {
3065         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3066         struct drm_i915_private *dev_priv = dev->dev_private;
3067         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3068         struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3069         u32 deemph_reg_value, margin_reg_value, val;
3070         uint8_t train_set = intel_dp->train_set[0];
3071         enum dpio_channel ch = vlv_dport_to_channel(dport);
3072         enum pipe pipe = intel_crtc->pipe;
3073         int i;
3074
3075         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3076         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3077                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3078                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3079                         deemph_reg_value = 128;
3080                         margin_reg_value = 52;
3081                         break;
3082                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3083                         deemph_reg_value = 128;
3084                         margin_reg_value = 77;
3085                         break;
3086                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3087                         deemph_reg_value = 128;
3088                         margin_reg_value = 102;
3089                         break;
3090                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3091                         deemph_reg_value = 128;
3092                         margin_reg_value = 154;
3093                         /* FIXME extra to set for 1200 */
3094                         break;
3095                 default:
3096                         return 0;
3097                 }
3098                 break;
3099         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3100                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3101                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3102                         deemph_reg_value = 85;
3103                         margin_reg_value = 78;
3104                         break;
3105                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3106                         deemph_reg_value = 85;
3107                         margin_reg_value = 116;
3108                         break;
3109                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3110                         deemph_reg_value = 85;
3111                         margin_reg_value = 154;
3112                         break;
3113                 default:
3114                         return 0;
3115                 }
3116                 break;
3117         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3118                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3119                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3120                         deemph_reg_value = 64;
3121                         margin_reg_value = 104;
3122                         break;
3123                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3124                         deemph_reg_value = 64;
3125                         margin_reg_value = 154;
3126                         break;
3127                 default:
3128                         return 0;
3129                 }
3130                 break;
3131         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3132                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3133                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3134                         deemph_reg_value = 43;
3135                         margin_reg_value = 154;
3136                         break;
3137                 default:
3138                         return 0;
3139                 }
3140                 break;
3141         default:
3142                 return 0;
3143         }
3144
3145         mutex_lock(&dev_priv->dpio_lock);
3146
3147         /* Clear calc init */
3148         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3149         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3150         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3151         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3152         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3153
3154         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3155         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3156         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3157         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3158         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3159
3160         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3161         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3162         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3163         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3164
3165         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3166         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3167         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3168         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3169
3170         /* Program swing deemph */
3171         for (i = 0; i < 4; i++) {
3172                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3173                 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3174                 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3175                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3176         }
3177
3178         /* Program swing margin */
3179         for (i = 0; i < 4; i++) {
3180                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3181                 val &= ~DPIO_SWING_MARGIN000_MASK;
3182                 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3183                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3184         }
3185
3186         /* Disable unique transition scale */
3187         for (i = 0; i < 4; i++) {
3188                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3189                 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3190                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3191         }
3192
3193         if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3194                         == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3195                 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3196                         == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3197
3198                 /*
3199                  * The document said it needs to set bit 27 for ch0 and bit 26
3200                  * for ch1. Might be a typo in the doc.
3201                  * For now, for this unique transition scale selection, set bit
3202                  * 27 for ch0 and ch1.
3203                  */
3204                 for (i = 0; i < 4; i++) {
3205                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3206                         val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3207                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3208                 }
3209
3210                 for (i = 0; i < 4; i++) {
3211                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3212                         val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3213                         val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3214                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3215                 }
3216         }
3217
3218         /* Start swing calculation */
3219         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3220         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3221         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3222
3223         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3224         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3225         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3226
3227         /* LRC Bypass */
3228         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3229         val |= DPIO_LRC_BYPASS;
3230         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3231
3232         mutex_unlock(&dev_priv->dpio_lock);
3233
3234         return 0;
3235 }
3236
3237 static void
3238 intel_get_adjust_train(struct intel_dp *intel_dp,
3239                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3240 {
3241         uint8_t v = 0;
3242         uint8_t p = 0;
3243         int lane;
3244         uint8_t voltage_max;
3245         uint8_t preemph_max;
3246
3247         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3248                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3249                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3250
3251                 if (this_v > v)
3252                         v = this_v;
3253                 if (this_p > p)
3254                         p = this_p;
3255         }
3256
3257         voltage_max = intel_dp_voltage_max(intel_dp);
3258         if (v >= voltage_max)
3259                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3260
3261         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3262         if (p >= preemph_max)
3263                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3264
3265         for (lane = 0; lane < 4; lane++)
3266                 intel_dp->train_set[lane] = v | p;
3267 }
3268
3269 static uint32_t
3270 intel_gen4_signal_levels(uint8_t train_set)
3271 {
3272         uint32_t        signal_levels = 0;
3273
3274         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3275         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3276         default:
3277                 signal_levels |= DP_VOLTAGE_0_4;
3278                 break;
3279         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3280                 signal_levels |= DP_VOLTAGE_0_6;
3281                 break;
3282         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3283                 signal_levels |= DP_VOLTAGE_0_8;
3284                 break;
3285         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3286                 signal_levels |= DP_VOLTAGE_1_2;
3287                 break;
3288         }
3289         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3290         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3291         default:
3292                 signal_levels |= DP_PRE_EMPHASIS_0;
3293                 break;
3294         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3295                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3296                 break;
3297         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3298                 signal_levels |= DP_PRE_EMPHASIS_6;
3299                 break;
3300         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3301                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3302                 break;
3303         }
3304         return signal_levels;
3305 }
3306
3307 /* Gen6's DP voltage swing and pre-emphasis control */
3308 static uint32_t
3309 intel_gen6_edp_signal_levels(uint8_t train_set)
3310 {
3311         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3312                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3313         switch (signal_levels) {
3314         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3315         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3316                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3317         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3318                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3319         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3320         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3321                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3322         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3323         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3324                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3325         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3326         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3327                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3328         default:
3329                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3330                               "0x%x\n", signal_levels);
3331                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3332         }
3333 }
3334
3335 /* Gen7's DP voltage swing and pre-emphasis control */
3336 static uint32_t
3337 intel_gen7_edp_signal_levels(uint8_t train_set)
3338 {
3339         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3340                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3341         switch (signal_levels) {
3342         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3343                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3344         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3345                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3346         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3347                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3348
3349         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3350                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3351         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3352                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3353
3354         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3355                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3356         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3357                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3358
3359         default:
3360                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3361                               "0x%x\n", signal_levels);
3362                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3363         }
3364 }
3365
3366 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3367 static uint32_t
3368 intel_hsw_signal_levels(uint8_t train_set)
3369 {
3370         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3371                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3372         switch (signal_levels) {
3373         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3374                 return DDI_BUF_TRANS_SELECT(0);
3375         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3376                 return DDI_BUF_TRANS_SELECT(1);
3377         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3378                 return DDI_BUF_TRANS_SELECT(2);
3379         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3380                 return DDI_BUF_TRANS_SELECT(3);
3381
3382         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3383                 return DDI_BUF_TRANS_SELECT(4);
3384         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3385                 return DDI_BUF_TRANS_SELECT(5);
3386         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3387                 return DDI_BUF_TRANS_SELECT(6);
3388
3389         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3390                 return DDI_BUF_TRANS_SELECT(7);
3391         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3392                 return DDI_BUF_TRANS_SELECT(8);
3393
3394         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3395                 return DDI_BUF_TRANS_SELECT(9);
3396         default:
3397                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3398                               "0x%x\n", signal_levels);
3399                 return DDI_BUF_TRANS_SELECT(0);
3400         }
3401 }
3402
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t signal_levels, mask;
        /* All lanes carry identical settings (see intel_get_adjust_train()),
         * so lane 0 is representative. */
        uint8_t train_set = intel_dp->train_set[0];

        /*
         * Platform dispatch.  NOTE(review): the order of these checks is
         * load-bearing - e.g. IS_CHERRYVIEW() is tested before
         * IS_VALLEYVIEW(), presumably because the VLV check also matches
         * CHV; confirm against the platform macro definitions before
         * reordering.
         */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
                /* CHV programs swing/de-emphasis via DPIO writes inside
                 * intel_chv_signal_levels(), which returns 0; with mask 0,
                 * *DP is left untouched. */
                signal_levels = intel_chv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_VALLEYVIEW(dev)) {
                /* Presumably DPIO-based like CHV; mask 0 leaves *DP alone. */
                signal_levels = intel_vlv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_GEN7(dev) && port == PORT_A) {
                signal_levels = intel_gen7_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
        } else if (IS_GEN6(dev) && port == PORT_A) {
                signal_levels = intel_gen6_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
        } else {
                signal_levels = intel_gen4_signal_levels(train_set);
                mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
        }

        DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

        /* Replace only the level bits; preserve the rest of *DP. */
        *DP = (*DP & ~mask) | signal_levels;
}
3437
/*
 * Program training pattern @dp_train_pat on the source (port register,
 * already merged into *DP by _intel_dp_set_link_train()) and mirror it to
 * the sink via DPCD.  The per-lane drive settings from
 * intel_dp->train_set are sent in the same AUX transaction, except when
 * the pattern is being disabled.
 *
 * Returns true only if the sink acked the complete write:
 * drm_dp_dpcd_write() reports the number of bytes transferred (negative
 * on error), so a short write counts as failure.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t *DP,
                        uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint8_t buf[sizeof(intel_dp->train_set) + 1];
        int ret, len;

        _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

        /* Latch the new pattern into the port before telling the sink. */
        I915_WRITE(intel_dp->output_reg, *DP);
        POSTING_READ(intel_dp->output_reg);

        buf[0] = dp_train_pat;
        if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
            DP_TRAINING_PATTERN_DISABLE) {
                /* don't write DP_TRAINING_LANEx_SET on disable */
                len = 1;
        } else {
                /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
                memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
                len = intel_dp->lane_count + 1;
        }

        ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
                                buf, len);

        return ret == len;
}
3470
/*
 * Restart training from scratch: zero the per-lane vswing/pre-emphasis
 * settings, program the corresponding (minimum) signal levels into *DP,
 * and start @dp_train_pat on both source and sink.
 *
 * Returns the result of intel_dp_set_link_train() (true on full ack).
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
                        uint8_t dp_train_pat)
{
        memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
        intel_dp_set_signal_levels(intel_dp, DP);
        return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3479
/*
 * Adopt the sink's adjust requests: recompute intel_dp->train_set from
 * @link_status, program the matching signal levels on the source, then
 * send the per-lane settings starting at DP_TRAINING_LANE0_SET.
 *
 * Returns true only when all lane_count bytes reached the sink.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
                           const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        intel_get_adjust_train(intel_dp, link_status);
        intel_dp_set_signal_levels(intel_dp, DP);

        /* Apply the new levels on the source before telling the sink. */
        I915_WRITE(intel_dp->output_reg, *DP);
        POSTING_READ(intel_dp->output_reg);

        /* drm_dp_dpcd_write() returns bytes written or a negative errno,
         * so any short write is treated as failure. */
        ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
                                intel_dp->train_set, intel_dp->lane_count);

        return ret == intel_dp->lane_count;
}
3500
/*
 * Switch a DDI port to transmitting idle patterns after training and wait
 * for the hardware to confirm (except on PORT_A, see below).  No-op on
 * non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        uint32_t val;

        if (!HAS_DDI(dev))
                return;

        /* Read-modify-write the link-train field to "idle". */
        val = I915_READ(DP_TP_CTL(port));
        val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
        val |= DP_TP_CTL_LINK_TRAIN_IDLE;
        I915_WRITE(DP_TP_CTL(port), val);

        /*
         * On PORT_A we can have only eDP in SST mode. There the only reason
         * we need to set idle transmission mode is to work around a HW issue
         * where we enable the pipe while not in idle link-training mode.
         * In this case there is requirement to wait for a minimum number of
         * idle patterns to be sent.
         */
        if (port == PORT_A)
                return;

        if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
                     1))
                DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3531
/* Enable corresponding port and start training pattern 1 */
/*
 * First phase of DP link training (clock recovery): write the link
 * configuration to the sink, enable the port, then loop with TPS1 until
 * all lanes report CR lock.  Gives up after 5 retries at the same voltage
 * or 5 full from-scratch resets.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
        struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
        struct drm_device *dev = encoder->dev;
        int i;
        uint8_t voltage;
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
        uint8_t link_config[2];

        if (HAS_DDI(dev))
                intel_ddi_prepare_link_retrain(encoder);

        /* Write the link configuration data */
        link_config[0] = intel_dp->link_bw;
        link_config[1] = intel_dp->lane_count;
        if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
        /* Sinks with an intermediate-rate table also take a rate index. */
        if (intel_dp->num_sink_rates)
                drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
                                &intel_dp->rate_select, 1);

        link_config[0] = 0;
        link_config[1] = DP_SET_ANSI_8B10B;
        drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

        DP |= DP_PORT_EN;

        /* clock recovery */
        if (!intel_dp_reset_link_train(intel_dp, &DP,
                                       DP_TRAINING_PATTERN_1 |
                                       DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to enable link training\n");
                return;
        }

        voltage = 0xff; /* sentinel: never matches a real vswing value */
        voltage_tries = 0;
        loop_tries = 0;
        for (;;) {
                uint8_t link_status[DP_LINK_STATUS_SIZE];

                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
                        break;
                }

                if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        DRM_DEBUG_KMS("clock recovery OK\n");
                        break;
                }

                /* Check to see if we've tried the max voltage */
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                                break;
                if (i == intel_dp->lane_count) {
                        /* All lanes are at max swing and CR still fails:
                         * reset and retry from scratch, up to 5 times. */
                        ++loop_tries;
                        if (loop_tries == 5) {
                                DRM_ERROR("too many full retries, give up\n");
                                break;
                        }
                        intel_dp_reset_link_train(intel_dp, &DP,
                                                  DP_TRAINING_PATTERN_1 |
                                                  DP_LINK_SCRAMBLING_DISABLE);
                        voltage_tries = 0;
                        continue;
                }

                /* Check to see if we've tried the same voltage 5 times */
                if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
                        ++voltage_tries;
                        if (voltage_tries == 5) {
                                DRM_ERROR("too many voltage retries, give up\n");
                                break;
                        }
                } else
                        voltage_tries = 0;
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

                /* Update training set as requested by target */
                if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
                        DRM_ERROR("failed to update link training\n");
                        break;
                }
        }

        /* Remember the final port register value for the next phase. */
        intel_dp->DP = DP;
}
3625
/*
 * Second phase of DP link training (channel equalization) with TPS2, or
 * TPS3 when both sides support it.  Falls back to a full clock-recovery
 * restart when CR is lost or equalization keeps failing.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
        uint32_t training_pattern = DP_TRAINING_PATTERN_2;

        /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
        if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
                training_pattern = DP_TRAINING_PATTERN_3;

        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp, &DP,
                                     training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
                return;
        }

        tries = 0;
        cr_tries = 0;
        channel_eq = false;
        for (;;) {
                uint8_t link_status[DP_LINK_STATUS_SIZE];

                /* cr_tries counts full clock-recovery restarts; more than
                 * 5 means the link is hopeless. */
                if (cr_tries > 5) {
                        DRM_ERROR("failed to train DP, aborting\n");
                        break;
                }

                drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
                        break;
                }

                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        /* CR lost: redo pattern-1 training, then resume
                         * equalization with the chosen pattern. */
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
                                                training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        cr_tries++;
                        continue;
                }

                if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                        channel_eq = true;
                        break;
                }

                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
                                                training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        tries = 0;
                        cr_tries++;
                        continue;
                }

                /* Update training set as requested by target */
                if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
                        DRM_ERROR("failed to update link training\n");
                        break;
                }
                ++tries;
        }

        intel_dp_set_idle_link_train(intel_dp);

        intel_dp->DP = DP;

        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3705
/*
 * End link training: disable the training pattern on both source and sink
 * (per-lane settings are not written for the disable pattern, see
 * intel_dp_set_link_train()).
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
        intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                DP_TRAINING_PATTERN_DISABLE);
}
3711
/*
 * Turn the DP port off (pre-DDI platforms only): drop to the idle training
 * pattern, apply the IBX transcoder-select workaround, then clear
 * DP_PORT_EN and wait out the panel power-down delay.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t DP = intel_dp->DP;

        /* DDI ports are torn down elsewhere; getting here is a bug. */
        if (WARN_ON(HAS_DDI(dev)))
                return;

        if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
                return;

        DRM_DEBUG_KMS("\n");

        /* Put the link into the idle training pattern; the mask/pattern
         * bits differ per platform (CPT PCH vs CHV vs the rest). */
        if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
                if (IS_CHERRYVIEW(dev))
                        DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        DP &= ~DP_LINK_TRAIN_MASK;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
        }
        POSTING_READ(intel_dp->output_reg);

        if (HAS_PCH_IBX(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
                /* Hardware workaround: leaving our transcoder select
                 * set to transcoder B while it's off will prevent the
                 * corresponding HDMI output on transcoder A.
                 *
                 * Combine this with another hardware workaround:
                 * transcoder select bit can only be cleared while the
                 * port is enabled.
                 */
                DP &= ~DP_PIPEB_SELECT;
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
        }

        DP &= ~DP_AUDIO_OUTPUT_ENABLE;
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
        msleep(intel_dp->panel_power_down_delay);
}
3761
/*
 * Read and cache the sink's DPCD: receiver capabilities, PSR support,
 * TPS3 support, eDP 1.4 intermediate link rates, and (when applicable)
 * downstream-port info.
 *
 * Returns false when the AUX transfer fails, the sink reports no DPCD,
 * or the downstream-port fetch fails.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint8_t rev;

        if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
                                    sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */

        DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */

        /* Check if the panel supports PSR */
        memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
        if (is_edp(intel_dp)) {
                /* Best effort: a failed read leaves psr_dpcd zeroed. */
                intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
                                        intel_dp->psr_dpcd,
                                        sizeof(intel_dp->psr_dpcd));
                if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
                        dev_priv->psr.sink_support = true;
                        DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
                }
        }

        /* Training Pattern 3 support, both source and sink */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
            (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
                intel_dp->use_tps3 = true;
                DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
                intel_dp->use_tps3 = false;

        /* Intermediate frequency support */
        if (is_edp(intel_dp) &&
            (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
            (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
            (rev >= 0x03)) { /* eDp v1.4 or higher */
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
                int i;

                /* NOTE(review): return value ignored; on a failed/short
                 * read the loop below would scan uninitialized stack data
                 * with only the 0-terminator check as a guard - consider
                 * zeroing sink_rates or checking the result. */
                intel_dp_dpcd_read_wake(&intel_dp->aux,
                                DP_SUPPORTED_LINK_RATES,
                                sink_rates,
                                sizeof(sink_rates));

                for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
                        int val = le16_to_cpu(sink_rates[i]);

                        /* A zero entry terminates the rate table. */
                        if (val == 0)
                                break;

                        /* DPCD rate entries are in 200 kHz units; store kHz
                         * (per eDP 1.4 DP_SUPPORTED_LINK_RATES). */
                        intel_dp->sink_rates[i] = val * 200;
                }
                intel_dp->num_sink_rates = i;
        }

        intel_dp_print_rates(intel_dp);

        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */

        if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
                return true; /* no per-port downstream info */

        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
                                    intel_dp->downstream_ports,
                                    DP_MAX_DOWNSTREAM_PORTS) < 0)
                return false; /* downstream port status fetch failed */

        return true;
}
3840
3841 static void
3842 intel_dp_probe_oui(struct intel_dp *intel_dp)
3843 {
3844         u8 buf[3];
3845
3846         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3847                 return;
3848
3849         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3850                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3851                               buf[0], buf[1], buf[2]);
3852
3853         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3854                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3855                               buf[0], buf[1], buf[2]);
3856 }
3857
3858 static bool
3859 intel_dp_probe_mst(struct intel_dp *intel_dp)
3860 {
3861         u8 buf[1];
3862
3863         if (!intel_dp->can_mst)
3864                 return false;
3865
3866         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3867                 return false;
3868
3869         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3870                 if (buf[0] & DP_MST_CAP) {
3871                         DRM_DEBUG_KMS("Sink is MST capable\n");
3872                         intel_dp->is_mst = true;
3873                 } else {
3874                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3875                         intel_dp->is_mst = false;
3876                 }
3877         }
3878
3879         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3880         return intel_dp->is_mst;
3881 }
3882
/*
 * Ask the sink to compute a frame CRC and copy the 6 CRC bytes into @crc.
 *
 * Starts the sink's CRC test mode, waits up to 6 vblanks for the sink's
 * CRC count to advance, reads the CRC registers, then stops test mode.
 *
 * Returns 0 on success, -ENOTTY if the sink lacks CRC support, -EIO on
 * AUX failures or -ETIMEDOUT if no new CRC appeared in time.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
        u8 buf;
        int test_crc_count;
        int attempts = 6;

        /* Bail out early if the sink doesn't support CRC test mode. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;

        if (!(buf & DP_TEST_CRC_SUPPORTED))
                return -ENOTTY;

        /* Start CRC generation via read-modify-write of DP_TEST_SINK. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
                return -EIO;

        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                                buf | DP_TEST_SINK_START) < 0)
                return -EIO;

        /* Snapshot the sink's CRC counter so we can detect a fresh CRC. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;
        test_crc_count = buf & DP_TEST_COUNT_MASK;

        /* Poll once per vblank until the counter advances (max 6 tries). */
        do {
                if (drm_dp_dpcd_readb(&intel_dp->aux,
                                      DP_TEST_SINK_MISC, &buf) < 0)
                        return -EIO;
                intel_wait_for_vblank(dev, intel_crtc->pipe);
        } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

        if (attempts == 0) {
                DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
                return -ETIMEDOUT;
        }

        /* Fetch the 6 CRC bytes starting at DP_TEST_CRC_R_CR. */
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
                return -EIO;

        /* Stop CRC generation again, preserving the other DP_TEST_SINK bits. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
                return -EIO;
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                               buf & ~DP_TEST_SINK_START) < 0)
                return -EIO;

        return 0;
}
3933
3934 static bool
3935 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3936 {
3937         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3938                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3939                                        sink_irq_vector, 1) == 1;
3940 }
3941
3942 static bool
3943 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3944 {
3945         int ret;
3946
3947         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3948                                              DP_SINK_COUNT_ESI,
3949                                              sink_irq_vector, 14);
3950         if (ret != 14)
3951                 return false;
3952
3953         return true;
3954 }
3955
/*
 * Respond to a DP automated test request. No test types are implemented
 * yet, so every request is negatively acknowledged.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
        /* NAK by default */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3962
/*
 * Service an MST sink interrupt: read the ESI (Event Status Indicator)
 * block, retrain the link if channel EQ dropped, hand the events to the
 * MST topology manager, ack them, and loop while new events keep arriving.
 *
 * Returns the drm_dp_mst_hpd_irq() result (or 0) on success, or -EINVAL
 * when not in MST mode or when the sink stopped answering ESI reads (in
 * which case MST mode is torn down and a hotplug event is sent).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
        bool bret;

        if (intel_dp->is_mst) {
                u8 esi[16] = { 0 };
                int ret = 0;
                int retry;
                bool handled;
                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
                if (bret == true) {

                        /* check link status - esi[10] = 0x200c */
                        if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                                DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
                                intel_dp_start_link_train(intel_dp);
                                intel_dp_complete_link_train(intel_dp);
                                intel_dp_stop_link_train(intel_dp);
                        }

                        DRM_DEBUG_KMS("got esi %3ph\n", esi);
                        ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

                        if (handled) {
                                /* Ack the serviced events; retry the write up to 3 times. */
                                for (retry = 0; retry < 3; retry++) {
                                        int wret;
                                        wret = drm_dp_dpcd_write(&intel_dp->aux,
                                                                 DP_SINK_COUNT_ESI+1,
                                                                 &esi[1], 3);
                                        if (wret == 3) {
                                                break;
                                        }
                                }

                                /* More events may have been raised meanwhile; re-read. */
                                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
                                if (bret == true) {
                                        DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
                                        goto go_again;
                                }
                        } else
                                ret = 0;

                        return ret;
                } else {
                        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
                        DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
                        /* send a hotplug event */
                        drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
                }
        }
        return -EINVAL;
}
4019
4020 /*
4021  * According to DP spec
4022  * 5.1.2:
4023  *  1. Read DPCD
4024  *  2. Configure link according to Receiver Capabilities
4025  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4026  *  4. Check link status on receipt of hot-plug interrupt
4027  */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
        u8 link_status[DP_LINK_STATUS_SIZE];

        /* Caller must hold the connection mutex (we touch link state). */
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

        /* Nothing to check while the output is inactive. */
        if (!intel_encoder->connectors_active)
                return;

        if (WARN_ON(!intel_encoder->base.crtc))
                return;

        if (!to_intel_crtc(intel_encoder->base.crtc)->active)
                return;

        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                return;
        }

        /* Now read the DPCD to see if it's actually running */
        if (!intel_dp_get_dpcd(intel_dp)) {
                return;
        }

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                   DP_DEVICE_SERVICE_IRQ_VECTOR,
                                   sink_irq_vector);

                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        intel_dp_handle_test_request(intel_dp);
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

        /* Retrain if channel equalization was lost on any active lane. */
        if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
                              intel_encoder->base.name);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
                intel_dp_stop_link_train(intel_dp);
        }
}
4079
4080 /* XXX this is probably wrong for multiple downstream ports */
4081 static enum drm_connector_status
4082 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4083 {
4084         uint8_t *dpcd = intel_dp->dpcd;
4085         uint8_t type;
4086
4087         if (!intel_dp_get_dpcd(intel_dp))
4088                 return connector_status_disconnected;
4089
4090         /* if there's no downstream port, we're done */
4091         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4092                 return connector_status_connected;
4093
4094         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4095         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4096             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4097                 uint8_t reg;
4098
4099                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4100                                             &reg, 1) < 0)
4101                         return connector_status_unknown;
4102
4103                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4104                                               : connector_status_disconnected;
4105         }
4106
4107         /* If no HPD, poke DDC gently */
4108         if (drm_probe_ddc(&intel_dp->aux.ddc))
4109                 return connector_status_connected;
4110
4111         /* Well we tried, say unknown for unreliable port types */
4112         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4113                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4114                 if (type == DP_DS_PORT_TYPE_VGA ||
4115                     type == DP_DS_PORT_TYPE_NON_EDID)
4116                         return connector_status_unknown;
4117         } else {
4118                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4119                         DP_DWN_STRM_PORT_TYPE_MASK;
4120                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4121                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4122                         return connector_status_unknown;
4123         }
4124
4125         /* Anything else is out of spec, warn and ignore */
4126         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4127         return connector_status_disconnected;
4128 }
4129
4130 static enum drm_connector_status
4131 edp_detect(struct intel_dp *intel_dp)
4132 {
4133         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4134         enum drm_connector_status status;
4135
4136         status = intel_panel_detect(dev);
4137         if (status == connector_status_unknown)
4138                 status = connector_status_connected;
4139
4140         return status;
4141 }
4142
4143 static enum drm_connector_status
4144 ironlake_dp_detect(struct intel_dp *intel_dp)
4145 {
4146         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4147         struct drm_i915_private *dev_priv = dev->dev_private;
4148         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4149
4150         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4151                 return connector_status_disconnected;
4152
4153         return intel_dp_detect_dpcd(intel_dp);
4154 }
4155
4156 static int g4x_digital_port_connected(struct drm_device *dev,
4157                                        struct intel_digital_port *intel_dig_port)
4158 {
4159         struct drm_i915_private *dev_priv = dev->dev_private;
4160         uint32_t bit;
4161
4162         if (IS_VALLEYVIEW(dev)) {
4163                 switch (intel_dig_port->port) {
4164                 case PORT_B:
4165                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4166                         break;
4167                 case PORT_C:
4168                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4169                         break;
4170                 case PORT_D:
4171                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4172                         break;
4173                 default:
4174                         return -EINVAL;
4175                 }
4176         } else {
4177                 switch (intel_dig_port->port) {
4178                 case PORT_B:
4179                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4180                         break;
4181                 case PORT_C:
4182                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4183                         break;
4184                 case PORT_D:
4185                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4186                         break;
4187                 default:
4188                         return -EINVAL;
4189                 }
4190         }
4191
4192         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4193                 return 0;
4194         return 1;
4195 }
4196
4197 static enum drm_connector_status
4198 g4x_dp_detect(struct intel_dp *intel_dp)
4199 {
4200         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4201         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4202         int ret;
4203
4204         /* Can't disconnect eDP, but you can close the lid... */
4205         if (is_edp(intel_dp)) {
4206                 enum drm_connector_status status;
4207
4208                 status = intel_panel_detect(dev);
4209                 if (status == connector_status_unknown)
4210                         status = connector_status_connected;
4211                 return status;
4212         }
4213
4214         ret = g4x_digital_port_connected(dev, intel_dig_port);
4215         if (ret == -EINVAL)
4216                 return connector_status_unknown;
4217         else if (ret == 0)
4218                 return connector_status_disconnected;
4219
4220         return intel_dp_detect_dpcd(intel_dp);
4221 }
4222
4223 static struct edid *
4224 intel_dp_get_edid(struct intel_dp *intel_dp)
4225 {
4226         struct intel_connector *intel_connector = intel_dp->attached_connector;
4227
4228         /* use cached edid if we have one */
4229         if (intel_connector->edid) {
4230                 /* invalid edid */
4231                 if (IS_ERR(intel_connector->edid))
4232                         return NULL;
4233
4234                 return drm_edid_duplicate(intel_connector->edid);
4235         } else
4236                 return drm_get_edid(&intel_connector->base,
4237                                     &intel_dp->aux.ddc);
4238 }
4239
4240 static void
4241 intel_dp_set_edid(struct intel_dp *intel_dp)
4242 {
4243         struct intel_connector *intel_connector = intel_dp->attached_connector;
4244         struct edid *edid;
4245
4246         edid = intel_dp_get_edid(intel_dp);
4247         intel_connector->detect_edid = edid;
4248
4249         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4250                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4251         else
4252                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4253 }
4254
4255 static void
4256 intel_dp_unset_edid(struct intel_dp *intel_dp)
4257 {
4258         struct intel_connector *intel_connector = intel_dp->attached_connector;
4259
4260         kfree(intel_connector->detect_edid);
4261         intel_connector->detect_edid = NULL;
4262
4263         intel_dp->has_audio = false;
4264 }
4265
4266 static enum intel_display_power_domain
4267 intel_dp_power_get(struct intel_dp *dp)
4268 {
4269         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4270         enum intel_display_power_domain power_domain;
4271
4272         power_domain = intel_display_port_power_domain(encoder);
4273         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4274
4275         return power_domain;
4276 }
4277
4278 static void
4279 intel_dp_power_put(struct intel_dp *dp,
4280                    enum intel_display_power_domain power_domain)
4281 {
4282         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4283         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4284 }
4285
/*
 * Connector ->detect() callback: determine whether a sink is present,
 * probe its OUI and MST capability, and cache its EDID. Holds a display
 * power reference around all AUX/DDC traffic.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
        bool ret;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        /* Invalidate any EDID cached by an earlier detect cycle. */
        intel_dp_unset_edid(intel_dp);

        if (intel_dp->is_mst) {
                /* MST devices are disconnected from a monitor POV */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                return connector_status_disconnected;
        }

        power_domain = intel_dp_power_get(intel_dp);

        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (HAS_PCH_SPLIT(dev))
                status = ironlake_dp_detect(intel_dp);
        else
                status = g4x_dp_detect(intel_dp);
        if (status != connector_status_connected)
                goto out;

        intel_dp_probe_oui(intel_dp);

        ret = intel_dp_probe_mst(intel_dp);
        if (ret) {
                /* if we are in MST mode then this connector
                   won't appear connected or have anything with EDID on it */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                status = connector_status_disconnected;
                goto out;
        }

        intel_dp_set_edid(intel_dp);

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        status = connector_status_connected;

out:
        /* Balance the reference taken above, on every exit path. */
        intel_dp_power_put(intel_dp, power_domain);
        return status;
}
4342
4343 static void
4344 intel_dp_force(struct drm_connector *connector)
4345 {
4346         struct intel_dp *intel_dp = intel_attached_dp(connector);
4347         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4348         enum intel_display_power_domain power_domain;
4349
4350         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4351                       connector->base.id, connector->name);
4352         intel_dp_unset_edid(intel_dp);
4353
4354         if (connector->status != connector_status_connected)
4355                 return;
4356
4357         power_domain = intel_dp_power_get(intel_dp);
4358
4359         intel_dp_set_edid(intel_dp);
4360
4361         intel_dp_power_put(intel_dp, power_domain);
4362
4363         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4364                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4365 }
4366
4367 static int intel_dp_get_modes(struct drm_connector *connector)
4368 {
4369         struct intel_connector *intel_connector = to_intel_connector(connector);
4370         struct edid *edid;
4371
4372         edid = intel_connector->detect_edid;
4373         if (edid) {
4374                 int ret = intel_connector_update_modes(connector, edid);
4375                 if (ret)
4376                         return ret;
4377         }
4378
4379         /* if eDP has no EDID, fall back to fixed mode */
4380         if (is_edp(intel_attached_dp(connector)) &&
4381             intel_connector->panel.fixed_mode) {
4382                 struct drm_display_mode *mode;
4383
4384                 mode = drm_mode_duplicate(connector->dev,
4385                                           intel_connector->panel.fixed_mode);
4386                 if (mode) {
4387                         drm_mode_probed_add(connector, mode);
4388                         return 1;
4389                 }
4390         }
4391
4392         return 0;
4393 }
4394
4395 static bool
4396 intel_dp_detect_audio(struct drm_connector *connector)
4397 {
4398         bool has_audio = false;
4399         struct edid *edid;
4400
4401         edid = to_intel_connector(connector)->detect_edid;
4402         if (edid)
4403                 has_audio = drm_detect_monitor_audio(edid);
4404
4405         return has_audio;
4406 }
4407
/*
 * Connector ->set_property() callback. Handles the force-audio,
 * broadcast-RGB and (eDP-only) scaling-mode properties; any property
 * change that affects output triggers a modeset via the done: path.
 *
 * Returns 0 on success or a negative errno.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
                      uint64_t val)
{
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
        int ret;

        /* Record the value on the DRM object first. */
        ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;

        if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;

                /* No-op if the forced setting didn't change. */
                if (i == intel_dp->force_audio)
                        return 0;

                intel_dp->force_audio = i;

                if (i == HDMI_AUDIO_AUTO)
                        has_audio = intel_dp_detect_audio(connector);
                else
                        has_audio = (i == HDMI_AUDIO_ON);

                /* Only modeset if the effective audio state changed. */
                if (has_audio == intel_dp->has_audio)
                        return 0;

                intel_dp->has_audio = has_audio;
                goto done;
        }

        if (property == dev_priv->broadcast_rgb_property) {
                bool old_auto = intel_dp->color_range_auto;
                uint32_t old_range = intel_dp->color_range;

                switch (val) {
                case INTEL_BROADCAST_RGB_AUTO:
                        intel_dp->color_range_auto = true;
                        break;
                case INTEL_BROADCAST_RGB_FULL:
                        intel_dp->color_range_auto = false;
                        intel_dp->color_range = 0;
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_dp->color_range_auto = false;
                        intel_dp->color_range = DP_COLOR_RANGE_16_235;
                        break;
                default:
                        return -EINVAL;
                }

                /* No-op if neither the auto flag nor the range changed. */
                if (old_auto == intel_dp->color_range_auto &&
                    old_range == intel_dp->color_range)
                        return 0;

                goto done;
        }

        if (is_edp(intel_dp) &&
            property == connector->dev->mode_config.scaling_mode_property) {
                if (val == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return -EINVAL;
                }

                if (intel_connector->panel.fitting_mode == val) {
                        /* the eDP scaling property is not changed */
                        return 0;
                }
                intel_connector->panel.fitting_mode = val;

                goto done;
        }

        return -EINVAL;

done:
        /* Apply the new state by restoring the mode on the active crtc. */
        if (intel_encoder->base.crtc)
                intel_crtc_restore_mode(intel_encoder->base.crtc);

        return 0;
}
4495
/*
 * Connector ->destroy() callback: free cached EDIDs, tear down the eDP
 * panel state if applicable, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);

        kfree(intel_connector->detect_edid);

        /* The cached EDID may be an ERR_PTR sentinel rather than memory. */
        if (!IS_ERR_OR_NULL(intel_connector->edid))
                kfree(intel_connector->edid);

        /* Can't call is_edp() since the encoder may have been destroyed
         * already. */
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_fini(&intel_connector->panel);

        drm_connector_cleanup(connector);
        kfree(connector);
}
4514
/*
 * Encoder ->destroy() callback: unregister the AUX channel, clean up MST
 * state, force eDP VDD off and drop the reboot notifier, then free the
 * digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;

        drm_dp_aux_unregister(&intel_dp->aux);
        intel_dp_mst_encoder_cleanup(intel_dig_port);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled do to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);

                if (intel_dp->edp_notifier.notifier_call) {
                        unregister_reboot_notifier(&intel_dp->edp_notifier);
                        intel_dp->edp_notifier.notifier_call = NULL;
                }
        }
        drm_encoder_cleanup(encoder);
        kfree(intel_dig_port);
}
4540
/* Encoder suspend hook: synchronously turn off eDP panel VDD. */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

        /* Only eDP has panel power sequencing state to flush. */
        if (!is_edp(intel_dp))
                return;

        /*
         * vdd might still be enabled do to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
4557
/*
 * Bring the software VDD tracking in line with hardware state at boot or
 * resume: if the BIOS left VDD on, take the matching power reference and
 * schedule the usual delayed VDD off. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Nothing to fix up if VDD is already off. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        /*
         * The VDD bit needs a power domain reference, so if the bit is
         * already enabled when we boot or resume, grab this reference and
         * schedule a vdd off, so we don't hold on to the reference
         * indefinitely.
         */
        DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
        power_domain = intel_display_port_power_domain(&intel_dig_port->base);
        intel_display_power_get(dev_priv, power_domain);

        edp_panel_vdd_schedule_off(intel_dp);
}
4582
/*
 * Encoder ->reset() callback: re-sync eDP power sequencer state after
 * boot/resume. Non-eDP encoders have nothing to restore.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
        struct intel_dp *intel_dp;

        if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
                return;

        intel_dp = enc_to_intel_dp(encoder);

        pps_lock(intel_dp);

        /*
         * Read out the current power sequencer assignment,
         * in case the BIOS did something with it.
         */
        if (IS_VALLEYVIEW(encoder->dev))
                vlv_initial_power_sequencer_setup(intel_dp);

        intel_edp_panel_vdd_sanitize(intel_dp);

        pps_unlock(intel_dp);
}
4605
/* Connector ops shared by all DP/eDP connectors created by this driver. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
        .destroy = intel_dp_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
4616
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .best_encoder = intel_best_encoder,
};
4622
/* Encoder lifetime ops for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
4627
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
        /* Intentionally empty; nothing to do in this callback. */
}
4633
/*
 * Handle a hot-plug IRQ pulse on a DP digital port.
 *
 * A long pulse triggers a full reprobe: port-connected check, DPCD read,
 * OUI probe and MST probe.  A short pulse services pending MST messages,
 * or (for non-MST sinks) checks link status immediately.
 *
 * Returns IRQ_HANDLED when the pulse was fully processed, IRQ_NONE when
 * probing failed and MST mode (if any) was torn down.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        enum irqreturn ret = IRQ_NONE;

        /* Non-eDP ports are (re)marked as plain DisplayPort before probing. */
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
                 * vdd off can generate a long pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                              port_name(intel_dig_port->port));
                return IRQ_HANDLED;
        }

        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
                      port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");

        /* Keep the port's power domain up for the duration of the probe. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        if (long_hpd) {

                /* Bail if the hardware reports the port as disconnected. */
                if (HAS_PCH_SPLIT(dev)) {
                        if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
                                goto mst_fail;
                } else {
                        if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
                                goto mst_fail;
                }

                if (!intel_dp_get_dpcd(intel_dp)) {
                        goto mst_fail;
                }

                intel_dp_probe_oui(intel_dp);

                if (!intel_dp_probe_mst(intel_dp))
                        goto mst_fail;

        } else {
                if (intel_dp->is_mst) {
                        if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                                goto mst_fail;
                }

                if (!intel_dp->is_mst) {
                        /*
                         * we'll check the link status via the normal hot plug path later -
                         * but for short hpds we should check it now
                         */
                        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                        intel_dp_check_link_status(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
        }

        ret = IRQ_HANDLED;

        goto put_power;
mst_fail:
        /* if we were in MST mode, and device is not there get out of MST mode */
        if (intel_dp->is_mst) {
                DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
                intel_dp->is_mst = false;
                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        }
put_power:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
4717
4718 /* Return which DP Port should be selected for Transcoder DP control */
4719 int
4720 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4721 {
4722         struct drm_device *dev = crtc->dev;
4723         struct intel_encoder *intel_encoder;
4724         struct intel_dp *intel_dp;
4725
4726         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4727                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4728
4729                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4730                     intel_encoder->type == INTEL_OUTPUT_EDP)
4731                         return intel_dp->output_reg;
4732         }
4733
4734         return -1;
4735 }
4736
4737 /* check the VBT to see whether the eDP is on DP-D port */
4738 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4739 {
4740         struct drm_i915_private *dev_priv = dev->dev_private;
4741         union child_device_config *p_child;
4742         int i;
4743         static const short port_mapping[] = {
4744                 [PORT_B] = PORT_IDPB,
4745                 [PORT_C] = PORT_IDPC,
4746                 [PORT_D] = PORT_IDPD,
4747         };
4748
4749         if (port == PORT_A)
4750                 return true;
4751
4752         if (!dev_priv->vbt.child_dev_num)
4753                 return false;
4754
4755         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4756                 p_child = dev_priv->vbt.child_dev + i;
4757
4758                 if (p_child->common.dvo_port == port_mapping[port] &&
4759                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4760                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4761                         return true;
4762         }
4763         return false;
4764 }
4765
4766 void
4767 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4768 {
4769         struct intel_connector *intel_connector = to_intel_connector(connector);
4770
4771         intel_attach_force_audio_property(connector);
4772         intel_attach_broadcast_rgb_property(connector);
4773         intel_dp->color_range_auto = true;
4774
4775         if (is_edp(intel_dp)) {
4776                 drm_mode_create_scaling_mode_property(connector->dev);
4777                 drm_object_attach_property(
4778                         &connector->base,
4779                         connector->dev->mode_config.scaling_mode_property,
4780                         DRM_MODE_SCALE_ASPECT);
4781                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4782         }
4783 }
4784
4785 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4786 {
4787         intel_dp->last_power_cycle = jiffies;
4788         intel_dp->last_power_on = jiffies;
4789         intel_dp->last_backlight_off = jiffies;
4790 }
4791
/*
 * Compute the panel power sequencing delays for this eDP panel and store
 * them in intel_dp->pps_delays.  For each delay the max of the current
 * register value and the VBT value is used; if both are zero, the eDP 1.3
 * spec limit is used instead.  Runs at most once: returns early once
 * t11_t12 is non-zero.  Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div, pp;
        int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        /* Select the PPS register block: PCH, or per-pipe VLV/CHV registers. */
        if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp = ironlake_get_pp_control(intel_dp);
        I915_WRITE(pp_ctrl_reg, pp);

        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
        pp_div = I915_READ(pp_div_reg);

        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                PANEL_POWER_UP_DELAY_SHIFT;

        cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                PANEL_LIGHT_ON_DELAY_SHIFT;

        cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                PANEL_LIGHT_OFF_DELAY_SHIFT;

        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;

        /* Scale the hw power-cycle delay into the 100us units used by the
         * other fields (see the spec.t11_t12 note below). */
        cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

        vbt = dev_priv->vbt.edp_pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert from the 100us register units to milliseconds, rounding up. */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4894
/*
 * Program the computed pps_delays into the panel power sequencer
 * registers, along with the pp clock divisor and (where applicable) the
 * panel port select bits.  Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        /* Reference clock for the divisor: PCH raw clock on PCH platforms. */
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Select the PPS register block: PCH, or per-pipe VLV/CHV registers. */
        if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /*
         * And finally store the new values in the power sequencer. The
         * backlight delays are set to 1 because we do manual waits on them. For
         * T8, even BSpec recommends doing it. For T9, if we don't do this,
         * we'll end up waiting for the backlight off delay twice: once when we
         * do the manual sleep, and once when we disable the panel and wait for
         * the PP_STATUS bit to become zero.
         */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
        /* t11_t12 is held in 100us units; the register field wants larger
         * units, hence the DIV_ROUND_UP by 1000. */
        pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(pp_on_reg, pp_on);
        I915_WRITE(pp_off_reg, pp_off);
        I915_WRITE(pp_div_reg, pp_div);

        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(pp_on_reg),
                      I915_READ(pp_off_reg),
                      I915_READ(pp_div_reg));
}
4960
4961 /**
4962  * intel_dp_set_drrs_state - program registers for RR switch to take effect
4963  * @dev: DRM device
4964  * @refresh_rate: RR to be programmed
4965  *
4966  * This function gets called when refresh rate (RR) has to be changed from
4967  * one frequency to another. Switches can be between high and low RR
4968  * supported by the panel or to any other RR based on media playback (in
4969  * this case, RR value needs to be passed from user space).
4970  *
4971  * The caller of this function needs to take a lock on dev_priv->drrs.
4972  */
4973 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4974 {
4975         struct drm_i915_private *dev_priv = dev->dev_private;
4976         struct intel_encoder *encoder;
4977         struct intel_digital_port *dig_port = NULL;
4978         struct intel_dp *intel_dp = dev_priv->drrs.dp;
4979         struct intel_crtc_state *config = NULL;
4980         struct intel_crtc *intel_crtc = NULL;
4981         u32 reg, val;
4982         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4983
4984         if (refresh_rate <= 0) {
4985                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4986                 return;
4987         }
4988
4989         if (intel_dp == NULL) {
4990                 DRM_DEBUG_KMS("DRRS not supported.\n");
4991                 return;
4992         }
4993
4994         /*
4995          * FIXME: This needs proper synchronization with psr state for some
4996          * platforms that cannot have PSR and DRRS enabled at the same time.
4997          */
4998
4999         dig_port = dp_to_dig_port(intel_dp);
5000         encoder = &dig_port->base;
5001         intel_crtc = encoder->new_crtc;
5002
5003         if (!intel_crtc) {
5004                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5005                 return;
5006         }
5007
5008         config = intel_crtc->config;
5009
5010         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5011                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5012                 return;
5013         }
5014
5015         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5016                         refresh_rate)
5017                 index = DRRS_LOW_RR;
5018
5019         if (index == dev_priv->drrs.refresh_rate_type) {
5020                 DRM_DEBUG_KMS(
5021                         "DRRS requested for previously set RR...ignoring\n");
5022                 return;
5023         }
5024
5025         if (!intel_crtc->active) {
5026                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5027                 return;
5028         }
5029
5030         if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5031                 switch (index) {
5032                 case DRRS_HIGH_RR:
5033                         intel_dp_set_m_n(intel_crtc, M1_N1);
5034                         break;
5035                 case DRRS_LOW_RR:
5036                         intel_dp_set_m_n(intel_crtc, M2_N2);
5037                         break;
5038                 case DRRS_MAX_RR:
5039                 default:
5040                         DRM_ERROR("Unsupported refreshrate type\n");
5041                 }
5042         } else if (INTEL_INFO(dev)->gen > 6) {
5043                 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5044                 val = I915_READ(reg);
5045
5046                 if (index > DRRS_HIGH_RR) {
5047                         if (IS_VALLEYVIEW(dev))
5048                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5049                         else
5050                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5051                 } else {
5052                         if (IS_VALLEYVIEW(dev))
5053                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5054                         else
5055                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5056                 }
5057                 I915_WRITE(reg, val);
5058         }
5059
5060         dev_priv->drrs.refresh_rate_type = index;
5061
5062         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5063 }
5064
5065 /**
5066  * intel_edp_drrs_enable - init drrs struct if supported
5067  * @intel_dp: DP struct
5068  *
5069  * Initializes frontbuffer_bits and drrs.dp
5070  */
5071 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5072 {
5073         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5074         struct drm_i915_private *dev_priv = dev->dev_private;
5075         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5076         struct drm_crtc *crtc = dig_port->base.base.crtc;
5077         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5078
5079         if (!intel_crtc->config->has_drrs) {
5080                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5081                 return;
5082         }
5083
5084         mutex_lock(&dev_priv->drrs.mutex);
5085         if (WARN_ON(dev_priv->drrs.dp)) {
5086                 DRM_ERROR("DRRS already enabled\n");
5087                 goto unlock;
5088         }
5089
5090         dev_priv->drrs.busy_frontbuffer_bits = 0;
5091
5092         dev_priv->drrs.dp = intel_dp;
5093
5094 unlock:
5095         mutex_unlock(&dev_priv->drrs.mutex);
5096 }
5097
5098 /**
5099  * intel_edp_drrs_disable - Disable DRRS
5100  * @intel_dp: DP struct
5101  *
5102  */
5103 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5104 {
5105         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5106         struct drm_i915_private *dev_priv = dev->dev_private;
5107         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5108         struct drm_crtc *crtc = dig_port->base.base.crtc;
5109         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5110
5111         if (!intel_crtc->config->has_drrs)
5112                 return;
5113
5114         mutex_lock(&dev_priv->drrs.mutex);
5115         if (!dev_priv->drrs.dp) {
5116                 mutex_unlock(&dev_priv->drrs.mutex);
5117                 return;
5118         }
5119
5120         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5121                 intel_dp_set_drrs_state(dev_priv->dev,
5122                         intel_dp->attached_connector->panel.
5123                         fixed_mode->vrefresh);
5124
5125         dev_priv->drrs.dp = NULL;
5126         mutex_unlock(&dev_priv->drrs.mutex);
5127
5128         cancel_delayed_work_sync(&dev_priv->drrs.work);
5129 }
5130
5131 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5132 {
5133         struct drm_i915_private *dev_priv =
5134                 container_of(work, typeof(*dev_priv), drrs.work.work);
5135         struct intel_dp *intel_dp;
5136
5137         mutex_lock(&dev_priv->drrs.mutex);
5138
5139         intel_dp = dev_priv->drrs.dp;
5140
5141         if (!intel_dp)
5142                 goto unlock;
5143
5144         /*
5145          * The delayed work can race with an invalidate hence we need to
5146          * recheck.
5147          */
5148
5149         if (dev_priv->drrs.busy_frontbuffer_bits)
5150                 goto unlock;
5151
5152         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5153                 intel_dp_set_drrs_state(dev_priv->dev,
5154                         intel_dp->attached_connector->panel.
5155                         downclock_mode->vrefresh);
5156
5157 unlock:
5158
5159         mutex_unlock(&dev_priv->drrs.mutex);
5160 }
5161
5162 /**
5163  * intel_edp_drrs_invalidate - Invalidate DRRS
5164  * @dev: DRM device
5165  * @frontbuffer_bits: frontbuffer plane tracking bits
5166  *
5167  * When there is a disturbance on screen (due to cursor movement/time
5168  * update etc), DRRS needs to be invalidated, i.e. need to switch to
5169  * high RR.
5170  *
5171  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5172  */
5173 void intel_edp_drrs_invalidate(struct drm_device *dev,
5174                 unsigned frontbuffer_bits)
5175 {
5176         struct drm_i915_private *dev_priv = dev->dev_private;
5177         struct drm_crtc *crtc;
5178         enum pipe pipe;
5179
5180         if (!dev_priv->drrs.dp)
5181                 return;
5182
5183         cancel_delayed_work_sync(&dev_priv->drrs.work);
5184
5185         mutex_lock(&dev_priv->drrs.mutex);
5186         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5187         pipe = to_intel_crtc(crtc)->pipe;
5188
5189         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5190                 intel_dp_set_drrs_state(dev_priv->dev,
5191                                 dev_priv->drrs.dp->attached_connector->panel.
5192                                 fixed_mode->vrefresh);
5193         }
5194
5195         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5196
5197         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5198         mutex_unlock(&dev_priv->drrs.mutex);
5199 }
5200
5201 /**
5202  * intel_edp_drrs_flush - Flush DRRS
5203  * @dev: DRM device
5204  * @frontbuffer_bits: frontbuffer plane tracking bits
5205  *
5206  * When there is no movement on screen, DRRS work can be scheduled.
5207  * This DRRS work is responsible for setting relevant registers after a
5208  * timeout of 1 second.
5209  *
5210  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5211  */
5212 void intel_edp_drrs_flush(struct drm_device *dev,
5213                 unsigned frontbuffer_bits)
5214 {
5215         struct drm_i915_private *dev_priv = dev->dev_private;
5216         struct drm_crtc *crtc;
5217         enum pipe pipe;
5218
5219         if (!dev_priv->drrs.dp)
5220                 return;
5221
5222         cancel_delayed_work_sync(&dev_priv->drrs.work);
5223
5224         mutex_lock(&dev_priv->drrs.mutex);
5225         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5226         pipe = to_intel_crtc(crtc)->pipe;
5227         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5228
5229         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5230                         !dev_priv->drrs.busy_frontbuffer_bits)
5231                 schedule_delayed_work(&dev_priv->drrs.work,
5232                                 msecs_to_jiffies(1000));
5233         mutex_unlock(&dev_priv->drrs.mutex);
5234 }
5235
5236 /**
5237  * DOC: Display Refresh Rate Switching (DRRS)
5238  *
5239  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5241  * dynamically, based on the usage scenario. This feature is applicable
5242  * for internal panels.
5243  *
5244  * Indication that the panel supports DRRS is given by the panel EDID, which
5245  * would list multiple refresh rates for one resolution.
5246  *
5247  * DRRS is of 2 types - static and seamless.
5248  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5249  * (may appear as a blink on screen) and is used in dock-undock scenario.
5250  * Seamless DRRS involves changing RR without any visual effect to the user
5251  * and can be used during normal system usage. This is done by programming
5252  * certain registers.
5253  *
5254  * Support for static/seamless DRRS may be indicated in the VBT based on
5255  * inputs from the panel spec.
5256  *
5257  * DRRS saves power by switching to low RR based on usage scenarios.
5258  *
5259  * eDP DRRS:-
5260  *        The implementation is based on frontbuffer tracking implementation.
5261  * When there is a disturbance on the screen triggered by user activity or a
5262  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5263  * When there is no movement on screen, after a timeout of 1 second, a switch
5264  * to low RR is made.
5265  *        For integration with frontbuffer tracking code,
5266  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5267  *
5268  * DRRS can be further extended to support other internal panels and also
5269  * the scenario of video playback wherein RR is set based on the rate
5270  * requested by userspace.
5271  */
5272
5273 /**
5274  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5275  * @intel_connector: eDP connector
5276  * @fixed_mode: preferred mode of panel
5277  *
5278  * This function is  called only once at driver load to initialize basic
5279  * DRRS stuff.
5280  *
5281  * Returns:
5282  * Downclock mode if panel supports it, else return NULL.
5283  * DRRS support is determined by the presence of downclock mode (apart
5284  * from VBT setting).
5285  */
5286 static struct drm_display_mode *
5287 intel_dp_drrs_init(struct intel_connector *intel_connector,
5288                 struct drm_display_mode *fixed_mode)
5289 {
5290         struct drm_connector *connector = &intel_connector->base;
5291         struct drm_device *dev = connector->dev;
5292         struct drm_i915_private *dev_priv = dev->dev_private;
5293         struct drm_display_mode *downclock_mode = NULL;
5294
5295         if (INTEL_INFO(dev)->gen <= 6) {
5296                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5297                 return NULL;
5298         }
5299
5300         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5301                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5302                 return NULL;
5303         }
5304
5305         downclock_mode = intel_find_panel_downclock
5306                                         (dev, fixed_mode, connector);
5307
5308         if (!downclock_mode) {
5309                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5310                 return NULL;
5311         }
5312
5313         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5314
5315         mutex_init(&dev_priv->drrs.mutex);
5316
5317         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5318
5319         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5320         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5321         return downclock_mode;
5322 }
5323
/*
 * eDP-specific half of connector init: sanitize VDD state, cache the DPCD
 * and EDID, pick the fixed panel mode (EDID preferred, VBT fallback), set
 * up DRRS and the backlight.  Returns true on success (or trivially for
 * non-eDP), false when the DPCD read fails and the panel is presumed to
 * be a ghost.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        dev_priv->drrs.type = DRRS_NOT_SUPPORTED;

        if (!is_edp(intel_dp))
                return true;

        pps_lock(intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_dp_get_dpcd(intel_dp);

        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                        dev_priv->no_aux_handshake =
                                intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                return false;
        }

        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        /* EDID yielded no modes: store an error marker instead. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        /* May be a valid EDID pointer or an ERR_PTR sentinel. */
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        downclock_mode = intel_dp_drrs_init(
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode)
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                if (IS_CHERRYVIEW(dev))
                        pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
                else
                        pipe = PORT_TO_PIPE(intel_dp->DP);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight_power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;
}
5432
/*
 * intel_dp_init_connector - initialize the connector for a DP/eDP port
 * @intel_dig_port: digital port hosting this DP encoder
 * @intel_connector: pre-allocated connector to bind to the encoder
 *
 * Installs the per-platform AUX vfuncs, decides DP vs eDP, registers the
 * DRM connector, sets up the hotplug pin, panel power sequencing (eDP
 * only), the AUX channel, and MST on ports/platforms that support it.
 *
 * Returns true on success.  On failure the connector is unregistered and
 * cleaned up here and false is returned; the caller remains responsible
 * for freeing the encoder/connector allocations themselves.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        int type;

        intel_dp->pps_pipe = INVALID_PIPE;

        /* intel_dp vfuncs: pick the AUX clock divider for this platform */
        if (INTEL_INFO(dev)->gen >= 9)
                intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
        else if (IS_VALLEYVIEW(dev))
                intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else if (HAS_PCH_SPLIT(dev))
                intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

        /* gen9+ has its own AUX send-control setup */
        if (INTEL_INFO(dev)->gen >= 9)
                intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
        else
                intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_edp(dev, port))
                type = DRM_MODE_CONNECTOR_eDP;
        else
                type = DRM_MODE_CONNECTOR_DisplayPort;

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
                    port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                        type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                        port_name(port));

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);

        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_connector_register(connector);

        if (HAS_DDI(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
        intel_connector->unregister = intel_dp_connector_unregister;

        /* Set up the hotplug pin. */
        switch (port) {
        case PORT_A:
                intel_encoder->hpd_pin = HPD_PORT_A;
                break;
        case PORT_B:
                intel_encoder->hpd_pin = HPD_PORT_B;
                break;
        case PORT_C:
                intel_encoder->hpd_pin = HPD_PORT_C;
                break;
        case PORT_D:
                intel_encoder->hpd_pin = HPD_PORT_D;
                break;
        default:
                /* all DP-capable ports are handled above */
                BUG();
        }

        /* Set up eDP panel power sequencing before registering the AUX
         * channel; must be done under pps_lock. */
        if (is_edp(intel_dp)) {
                pps_lock(intel_dp);
                intel_dp_init_panel_power_timestamps(intel_dp);
                if (IS_VALLEYVIEW(dev))
                        vlv_initial_power_sequencer_setup(intel_dp);
                else
                        intel_dp_init_panel_power_sequencer(dev, intel_dp);
                pps_unlock(intel_dp);
        }

        intel_dp_aux_init(intel_dp, intel_connector);

        /* init MST on ports that can support it */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                if (port == PORT_B || port == PORT_C || port == PORT_D) {
                        intel_dp_mst_encoder_init(intel_dig_port,
                                                  intel_connector->base.base.id);
                }
        }

        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                drm_dp_aux_unregister(&intel_dp->aux);
                if (is_edp(intel_dp)) {
                        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                        /*
                         * vdd might still be enabled due to the delayed vdd off.
                         * Make sure vdd is actually turned off here.
                         */
                        pps_lock(intel_dp);
                        edp_panel_vdd_off_sync(intel_dp);
                        pps_unlock(intel_dp);
                }
                drm_connector_unregister(connector);
                drm_connector_cleanup(connector);
                return false;
        }

        intel_dp_add_properties(intel_dp, connector);

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G4X(dev) && !IS_GM45(dev)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        return true;
}
5576
5577 void
5578 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5579 {
5580         struct drm_i915_private *dev_priv = dev->dev_private;
5581         struct intel_digital_port *intel_dig_port;
5582         struct intel_encoder *intel_encoder;
5583         struct drm_encoder *encoder;
5584         struct intel_connector *intel_connector;
5585
5586         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5587         if (!intel_dig_port)
5588                 return;
5589
5590         intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5591         if (!intel_connector) {
5592                 kfree(intel_dig_port);
5593                 return;
5594         }
5595
5596         intel_encoder = &intel_dig_port->base;
5597         encoder = &intel_encoder->base;
5598
5599         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5600                          DRM_MODE_ENCODER_TMDS);
5601
5602         intel_encoder->compute_config = intel_dp_compute_config;
5603         intel_encoder->disable = intel_disable_dp;
5604         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5605         intel_encoder->get_config = intel_dp_get_config;
5606         intel_encoder->suspend = intel_dp_encoder_suspend;
5607         if (IS_CHERRYVIEW(dev)) {
5608                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5609                 intel_encoder->pre_enable = chv_pre_enable_dp;
5610                 intel_encoder->enable = vlv_enable_dp;
5611                 intel_encoder->post_disable = chv_post_disable_dp;
5612         } else if (IS_VALLEYVIEW(dev)) {
5613                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5614                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5615                 intel_encoder->enable = vlv_enable_dp;
5616                 intel_encoder->post_disable = vlv_post_disable_dp;
5617         } else {
5618                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5619                 intel_encoder->enable = g4x_enable_dp;
5620                 if (INTEL_INFO(dev)->gen >= 5)
5621                         intel_encoder->post_disable = ilk_post_disable_dp;
5622         }
5623
5624         intel_dig_port->port = port;
5625         intel_dig_port->dp.output_reg = output_reg;
5626
5627         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5628         if (IS_CHERRYVIEW(dev)) {
5629                 if (port == PORT_D)
5630                         intel_encoder->crtc_mask = 1 << 2;
5631                 else
5632                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5633         } else {
5634                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5635         }
5636         intel_encoder->cloneable = 0;
5637         intel_encoder->hot_plug = intel_dp_hot_plug;
5638
5639         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5640         dev_priv->hpd_irq_port[port] = intel_dig_port;
5641
5642         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5643                 drm_encoder_cleanup(encoder);
5644                 kfree(intel_dig_port);
5645                 kfree(intel_connector);
5646         }
5647 }
5648
5649 void intel_dp_mst_suspend(struct drm_device *dev)
5650 {
5651         struct drm_i915_private *dev_priv = dev->dev_private;
5652         int i;
5653
5654         /* disable MST */
5655         for (i = 0; i < I915_MAX_PORTS; i++) {
5656                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5657                 if (!intel_dig_port)
5658                         continue;
5659
5660                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5661                         if (!intel_dig_port->dp.can_mst)
5662                                 continue;
5663                         if (intel_dig_port->dp.is_mst)
5664                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5665                 }
5666         }
5667 }
5668
5669 void intel_dp_mst_resume(struct drm_device *dev)
5670 {
5671         struct drm_i915_private *dev_priv = dev->dev_private;
5672         int i;
5673
5674         for (i = 0; i < I915_MAX_PORTS; i++) {
5675                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5676                 if (!intel_dig_port)
5677                         continue;
5678                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5679                         int ret;
5680
5681                         if (!intel_dig_port->dp.can_mst)
5682                                 continue;
5683
5684                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5685                         if (ret != 0) {
5686                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5687                         }
5688                 }
5689         }
5690 }