OSDN Git Service

Revert "drm/i915: Add eDP intermediate frequencies for CHV"
[uclinux-h8/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/*
 * Maps a DP link bandwidth code (DP_LINK_BW_*) to the DPLL divider
 * settings needed to generate that link clock on a given platform.
 */
struct dp_link_dpll {
        int link_bw;
        struct dpll dpll;
};
54
/* DPLL divider settings for the gen4 DP link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
61
/* DPLL divider settings for DP links on PCH platforms. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
68
/* DPLL divider settings for DP links on Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
75
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming a fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
93
/* Link rates in kHz; SKL supports intermediate rates beyond the defaults. */
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
/* Standard DP link rates in kHz (RBR, HBR, HBR2). */
static const int default_rates[] = { 162000, 270000, 540000 };
97
98 /**
99  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
100  * @intel_dp: DP struct
101  *
102  * If a CPU or PCH DP output is attached to an eDP panel, this function
103  * will return true, and false otherwise.
104  */
105 static bool is_edp(struct intel_dp *intel_dp)
106 {
107         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
108
109         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
110 }
111
112 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
113 {
114         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
115
116         return intel_dig_port->base.base.dev;
117 }
118
119 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
120 {
121         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
122 }
123
124 static void intel_dp_link_down(struct intel_dp *intel_dp);
125 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
126 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
127 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
128 static void vlv_steal_power_sequencer(struct drm_device *dev,
129                                       enum pipe pipe);
130
131 static int
132 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
133 {
134         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
135
136         switch (max_link_bw) {
137         case DP_LINK_BW_1_62:
138         case DP_LINK_BW_2_7:
139         case DP_LINK_BW_5_4:
140                 break;
141         default:
142                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
143                      max_link_bw);
144                 max_link_bw = DP_LINK_BW_1_62;
145                 break;
146         }
147         return max_link_bw;
148 }
149
150 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
151 {
152         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
153         struct drm_device *dev = intel_dig_port->base.base.dev;
154         u8 source_max, sink_max;
155
156         source_max = 4;
157         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
158             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
159                 source_max = 2;
160
161         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
162
163         return min(source_max, sink_max);
164 }
165
166 /*
167  * The units on the numbers in the next two are... bizarre.  Examples will
168  * make it clearer; this one parallels an example in the eDP spec.
169  *
170  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
171  *
172  *     270000 * 1 * 8 / 10 == 216000
173  *
174  * The actual data capacity of that configuration is 2.16Gbit/s, so the
175  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
176  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
177  * 119000.  At 18bpp that's 2142000 kilobits per second.
178  *
179  * Thus the strange-looking division by 10 in intel_dp_link_required, to
180  * get the result in decakilobits instead of kilobits.
181  */
182
/*
 * Link bandwidth needed for a mode, in decakilobits per second
 * (see the units discussion in the comment above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int total_kbps = pixel_clock * bpp;

        /* Ceiling division by 10: kilobits -> decakilobits. */
        return (total_kbps + 9) / 10;
}
188
/*
 * Maximum payload data rate of a link, in decakilobits per second.
 * 8b/10b channel coding means only 80% of the symbol rate carries data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        return max_link_clock * max_lanes * 8 / 10;
}
194
195 static enum drm_mode_status
196 intel_dp_mode_valid(struct drm_connector *connector,
197                     struct drm_display_mode *mode)
198 {
199         struct intel_dp *intel_dp = intel_attached_dp(connector);
200         struct intel_connector *intel_connector = to_intel_connector(connector);
201         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
202         int target_clock = mode->clock;
203         int max_rate, mode_rate, max_lanes, max_link_clock;
204
205         if (is_edp(intel_dp) && fixed_mode) {
206                 if (mode->hdisplay > fixed_mode->hdisplay)
207                         return MODE_PANEL;
208
209                 if (mode->vdisplay > fixed_mode->vdisplay)
210                         return MODE_PANEL;
211
212                 target_clock = fixed_mode->clock;
213         }
214
215         max_link_clock = intel_dp_max_link_rate(intel_dp);
216         max_lanes = intel_dp_max_lane_count(intel_dp);
217
218         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
219         mode_rate = intel_dp_link_required(target_clock, 18);
220
221         if (mode_rate > max_rate)
222                 return MODE_CLOCK_HIGH;
223
224         if (mode->clock < 10000)
225                 return MODE_CLOCK_LOW;
226
227         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
228                 return MODE_H_ILLEGAL;
229
230         return MODE_OK;
231 }
232
233 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
234 {
235         int     i;
236         uint32_t v = 0;
237
238         if (src_bytes > 4)
239                 src_bytes = 4;
240         for (i = 0; i < src_bytes; i++)
241                 v |= ((uint32_t) src[i]) << ((3-i) * 8);
242         return v;
243 }
244
245 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
246 {
247         int i;
248         if (dst_bytes > 4)
249                 dst_bytes = 4;
250         for (i = 0; i < dst_bytes; i++)
251                 dst[i] = src >> ((3-i) * 8);
252 }
253
254 /* hrawclock is 1/4 the FSB frequency */
255 static int
256 intel_hrawclk(struct drm_device *dev)
257 {
258         struct drm_i915_private *dev_priv = dev->dev_private;
259         uint32_t clkcfg;
260
261         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
262         if (IS_VALLEYVIEW(dev))
263                 return 200;
264
265         clkcfg = I915_READ(CLKCFG);
266         switch (clkcfg & CLKCFG_FSB_MASK) {
267         case CLKCFG_FSB_400:
268                 return 100;
269         case CLKCFG_FSB_533:
270                 return 133;
271         case CLKCFG_FSB_667:
272                 return 166;
273         case CLKCFG_FSB_800:
274                 return 200;
275         case CLKCFG_FSB_1067:
276                 return 266;
277         case CLKCFG_FSB_1333:
278                 return 333;
279         /* these two are just a guess; one of them might be right */
280         case CLKCFG_FSB_1600:
281         case CLKCFG_FSB_1600_ALT:
282                 return 400;
283         default:
284                 return 133;
285         }
286 }
287
288 static void
289 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
290                                     struct intel_dp *intel_dp);
291 static void
292 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
293                                               struct intel_dp *intel_dp);
294
295 static void pps_lock(struct intel_dp *intel_dp)
296 {
297         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
298         struct intel_encoder *encoder = &intel_dig_port->base;
299         struct drm_device *dev = encoder->base.dev;
300         struct drm_i915_private *dev_priv = dev->dev_private;
301         enum intel_display_power_domain power_domain;
302
303         /*
304          * See vlv_power_sequencer_reset() why we need
305          * a power domain reference here.
306          */
307         power_domain = intel_display_port_power_domain(encoder);
308         intel_display_power_get(dev_priv, power_domain);
309
310         mutex_lock(&dev_priv->pps_mutex);
311 }
312
313 static void pps_unlock(struct intel_dp *intel_dp)
314 {
315         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
316         struct intel_encoder *encoder = &intel_dig_port->base;
317         struct drm_device *dev = encoder->base.dev;
318         struct drm_i915_private *dev_priv = dev->dev_private;
319         enum intel_display_power_domain power_domain;
320
321         mutex_unlock(&dev_priv->pps_mutex);
322
323         power_domain = intel_display_port_power_domain(encoder);
324         intel_display_power_put(dev_priv, power_domain);
325 }
326
327 static void
328 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
329 {
330         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
331         struct drm_device *dev = intel_dig_port->base.base.dev;
332         struct drm_i915_private *dev_priv = dev->dev_private;
333         enum pipe pipe = intel_dp->pps_pipe;
334         bool pll_enabled;
335         uint32_t DP;
336
337         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
338                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
339                  pipe_name(pipe), port_name(intel_dig_port->port)))
340                 return;
341
342         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
343                       pipe_name(pipe), port_name(intel_dig_port->port));
344
345         /* Preserve the BIOS-computed detected bit. This is
346          * supposed to be read-only.
347          */
348         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
349         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
350         DP |= DP_PORT_WIDTH(1);
351         DP |= DP_LINK_TRAIN_PAT_1;
352
353         if (IS_CHERRYVIEW(dev))
354                 DP |= DP_PIPE_SELECT_CHV(pipe);
355         else if (pipe == PIPE_B)
356                 DP |= DP_PIPEB_SELECT;
357
358         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
359
360         /*
361          * The DPLL for the pipe must be enabled for this to work.
362          * So enable temporarily it if it's not already enabled.
363          */
364         if (!pll_enabled)
365                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
366                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
367
368         /*
369          * Similar magic as in intel_dp_enable_port().
370          * We _must_ do this port enable + disable trick
371          * to make this power seqeuencer lock onto the port.
372          * Otherwise even VDD force bit won't work.
373          */
374         I915_WRITE(intel_dp->output_reg, DP);
375         POSTING_READ(intel_dp->output_reg);
376
377         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
378         POSTING_READ(intel_dp->output_reg);
379
380         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
381         POSTING_READ(intel_dp->output_reg);
382
383         if (!pll_enabled)
384                 vlv_force_pll_off(dev, pipe);
385 }
386
/*
 * Return the pipe whose panel power sequencer drives this eDP port,
 * assigning a currently-unused sequencer (and kicking it so it locks
 * onto the port) if none is bound yet. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        /* Bitmask of sequencer-capable pipes still up for grabs. */
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
450
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

/* Is the power sequencer on @pipe currently powering the panel? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* Is VDD currently forced on by the power sequencer on @pipe? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe; the last-resort filter. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
471
472 static enum pipe
473 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
474                      enum port port,
475                      vlv_pipe_check pipe_check)
476 {
477         enum pipe pipe;
478
479         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
480                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
481                         PANEL_PORT_SELECT_MASK;
482
483                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
484                         continue;
485
486                 if (!pipe_check(dev_priv, pipe))
487                         continue;
488
489                 return pipe;
490         }
491
492         return INVALID_PIPE;
493 }
494
/*
 * At init time, bind this eDP port to whichever power sequencer the
 * BIOS left driving it, preferring (in order) a sequencer with the
 * panel on, one with VDD forced on, then any with the matching port.
 * Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
531
532 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
533 {
534         struct drm_device *dev = dev_priv->dev;
535         struct intel_encoder *encoder;
536
537         if (WARN_ON(!IS_VALLEYVIEW(dev)))
538                 return;
539
540         /*
541          * We can't grab pps_mutex here due to deadlock with power_domain
542          * mutex when power_domain functions are called while holding pps_mutex.
543          * That also means that in order to use pps_pipe the code needs to
544          * hold both a power domain reference and pps_mutex, and the power domain
545          * reference get/put must be done while _not_ holding pps_mutex.
546          * pps_{lock,unlock}() do these steps in the correct order, so one
547          * should use them always.
548          */
549
550         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
551                 struct intel_dp *intel_dp;
552
553                 if (encoder->type != INTEL_OUTPUT_EDP)
554                         continue;
555
556                 intel_dp = enc_to_intel_dp(&encoder->base);
557                 intel_dp->pps_pipe = INVALID_PIPE;
558         }
559 }
560
561 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
562 {
563         struct drm_device *dev = intel_dp_to_dev(intel_dp);
564
565         if (HAS_PCH_SPLIT(dev))
566                 return PCH_PP_CONTROL;
567         else
568                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
569 }
570
571 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
572 {
573         struct drm_device *dev = intel_dp_to_dev(intel_dp);
574
575         if (HAS_PCH_SPLIT(dev))
576                 return PCH_PP_STATUS;
577         else
578                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
579 }
580
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing.
   This function is only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* Only act on reboot, and only for eDP panels. */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* Wait out the full power-cycle delay so T12 is honored. */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
616
617 static bool edp_have_panel_power(struct intel_dp *intel_dp)
618 {
619         struct drm_device *dev = intel_dp_to_dev(intel_dp);
620         struct drm_i915_private *dev_priv = dev->dev_private;
621
622         lockdep_assert_held(&dev_priv->pps_mutex);
623
624         if (IS_VALLEYVIEW(dev) &&
625             intel_dp->pps_pipe == INVALID_PIPE)
626                 return false;
627
628         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
629 }
630
631 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
632 {
633         struct drm_device *dev = intel_dp_to_dev(intel_dp);
634         struct drm_i915_private *dev_priv = dev->dev_private;
635
636         lockdep_assert_held(&dev_priv->pps_mutex);
637
638         if (IS_VALLEYVIEW(dev) &&
639             intel_dp->pps_pipe == INVALID_PIPE)
640                 return false;
641
642         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
643 }
644
645 static void
646 intel_dp_check_edp(struct intel_dp *intel_dp)
647 {
648         struct drm_device *dev = intel_dp_to_dev(intel_dp);
649         struct drm_i915_private *dev_priv = dev->dev_private;
650
651         if (!is_edp(intel_dp))
652                 return;
653
654         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
655                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
656                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
657                               I915_READ(_pp_stat_reg(intel_dp)),
658                               I915_READ(_pp_ctrl_reg(intel_dp)));
659         }
660 }
661
/*
 * Wait for the in-flight AUX transfer to complete (SEND_BUSY to clear),
 * either via the AUX-done interrupt or by polling, and return the final
 * AUX channel control register value.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C re-reads the register each evaluation; the final read lands in status. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
685
686 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687 {
688         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689         struct drm_device *dev = intel_dig_port->base.base.dev;
690
691         /*
692          * The clock divider is based off the hrawclk, and would like to run at
693          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
694          */
695         return index ? 0 : intel_hrawclk(dev) / 2;
696 }
697
698 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
699 {
700         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
701         struct drm_device *dev = intel_dig_port->base.base.dev;
702         struct drm_i915_private *dev_priv = dev->dev_private;
703
704         if (index)
705                 return 0;
706
707         if (intel_dig_port->port == PORT_A) {
708                 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
709         } else {
710                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
711         }
712 }
713
714 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
715 {
716         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
717         struct drm_device *dev = intel_dig_port->base.base.dev;
718         struct drm_i915_private *dev_priv = dev->dev_private;
719
720         if (intel_dig_port->port == PORT_A) {
721                 if (index)
722                         return 0;
723                 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
724         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
725                 /* Workaround for non-ULT HSW */
726                 switch (index) {
727                 case 0: return 63;
728                 case 1: return 72;
729                 default: return 0;
730                 }
731         } else  {
732                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
733         }
734 }
735
736 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
737 {
738         return index ? 0 : 100;
739 }
740
741 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
742 {
743         /*
744          * SKL doesn't need us to program the AUX clock divider (Hardware will
745          * derive the clock from CDCLK automatically). We still implement the
746          * get_aux_clock_divider vfunc to plug-in into the existing code.
747          */
748         return index ? 0 : 1;
749 }
750
751 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
752                                       bool has_aux_irq,
753                                       int send_bytes,
754                                       uint32_t aux_clock_divider)
755 {
756         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
757         struct drm_device *dev = intel_dig_port->base.base.dev;
758         uint32_t precharge, timeout;
759
760         if (IS_GEN6(dev))
761                 precharge = 3;
762         else
763                 precharge = 5;
764
765         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
766                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
767         else
768                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
769
770         return DP_AUX_CH_CTL_SEND_BUSY |
771                DP_AUX_CH_CTL_DONE |
772                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
773                DP_AUX_CH_CTL_TIME_OUT_ERROR |
774                timeout |
775                DP_AUX_CH_CTL_RECEIVE_ERROR |
776                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
777                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
778                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
779 }
780
781 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
782                                       bool has_aux_irq,
783                                       int send_bytes,
784                                       uint32_t unused)
785 {
786         return DP_AUX_CH_CTL_SEND_BUSY |
787                DP_AUX_CH_CTL_DONE |
788                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
789                DP_AUX_CH_CTL_TIME_OUT_ERROR |
790                DP_AUX_CH_CTL_TIME_OUT_1600us |
791                DP_AUX_CH_CTL_RECEIVE_ERROR |
792                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
793                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
794 }
795
/*
 * Perform one raw AUX channel transaction: load @send_bytes from @send into
 * the channel data registers, start the transfer, and unpack up to
 * @recv_size reply bytes into @recv.
 *
 * Returns the number of bytes received, or a negative errno:
 *   -EBUSY      previous AUX activity never went idle, or no attempt
 *               completed across all clock dividers
 *   -E2BIG      payload exceeds the 20 bytes held by the 5 data registers
 *   -EIO        hardware flagged a receive error
 *   -ETIMEDOUT  sink did not reply (normal when nothing is connected)
 *
 * Acquires pps_mutex and (for eDP) VDD internally; callers hold neither.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        /* Data registers start immediately after the control register. */
        uint32_t ch_data = ch_ctl + 4;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for eg. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        intel_aux_display_runtime_get(dev_priv);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                WARN(1, "dp_aux_ch not started status 0x%08x\n",
                     I915_READ(ch_ctl));
                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        /* Walk the platform's AUX clock dividers until an attempt succeeds;
         * get_aux_clock_divider() returns 0 when the list is exhausted. */
        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(ch_data + i,
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
        /* Clamp to the caller's buffer; the sink may return more. */
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(ch_data + i),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
        intel_aux_display_runtime_put(dev_priv);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}
943
/* AUX request header: 3 bytes of request/address, plus one length byte
 * when the transaction carries a payload. */
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux transfer hook: pack @msg into the raw byte stream that
 * intel_dp_aux_ch() sends on the wire, run the transaction, and decode the
 * reply code and payload back into @msg.  Returns the payload size written
 * or read, or a negative errno from the channel.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        /* Header: 4-bit request code, 20-bit address, then length-1.
         * txbuf[3] is not transmitted for zero-size (address-only) writes. */
        txbuf[0] = (msg->request << 4) |
                ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
                /* Zero-size write is a bare address-only transaction. */
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        /* First reply byte: AUX/I2C ack bits in high nibble. */
                        msg->reply = rxbuf[0] >> 4;

                        if (ret > 1) {
                                /* Number of bytes written in a short write. */
                                ret = clamp_t(int, rxbuf[1], 0, msg->size);
                        } else {
                                /* Return payload size. */
                                ret = msg->size;
                        }
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                /* One extra byte for the reply code ahead of the data. */
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
1014
/*
 * Set up this port's AUX channel control register and register its
 * drm_dp_aux (and the backing i2c adapter) with the DRM core, plus a sysfs
 * link from the connector to the DDC adapter.  Registration failures are
 * logged but not propagated to the caller.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        const char *name = NULL;
        int ret;

        switch (port) {
        case PORT_A:
                intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
                name = "DPDDC-A";
                break;
        case PORT_B:
                intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
                name = "DPDDC-B";
                break;
        case PORT_C:
                intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
                name = "DPDDC-C";
                break;
        case PORT_D:
                intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
                name = "DPDDC-D";
                break;
        default:
                /* All DP-capable ports are handled above. */
                BUG();
        }

        /*
         * The AUX_CTL register is usually DP_CTL + 0x10.
         *
         * On Haswell and Broadwell though:
         *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
         *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
         *
         * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
         */
        if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

        intel_dp->aux.name = name;
        intel_dp->aux.dev = dev->dev;
        intel_dp->aux.transfer = intel_dp_aux_transfer;

        DRM_DEBUG_KMS("registering %s bus for %s\n", name,
                      connector->base.kdev->kobj.name);

        ret = drm_dp_aux_register(&intel_dp->aux);
        if (ret < 0) {
                DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
                          name, ret);
                return;
        }

        /* Expose the DDC adapter under the connector's sysfs directory;
         * undo the aux registration if the link can't be created. */
        ret = sysfs_create_link(&connector->base.kdev->kobj,
                                &intel_dp->aux.ddc.dev.kobj,
                                intel_dp->aux.ddc.dev.kobj.name);
        if (ret < 0) {
                DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
                drm_dp_aux_unregister(&intel_dp->aux);
        }
}
1079
/*
 * Connector unregister hook: remove the sysfs link to the DDC adapter and
 * run common connector unregistration.  MST connectors are skipped —
 * presumably they never had the link created (intel_dp_aux_init() makes
 * it); confirm against the MST connector setup path.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

        if (!intel_connector->mst_port)
                sysfs_remove_link(&intel_connector->base.kdev->kobj,
                                  intel_dp->aux.ddc.dev.kobj.name);
        intel_connector_unregister(intel_connector);
}
1090
1091 static void
1092 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1093 {
1094         u32 ctrl1;
1095
1096         memset(&pipe_config->dpll_hw_state, 0,
1097                sizeof(pipe_config->dpll_hw_state));
1098
1099         pipe_config->ddi_pll_sel = SKL_DPLL0;
1100         pipe_config->dpll_hw_state.cfgcr1 = 0;
1101         pipe_config->dpll_hw_state.cfgcr2 = 0;
1102
1103         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1104         switch (link_clock / 2) {
1105         case 81000:
1106                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1107                                               SKL_DPLL0);
1108                 break;
1109         case 135000:
1110                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1111                                               SKL_DPLL0);
1112                 break;
1113         case 270000:
1114                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1115                                               SKL_DPLL0);
1116                 break;
1117         case 162000:
1118                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1119                                               SKL_DPLL0);
1120                 break;
1121         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1122         results in CDCLK change. Need to handle the change of CDCLK by
1123         disabling pipes and re-enabling them */
1124         case 108000:
1125                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1126                                               SKL_DPLL0);
1127                 break;
1128         case 216000:
1129                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1130                                               SKL_DPLL0);
1131                 break;
1132
1133         }
1134         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1135 }
1136
1137 static void
1138 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1139 {
1140         memset(&pipe_config->dpll_hw_state, 0,
1141                sizeof(pipe_config->dpll_hw_state));
1142
1143         switch (link_bw) {
1144         case DP_LINK_BW_1_62:
1145                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1146                 break;
1147         case DP_LINK_BW_2_7:
1148                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1149                 break;
1150         case DP_LINK_BW_5_4:
1151                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1152                 break;
1153         }
1154 }
1155
1156 static int
1157 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1158 {
1159         if (intel_dp->num_sink_rates) {
1160                 *sink_rates = intel_dp->sink_rates;
1161                 return intel_dp->num_sink_rates;
1162         }
1163
1164         *sink_rates = default_rates;
1165
1166         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1167 }
1168
1169 static int
1170 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1171 {
1172         if (IS_SKYLAKE(dev)) {
1173                 *source_rates = skl_rates;
1174                 return ARRAY_SIZE(skl_rates);
1175         }
1176
1177         *source_rates = default_rates;
1178
1179         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1180                 /* WaDisableHBR2:skl */
1181                 return (DP_LINK_BW_2_7 >> 3) + 1;
1182         else if (INTEL_INFO(dev)->gen >= 8 ||
1183             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1184                 return (DP_LINK_BW_5_4 >> 3) + 1;
1185         else
1186                 return (DP_LINK_BW_2_7 >> 3) + 1;
1187 }
1188
1189 static void
1190 intel_dp_set_clock(struct intel_encoder *encoder,
1191                    struct intel_crtc_state *pipe_config, int link_bw)
1192 {
1193         struct drm_device *dev = encoder->base.dev;
1194         const struct dp_link_dpll *divisor = NULL;
1195         int i, count = 0;
1196
1197         if (IS_G4X(dev)) {
1198                 divisor = gen4_dpll;
1199                 count = ARRAY_SIZE(gen4_dpll);
1200         } else if (HAS_PCH_SPLIT(dev)) {
1201                 divisor = pch_dpll;
1202                 count = ARRAY_SIZE(pch_dpll);
1203         } else if (IS_CHERRYVIEW(dev)) {
1204                 divisor = chv_dpll;
1205                 count = ARRAY_SIZE(chv_dpll);
1206         } else if (IS_VALLEYVIEW(dev)) {
1207                 divisor = vlv_dpll;
1208                 count = ARRAY_SIZE(vlv_dpll);
1209         }
1210
1211         if (divisor && count) {
1212                 for (i = 0; i < count; i++) {
1213                         if (link_bw == divisor[i].link_bw) {
1214                                 pipe_config->dpll = divisor[i].dpll;
1215                                 pipe_config->clock_set = true;
1216                                 break;
1217                         }
1218                 }
1219         }
1220 }
1221
1222 static int intersect_rates(const int *source_rates, int source_len,
1223                            const int *sink_rates, int sink_len,
1224                            int *common_rates)
1225 {
1226         int i = 0, j = 0, k = 0;
1227
1228         while (i < source_len && j < sink_len) {
1229                 if (source_rates[i] == sink_rates[j]) {
1230                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1231                                 return k;
1232                         common_rates[k] = source_rates[i];
1233                         ++k;
1234                         ++i;
1235                         ++j;
1236                 } else if (source_rates[i] < sink_rates[j]) {
1237                         ++i;
1238                 } else {
1239                         ++j;
1240                 }
1241         }
1242         return k;
1243 }
1244
/*
 * Fill @common_rates with the link rates supported by both this source
 * and the attached sink; returns how many were written.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
                                 int *common_rates)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        const int *src_rates, *snk_rates;
        int n_src, n_snk;

        n_src = intel_dp_source_rates(dev, &src_rates);
        n_snk = intel_dp_sink_rates(intel_dp, &snk_rates);

        return intersect_rates(src_rates, n_src,
                               snk_rates, n_snk,
                               common_rates);
}
1259
/*
 * Format @nelem integers from @array into @str as a comma-separated list,
 * truncating silently when @len is exhausted.  @str is always
 * NUL-terminated (for len > 0).
 *
 * Fix: the old `r >= len` compared a signed int against a size_t; a
 * negative snprintf() error return only bailed out by accident of the
 * unsigned promotion.  Handle error and truncation explicitly.
 */
static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

                /* Stop on encoding error or once the output is truncated. */
                if (r < 0 || (size_t)r >= len)
                        return;
                str += r;
                len -= r;
        }
}
1275
/*
 * Dump the source, sink and common link-rate lists to the KMS debug log.
 * Does nothing unless KMS debugging is enabled, to avoid the formatting
 * cost on the common path.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        const int *source_rates, *sink_rates;
        int source_len, sink_len, common_len;
        int common_rates[DP_MAX_SUPPORTED_RATES];
        char str[128]; /* FIXME: too big for stack? */

        if ((drm_debug & DRM_UT_KMS) == 0)
                return;

        source_len = intel_dp_source_rates(dev, &source_rates);
        snprintf_int_array(str, sizeof(str), source_rates, source_len);
        DRM_DEBUG_KMS("source rates: %s\n", str);

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
        DRM_DEBUG_KMS("sink rates: %s\n", str);

        common_len = intel_dp_common_rates(intel_dp, common_rates);
        snprintf_int_array(str, sizeof(str), common_rates, common_len);
        DRM_DEBUG_KMS("common rates: %s\n", str);
}
1299
1300 static int rate_to_index(int find, const int *rates)
1301 {
1302         int i = 0;
1303
1304         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1305                 if (find == rates[i])
1306                         break;
1307
1308         return i;
1309 }
1310
1311 int
1312 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1313 {
1314         int rates[DP_MAX_SUPPORTED_RATES] = {};
1315         int len;
1316
1317         len = intel_dp_common_rates(intel_dp, rates);
1318         if (WARN_ON(len <= 0))
1319                 return 162000;
1320
1321         return rates[rate_to_index(0, rates) - 1];
1322 }
1323
/*
 * Map @rate to its index in the sink's advertised rate table.  Assumes
 * @rate is present in intel_dp->sink_rates; otherwise rate_to_index()
 * yields DP_MAX_SUPPORTED_RATES (out of table range).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
        return rate_to_index(rate, intel_dp->sink_rates);
}
1328
1329 bool
1330 intel_dp_compute_config(struct intel_encoder *encoder,
1331                         struct intel_crtc_state *pipe_config)
1332 {
1333         struct drm_device *dev = encoder->base.dev;
1334         struct drm_i915_private *dev_priv = dev->dev_private;
1335         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1336         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1337         enum port port = dp_to_dig_port(intel_dp)->port;
1338         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1339         struct intel_connector *intel_connector = intel_dp->attached_connector;
1340         int lane_count, clock;
1341         int min_lane_count = 1;
1342         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1343         /* Conveniently, the link BW constants become indices with a shift...*/
1344         int min_clock = 0;
1345         int max_clock;
1346         int bpp, mode_rate;
1347         int link_avail, link_clock;
1348         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1349         int common_len;
1350
1351         common_len = intel_dp_common_rates(intel_dp, common_rates);
1352
1353         /* No common link rates between source and sink */
1354         WARN_ON(common_len <= 0);
1355
1356         max_clock = common_len - 1;
1357
1358         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1359                 pipe_config->has_pch_encoder = true;
1360
1361         pipe_config->has_dp_encoder = true;
1362         pipe_config->has_drrs = false;
1363         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1364
1365         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1366                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1367                                        adjusted_mode);
1368
1369                 if (INTEL_INFO(dev)->gen >= 9) {
1370                         int ret;
1371                         ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1372                         if (ret)
1373                                 return ret;
1374                 }
1375
1376                 if (!HAS_PCH_SPLIT(dev))
1377                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1378                                                  intel_connector->panel.fitting_mode);
1379                 else
1380                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1381                                                 intel_connector->panel.fitting_mode);
1382         }
1383
1384         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1385                 return false;
1386
1387         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1388                       "max bw %d pixel clock %iKHz\n",
1389                       max_lane_count, common_rates[max_clock],
1390                       adjusted_mode->crtc_clock);
1391
1392         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1393          * bpc in between. */
1394         bpp = pipe_config->pipe_bpp;
1395         if (is_edp(intel_dp)) {
1396                 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1397                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1398                                       dev_priv->vbt.edp_bpp);
1399                         bpp = dev_priv->vbt.edp_bpp;
1400                 }
1401
1402                 /*
1403                  * Use the maximum clock and number of lanes the eDP panel
1404                  * advertizes being capable of. The panels are generally
1405                  * designed to support only a single clock and lane
1406                  * configuration, and typically these values correspond to the
1407                  * native resolution of the panel.
1408                  */
1409                 min_lane_count = max_lane_count;
1410                 min_clock = max_clock;
1411         }
1412
1413         for (; bpp >= 6*3; bpp -= 2*3) {
1414                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1415                                                    bpp);
1416
1417                 for (clock = min_clock; clock <= max_clock; clock++) {
1418                         for (lane_count = min_lane_count;
1419                                 lane_count <= max_lane_count;
1420                                 lane_count <<= 1) {
1421
1422                                 link_clock = common_rates[clock];
1423                                 link_avail = intel_dp_max_data_rate(link_clock,
1424                                                                     lane_count);
1425
1426                                 if (mode_rate <= link_avail) {
1427                                         goto found;
1428                                 }
1429                         }
1430                 }
1431         }
1432
1433         return false;
1434
1435 found:
1436         if (intel_dp->color_range_auto) {
1437                 /*
1438                  * See:
1439                  * CEA-861-E - 5.1 Default Encoding Parameters
1440                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1441                  */
1442                 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1443                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
1444                 else
1445                         intel_dp->color_range = 0;
1446         }
1447
1448         if (intel_dp->color_range)
1449                 pipe_config->limited_color_range = true;
1450
1451         intel_dp->lane_count = lane_count;
1452
1453         if (intel_dp->num_sink_rates) {
1454                 intel_dp->link_bw = 0;
1455                 intel_dp->rate_select =
1456                         intel_dp_rate_select(intel_dp, common_rates[clock]);
1457         } else {
1458                 intel_dp->link_bw =
1459                         drm_dp_link_rate_to_bw_code(common_rates[clock]);
1460                 intel_dp->rate_select = 0;
1461         }
1462
1463         pipe_config->pipe_bpp = bpp;
1464         pipe_config->port_clock = common_rates[clock];
1465
1466         DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1467                       intel_dp->link_bw, intel_dp->lane_count,
1468                       pipe_config->port_clock, bpp);
1469         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1470                       mode_rate, link_avail);
1471
1472         intel_link_compute_m_n(bpp, lane_count,
1473                                adjusted_mode->crtc_clock,
1474                                pipe_config->port_clock,
1475                                &pipe_config->dp_m_n);
1476
1477         if (intel_connector->panel.downclock_mode != NULL &&
1478                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1479                         pipe_config->has_drrs = true;
1480                         intel_link_compute_m_n(bpp, lane_count,
1481                                 intel_connector->panel.downclock_mode->clock,
1482                                 pipe_config->port_clock,
1483                                 &pipe_config->dp_m2_n2);
1484         }
1485
1486         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1487                 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1488         else if (IS_BROXTON(dev))
1489                 /* handled in ddi */;
1490         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1491                 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1492         else
1493                 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1494
1495         return true;
1496 }
1497
/*
 * Select the CPU eDP PLL frequency in DP_A to match the configured port
 * clock (160 MHz class for 162000 kHz links, 270 MHz otherwise) and mirror
 * the chosen bits into intel_dp->DP for the later port register write.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
                      crtc->config->port_clock);
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (crtc->config->port_clock == 162000) {
                /* For a long time we've carried around a ILK-DevA w/a for the
                 * 160MHz clock. If we're really unlucky, it's still required.
                 */
                DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        }

        I915_WRITE(DP_A, dpa_ctl);

        POSTING_READ(DP_A);
        /* NOTE(review): 500us settle delay is inherited; not verified
         * against a documented PLL lock time here. */
        udelay(500);
}
1528
/*
 * Compute the DP port register value for the upcoming modeset and cache it
 * in intel_dp->DP; nothing except TRANS_DP_CTL (CPT path) is written to
 * hardware here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        /* Split out the IBX/CPU vs CPT settings */

        /* IVB CPU port A: sync polarity and pipe select live here. */
        if (IS_GEN7(dev) && port == PORT_A) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                /* CPT PCH: enhanced framing moved to TRANS_DP_CTL. */
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                /* IBX PCH / CPU / VLV / CHV format. */
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
                        intel_dp->DP |= intel_dp->color_range;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
1610
/*
 * Panel power sequencer (PPS) status targets: each *_MASK selects the
 * PP_STATUS bits of interest and the matching *_VALUE is what those bits
 * must read once the sequencer has settled in the named state.  The
 * literal 0 columns keep the three definitions aligned field-for-field.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1619
1620 static void wait_panel_status(struct intel_dp *intel_dp,
1621                                        u32 mask,
1622                                        u32 value)
1623 {
1624         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1625         struct drm_i915_private *dev_priv = dev->dev_private;
1626         u32 pp_stat_reg, pp_ctrl_reg;
1627
1628         lockdep_assert_held(&dev_priv->pps_mutex);
1629
1630         pp_stat_reg = _pp_stat_reg(intel_dp);
1631         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1632
1633         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1634                         mask, value,
1635                         I915_READ(pp_stat_reg),
1636                         I915_READ(pp_ctrl_reg));
1637
1638         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1639                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1640                                 I915_READ(pp_stat_reg),
1641                                 I915_READ(pp_ctrl_reg));
1642         }
1643
1644         DRM_DEBUG_KMS("Wait complete\n");
1645 }
1646
/* Block until the PPS reports the panel powered on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1652
/* Block until the PPS reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1658
/*
 * Block until the required panel power-cycle delay (measured from the
 * last power-down, see last_power_cycle) has elapsed and the PPS is in
 * the off/idle state, so a new power-on sequence may start.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1670
/* Honour the panel-power-on -> backlight-on delay before enabling BLC. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1676
/* Honour the backlight-off delay measured from last_backlight_off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1682
1683 /* Read the current pp_control value, unlocking the register if it
1684  * is locked
1685  */
1686
1687 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1688 {
1689         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1690         struct drm_i915_private *dev_priv = dev->dev_private;
1691         u32 control;
1692
1693         lockdep_assert_held(&dev_priv->pps_mutex);
1694
1695         control = I915_READ(_pp_ctrl_reg(intel_dp));
1696         control &= ~PANEL_UNLOCK_MASK;
1697         control |= PANEL_UNLOCK_REGS;
1698         return control;
1699 }
1700
1701 /*
1702  * Must be paired with edp_panel_vdd_off().
1703  * Must hold pps_mutex around the whole on/off sequence.
1704  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1705  */
1706 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1707 {
1708         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1709         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1710         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1711         struct drm_i915_private *dev_priv = dev->dev_private;
1712         enum intel_display_power_domain power_domain;
1713         u32 pp;
1714         u32 pp_stat_reg, pp_ctrl_reg;
1715         bool need_to_disable = !intel_dp->want_panel_vdd;
1716
1717         lockdep_assert_held(&dev_priv->pps_mutex);
1718
1719         if (!is_edp(intel_dp))
1720                 return false;
1721
1722         cancel_delayed_work(&intel_dp->panel_vdd_work);
1723         intel_dp->want_panel_vdd = true;
1724
1725         if (edp_have_panel_vdd(intel_dp))
1726                 return need_to_disable;
1727
1728         power_domain = intel_display_port_power_domain(intel_encoder);
1729         intel_display_power_get(dev_priv, power_domain);
1730
1731         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1732                       port_name(intel_dig_port->port));
1733
1734         if (!edp_have_panel_power(intel_dp))
1735                 wait_panel_power_cycle(intel_dp);
1736
1737         pp = ironlake_get_pp_control(intel_dp);
1738         pp |= EDP_FORCE_VDD;
1739
1740         pp_stat_reg = _pp_stat_reg(intel_dp);
1741         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1742
1743         I915_WRITE(pp_ctrl_reg, pp);
1744         POSTING_READ(pp_ctrl_reg);
1745         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1746                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1747         /*
1748          * If the panel wasn't on, delay before accessing aux channel
1749          */
1750         if (!edp_have_panel_power(intel_dp)) {
1751                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1752                               port_name(intel_dig_port->port));
1753                 msleep(intel_dp->panel_power_up_delay);
1754         }
1755
1756         return need_to_disable;
1757 }
1758
1759 /*
1760  * Must be paired with intel_edp_panel_vdd_off() or
1761  * intel_edp_panel_off().
1762  * Nested calls to these functions are not allowed since
1763  * we drop the lock. Caller must use some higher level
1764  * locking to prevent nested calls from other threads.
1765  */
1766 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1767 {
1768         bool vdd;
1769
1770         if (!is_edp(intel_dp))
1771                 return;
1772
1773         pps_lock(intel_dp);
1774         vdd = edp_panel_vdd_on(intel_dp);
1775         pps_unlock(intel_dp);
1776
1777         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1778              port_name(dp_to_dig_port(intel_dp)->port));
1779 }
1780
/*
 * Turn the panel VDD force-off immediately.  Only legal once nobody
 * wants VDD anymore (want_panel_vdd cleared, hence the WARN_ON);
 * releases the power domain reference taken when VDD was forced on.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with panel power already off starts a power cycle;
	 * record it so the next power-on honours the cycle delay. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1821
/*
 * Delayed-work handler scheduled by edp_panel_vdd_schedule_off():
 * drops VDD, unless someone re-requested it in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1832
1833 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1834 {
1835         unsigned long delay;
1836
1837         /*
1838          * Queue the timer to fire a long time from now (relative to the power
1839          * down delay) to keep the panel power up across a sequence of
1840          * operations.
1841          */
1842         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1843         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1844 }
1845
1846 /*
1847  * Must be paired with edp_panel_vdd_on().
1848  * Must hold pps_mutex around the whole on/off sequence.
1849  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1850  */
1851 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1852 {
1853         struct drm_i915_private *dev_priv =
1854                 intel_dp_to_dev(intel_dp)->dev_private;
1855
1856         lockdep_assert_held(&dev_priv->pps_mutex);
1857
1858         if (!is_edp(intel_dp))
1859                 return;
1860
1861         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1862              port_name(dp_to_dig_port(intel_dp)->port));
1863
1864         intel_dp->want_panel_vdd = false;
1865
1866         if (sync)
1867                 edp_panel_vdd_off_sync(intel_dp);
1868         else
1869                 edp_panel_vdd_schedule_off(intel_dp);
1870 }
1871
/*
 * Turn eDP panel power on and wait for the power-on sequence to finish.
 * Caller must hold pps_mutex; warns and bails if power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* The backlight-on delay is measured from this timestamp. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1919
/* Public wrapper: turn eDP panel power on under the PPS lock. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1929
1930
/*
 * Turn eDP panel power off and wait for the power-down sequence to
 * finish.  Caller must hold pps_mutex and must have VDD forced on
 * (hence the WARN); this clears the VDD request and drops the power
 * domain reference that was taken when VDD was enabled.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* The next power-on must honour the cycle delay from this point. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1972
/* Public wrapper: turn eDP panel power off under the PPS lock. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
1982
1983 /* Enable backlight in the panel power control. */
1984 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1985 {
1986         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1987         struct drm_device *dev = intel_dig_port->base.base.dev;
1988         struct drm_i915_private *dev_priv = dev->dev_private;
1989         u32 pp;
1990         u32 pp_ctrl_reg;
1991
1992         /*
1993          * If we enable the backlight right away following a panel power
1994          * on, we may see slight flicker as the panel syncs with the eDP
1995          * link.  So delay a bit to make sure the image is solid before
1996          * allowing it to appear.
1997          */
1998         wait_backlight_on(intel_dp);
1999
2000         pps_lock(intel_dp);
2001
2002         pp = ironlake_get_pp_control(intel_dp);
2003         pp |= EDP_BLC_ENABLE;
2004
2005         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2006
2007         I915_WRITE(pp_ctrl_reg, pp);
2008         POSTING_READ(pp_ctrl_reg);
2009
2010         pps_unlock(intel_dp);
2011 }
2012
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control enable bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2024
2025 /* Disable backlight in the panel power control. */
2026 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2027 {
2028         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2029         struct drm_i915_private *dev_priv = dev->dev_private;
2030         u32 pp;
2031         u32 pp_ctrl_reg;
2032
2033         if (!is_edp(intel_dp))
2034                 return;
2035
2036         pps_lock(intel_dp);
2037
2038         pp = ironlake_get_pp_control(intel_dp);
2039         pp &= ~EDP_BLC_ENABLE;
2040
2041         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2042
2043         I915_WRITE(pp_ctrl_reg, pp);
2044         POSTING_READ(pp_ctrl_reg);
2045
2046         pps_unlock(intel_dp);
2047
2048         intel_dp->last_backlight_off = jiffies;
2049         edp_wait_backlight_off(intel_dp);
2050 }
2051
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse of intel_edp_backlight_on(): PP control, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2063
2064 /*
2065  * Hook for controlling the panel power control backlight through the bl_power
2066  * sysfs attribute. Take care to handle multiple calls.
2067  */
2068 static void intel_edp_backlight_power(struct intel_connector *connector,
2069                                       bool enable)
2070 {
2071         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2072         bool is_enabled;
2073
2074         pps_lock(intel_dp);
2075         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2076         pps_unlock(intel_dp);
2077
2078         if (is_enabled == enable)
2079                 return;
2080
2081         DRM_DEBUG_KMS("panel power control backlight %s\n",
2082                       enable ? "enable" : "disable");
2083
2084         if (enable)
2085                 _intel_edp_backlight_on(intel_dp);
2086         else
2087                 _intel_edp_backlight_off(intel_dp);
2088 }
2089
/*
 * Enable the CPU eDP PLL via DP_A.  The pipe must still be disabled
 * (asserted below), and both the PLL and the port must currently be
 * off.  The 200us delay lets the PLL settle before the port is used.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2115
/*
 * Disable the CPU eDP PLL via DP_A.  The pipe must already be disabled
 * and the port off, with the PLL still running (asserted below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
2140
2141 /* If the sink supports it, try to set the power state appropriately */
2142 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2143 {
2144         int ret, i;
2145
2146         /* Should have a valid DPCD by this point */
2147         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2148                 return;
2149
2150         if (mode != DRM_MODE_DPMS_ON) {
2151                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2152                                          DP_SET_POWER_D3);
2153         } else {
2154                 /*
2155                  * When turning on, we need to retry for 1ms to give the sink
2156                  * time to wake up.
2157                  */
2158                 for (i = 0; i < 3; i++) {
2159                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2160                                                  DP_SET_POWER_D0);
2161                         if (ret == 1)
2162                                 break;
2163                         msleep(1);
2164                 }
2165         }
2166
2167         if (ret != 1)
2168                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2169                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2170 }
2171
/*
 * Read back whether this DP port is enabled in hardware and, if so,
 * which pipe drives it.  Returns false when the port's power domain is
 * down or the port enable bit is clear.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		/* Gen7 eDP (port A) uses the CPT-style pipe select field. */
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* On CPT the pipe<->port routing lives in TRANS_DP_CTL,
		 * so scan all pipes for the transcoder bound to this port. */
		enum pipe p;

		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		/* NOTE(review): when no pipe matches we still fall through
		 * and return true with *pipe left untouched — preserved
		 * as-is, but callers should be aware of this quirk. */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2214
/*
 * Read the current DP configuration out of the hardware into
 * @pipe_config: sync polarity, audio enable, color range, link M/N,
 * port clock and derived dot clock.  Used for state readout and
 * cross-checking against the software-tracked state.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT PCH ports the sync polarity lives in the transcoder's
	 * TRANS_DP_CTL register rather than in the port register. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A: derive the link clock from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2298
/*
 * Encoder disable hook: tear down audio and (non-DDI) PSR, then power
 * the panel off with VDD held across the sequence, and on pre-gen5
 * hardware additionally take the port down before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	/* PSR is only disabled here when there is no DDI-specific path. */
	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2322
/*
 * ILK-style post-disable hook: take the DP link down after the pipe,
 * and for port A (eDP) also turn the eDP PLL off.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2332
/* VLV post-disable hook: just take the DP link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2339
/*
 * CHV post-disable hook: take the DP link down, then put the PHY data
 * lanes into reset through sideband (DPIO) writes on both PCS pairs.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	/* Sideband accesses must be serialized with the sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Assert the lane reset bits on both lane pairs. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2375
/*
 * Program the requested DP link training pattern for the port.
 *
 * @DP: in/out copy of the DP port register value. On non-DDI platforms
 *      only this in-memory value is updated and the caller is
 *      responsible for writing it to the port register. On DDI
 *      platforms the pattern lives in DP_TP_CTL and is written to the
 *      hardware here directly.
 * @dp_train_pat: training pattern (DP_TRAINING_PATTERN_MASK bits),
 *      optionally ORed with DP_LINK_SCRAMBLING_DISABLE. Note the
 *      scrambling flag is only honored on the DDI path.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		/* DDI: training pattern is controlled via DP_TP_CTL */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT PCH ports and gen7 port A use the _CPT train bits */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* TPS3 not supported on this path; fall back to pattern 2 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* g4x/VLV/CHV style train bits in the port register itself */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				/* only CHV supports TPS3 on this path */
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2459
/*
 * Enable the DP port with training pattern 1 selected, using the
 * two-step write sequence that VLV/CHV require (see comment below).
 * Expects intel_dp->DP to already hold the rest of the port register
 * configuration.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2483
/*
 * Core DP enable sequence: pick a power sequencer (VLV), enable the
 * port, power up the panel, run link training, and finally enable
 * audio if the crtc config asks for it.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	/*
	 * NOTE(review): lane_mask is always 0 here — presumably "don't
	 * wait for specific lanes"; confirm against the
	 * vlv_wait_port_ready() implementation.
	 */
	unsigned int lane_mask = 0x0;

	/* the port must not be enabled yet */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* bind a power sequencer to this pipe/port before using it */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* panel power up (eDP power sequencing helpers) */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);

	/* wake the sink and train the link */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2524
/*
 * g4x/ilk enable hook: full DP enable followed by the eDP backlight.
 * Backlight must come on only after link training has completed.
 */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
2532
/*
 * VLV/CHV enable hook: the port itself is already enabled from the
 * pre_enable hook (which calls intel_enable_dp()), so only the
 * backlight and PSR remain to be turned on here.
 */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}
2540
/*
 * g4x/ilk pre_enable hook: program the port register and, for CPU eDP
 * (port A), turn on the eDP PLL before the port is enabled.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	intel_dp_prepare(encoder);

	/* Only ilk+ has port A */
	if (dport->port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
2554
/*
 * Logically disconnect the power sequencer currently assigned to this
 * port: sync off any pending vdd, clear the sequencer's port select,
 * and mark the intel_dp as having no sequencer.
 * Caller holds the pps mutex (called from the steal/init paths below).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2580
/*
 * Detach the power sequencer of @pipe from whichever eDP port is
 * currently using it, so that the caller can take it over. Warns if
 * the sequencer is stolen from a port with active connectors.
 * Caller must hold pps_mutex (asserted below).
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* only pipes A and B have power sequencers on VLV/CHV */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* only eDP ports own power sequencers */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->connectors_active,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2617
/*
 * Assign the power sequencer of the crtc's pipe to this eDP port:
 * release any sequencer the port previously held, steal the target
 * pipe's sequencer from any other port, then initialize it for this
 * port. No-op for non-eDP ports or when already correctly assigned.
 * Caller must hold pps_mutex (asserted below).
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* already using the right sequencer? nothing to do */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2658
/*
 * VLV pre_enable hook: program per-port PCS settings through the
 * sideband (DPIO) interface and then run the full DP enable sequence
 * (the enable hook only handles backlight/PSR on VLV).
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the result of this read is immediately discarded
	 * by the "val = 0" below (which also makes the later &= ~(1<<21)
	 * a no-op). Presumably kept only for the sideband access itself,
	 * or simply dead code — confirm before removing.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2687
/*
 * VLV pre_pll_enable hook: program the port register and put the PHY
 * Tx lanes/clock into their default reset state via sideband before
 * the PLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2717
/*
 * CHV pre_enable hook: bring the PHY data lanes out of reset, program
 * lane latency and data lane stagger via sideband, then run the full
 * DP enable sequence.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the upar bit (on every lane except lane 1) */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming (value scales with port clock) */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(7) |
		       DPIO_TX2_STAGGER_MULT(5));

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2805
/*
 * CHV pre_pll_enable hook: program the port register, then set up the
 * PHY clock buffers and clock channel usage for the pipe/channel pair
 * via sideband before the PLL comes on.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->sb_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird: generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2871
2872 /*
2873  * Native read with retry for link status and receiver capability reads for
2874  * cases where the sink may still be asleep.
2875  *
2876  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2877  * supposed to retry 3 times per the spec.
2878  */
2879 static ssize_t
2880 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2881                         void *buffer, size_t size)
2882 {
2883         ssize_t ret;
2884         int i;
2885
2886         /*
2887          * Sometime we just get the same incorrect byte repeated
2888          * over the entire buffer. Doing just one throw away read
2889          * initially seems to "solve" it.
2890          */
2891         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2892
2893         for (i = 0; i < 3; i++) {
2894                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2895                 if (ret == size)
2896                         return ret;
2897                 msleep(1);
2898         }
2899
2900         return ret;
2901 }
2902
2903 /*
2904  * Fetch AUX CH registers 0x202 - 0x207 which contain
2905  * link status information
2906  */
2907 static bool
2908 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2909 {
2910         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2911                                        DP_LANE0_1_STATUS,
2912                                        link_status,
2913                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2914 }
2915
/* These are source-specific values. */
/*
 * Maximum voltage swing level the source hardware supports for this
 * port. The chain below is ordered by platform specificity, so the
 * order of the checks is part of the contract.
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9 eDP (port A) may opt into low-vswing mode */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
2939
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, per platform. Higher swing levels generally allow less
 * pre-emphasis headroom.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3007
/*
 * Translate the sink-requested voltage swing / pre-emphasis levels in
 * train_set[0] into VLV PHY demph/uniqtranscale/preemph register values
 * and program them via sideband. Returns 0 in all cases; an invalid
 * swing/pre-emphasis combination simply programs nothing.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* disable Tx, program the new values, then re-enable Tx */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3107
/*
 * chv_signal_levels - program CHV PHY vswing/pre-emphasis via sideband
 *
 * Translates the voltage-swing/pre-emphasis request cached in
 * intel_dp->train_set[0] into DPIO de-emphasis and swing-margin values
 * and writes them to all four TX lanes of the channel.  The levels are
 * applied entirely through vlv_dpio_write(); the function always
 * returns 0 so the caller merges nothing into the DP port register
 * (it pairs this with a zero mask).  Unsupported swing/emphasis
 * combinations return 0 without touching the hardware.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up the de-emphasis/margin pair for the requested levels. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* sb_lock serializes all sideband (DPIO) register access */
	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* Zero the TX margin in both PCS groups */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph (same value on all four lanes) */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin (same value on all four lanes) */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/*
	 * Only the maximum-swing / zero-pre-emphasis combination needs the
	 * unique transition scale re-enabled and programmed.
	 */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3281
3282 static void
3283 intel_get_adjust_train(struct intel_dp *intel_dp,
3284                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3285 {
3286         uint8_t v = 0;
3287         uint8_t p = 0;
3288         int lane;
3289         uint8_t voltage_max;
3290         uint8_t preemph_max;
3291
3292         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3293                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3294                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3295
3296                 if (this_v > v)
3297                         v = this_v;
3298                 if (this_p > p)
3299                         p = this_p;
3300         }
3301
3302         voltage_max = intel_dp_voltage_max(intel_dp);
3303         if (v >= voltage_max)
3304                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3305
3306         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3307         if (p >= preemph_max)
3308                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3309
3310         for (lane = 0; lane < 4; lane++)
3311                 intel_dp->train_set[lane] = v | p;
3312 }
3313
3314 static uint32_t
3315 gen4_signal_levels(uint8_t train_set)
3316 {
3317         uint32_t        signal_levels = 0;
3318
3319         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3320         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3321         default:
3322                 signal_levels |= DP_VOLTAGE_0_4;
3323                 break;
3324         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3325                 signal_levels |= DP_VOLTAGE_0_6;
3326                 break;
3327         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3328                 signal_levels |= DP_VOLTAGE_0_8;
3329                 break;
3330         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3331                 signal_levels |= DP_VOLTAGE_1_2;
3332                 break;
3333         }
3334         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3335         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3336         default:
3337                 signal_levels |= DP_PRE_EMPHASIS_0;
3338                 break;
3339         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3340                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3341                 break;
3342         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3343                 signal_levels |= DP_PRE_EMPHASIS_6;
3344                 break;
3345         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3346                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3347                 break;
3348         }
3349         return signal_levels;
3350 }
3351
3352 /* Gen6's DP voltage swing and pre-emphasis control */
3353 static uint32_t
3354 gen6_edp_signal_levels(uint8_t train_set)
3355 {
3356         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3357                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3358         switch (signal_levels) {
3359         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3360         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3361                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3362         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3363                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3364         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3365         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3366                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3367         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3368         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3369                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3370         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3371         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3372                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3373         default:
3374                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3375                               "0x%x\n", signal_levels);
3376                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3377         }
3378 }
3379
3380 /* Gen7's DP voltage swing and pre-emphasis control */
3381 static uint32_t
3382 gen7_edp_signal_levels(uint8_t train_set)
3383 {
3384         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3385                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3386         switch (signal_levels) {
3387         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3388                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3389         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3390                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3391         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3392                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3393
3394         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3395                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3396         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3397                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3398
3399         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3400                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3401         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3402                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3403
3404         default:
3405                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3406                               "0x%x\n", signal_levels);
3407                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3408         }
3409 }
3410
3411 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3412 static uint32_t
3413 hsw_signal_levels(uint8_t train_set)
3414 {
3415         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3416                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3417         switch (signal_levels) {
3418         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3419                 return DDI_BUF_TRANS_SELECT(0);
3420         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3421                 return DDI_BUF_TRANS_SELECT(1);
3422         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3423                 return DDI_BUF_TRANS_SELECT(2);
3424         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3425                 return DDI_BUF_TRANS_SELECT(3);
3426
3427         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3428                 return DDI_BUF_TRANS_SELECT(4);
3429         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3430                 return DDI_BUF_TRANS_SELECT(5);
3431         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3432                 return DDI_BUF_TRANS_SELECT(6);
3433
3434         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3435                 return DDI_BUF_TRANS_SELECT(7);
3436         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3437                 return DDI_BUF_TRANS_SELECT(8);
3438
3439         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3440                 return DDI_BUF_TRANS_SELECT(9);
3441         default:
3442                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3443                               "0x%x\n", signal_levels);
3444                 return DDI_BUF_TRANS_SELECT(0);
3445         }
3446 }
3447
/*
 * bxt_signal_levels - apply BXT DDI vswing for the requested levels
 *
 * Maps the swing/pre-emphasis request in train_set[0] onto a BXT
 * vswing table index (0-9) and hands it to bxt_ddi_vswing_sequence().
 * The levels are applied entirely out of band; the caller pairs this
 * with a zero register mask.
 */
static void bxt_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	enum port port = dport->port;
	struct drm_device *dev = dport->base.base.dev;
	struct intel_encoder *encoder = &dport->base;
	uint8_t train_set = intel_dp->train_set[0];
	uint32_t level = 0;

	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
		/* fall through: unsupported combinations use level 0 */
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		level = 0;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		level = 1;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		level = 2;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
		level = 3;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		level = 4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		level = 5;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		level = 6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		level = 7;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		level = 8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		level = 9;
		break;
	}

	bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
}
3496
/*
 * Properly updates "DP" with the correct signal levels.
 *
 * Dispatches to the platform-specific level computation for
 * train_set[0] and merges the result into the port register value *DP
 * under the platform's mask.  BXT, CHV and VLV program their levels
 * entirely out of band (sideband/vswing sequence) and report a zero
 * mask, so *DP is left unchanged on those platforms.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_BROXTON(dev)) {
		signal_levels = 0;
		bxt_signal_levels(intel_dp);
		mask = 0;
	} else if (HAS_DDI(dev)) {
		signal_levels = hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only meaningful when the levels live in the port register */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	*DP = (*DP & ~mask) | signal_levels;
}
3542
/*
 * Program dp_train_pat on both ends of the link: the source's port
 * register and the sink's DPCD.  DP_TRAINING_PATTERN_SET and, unless
 * training is being disabled, the per-lane DP_TRAINING_LANEx_SET
 * values (which follow it in the DPCD address space) are written in a
 * single AUX transfer.
 *
 * Returns true when the full DPCD write succeeded.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3575
/*
 * Start transmitting dp_train_pat with a fresh training set.  The set
 * is zeroed unless train_set_valid indicates cached values from a
 * previous successful training should be reused.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3585
/*
 * Apply the sink's requested swing/pre-emphasis adjustments: update
 * the training set from link_status, program the new levels into the
 * port register, then write DP_TRAINING_LANEx_SET over AUX.
 *
 * Returns true when every lane-set byte was written successfully.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3606
/*
 * Switch a DDI port to transmitting the idle pattern and wait for the
 * hardware to report idle-done.  No-op on non-DDI platforms, and the
 * wait is skipped on PORT_A (see below).
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3637
/*
 * Enable the corresponding port and run the clock-recovery phase of
 * link training (training pattern 1).
 *
 * Writes the link configuration (rate, lane count, 8b/10b) to the
 * sink's DPCD, then loops: wait the DPCD-specified delay, read link
 * status, and either finish (clock recovery OK), retry with adjusted
 * drive levels, or restart/abort.  Gives up after 5 full restarts at
 * maximum swing or 5 retries at the same voltage.  The final port
 * register value is cached in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks advertising individual rates select via DP_LINK_RATE_SET */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/* 0xff never matches a real swing value, so the first pass
	 * cannot count as a same-voltage retry. */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing: restart training from
			 * scratch, at most 5 times. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
3748
/*
 * Run the channel-equalization phase of link training (training
 * pattern 2, or 3 for HBR2 / TPS3-capable sinks).
 *
 * Loops adjusting drive levels until the sink reports channel-EQ done.
 * If clock recovery is lost, or 5 EQ attempts fail, the whole training
 * is restarted via intel_dp_start_link_train(); more than 5 such
 * restarts aborts.  On success train_set_valid is set so the values
 * can be reused next time.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3831
/* Stop transmitting any training pattern on source and sink. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3837
/*
 * Turn a (non-DDI) DP port off: step the link to the idle training
 * pattern, then clear DP_PORT_EN, applying the IBX transcoder
 * workaround on affected PCHs.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms have their own disable path; this must not run there. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop the link to the idle pattern (mask differs per platform)... */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* ...then disable the port and audio output entirely. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	/* Wait out the panel power-down delay before returning. */
	msleep(intel_dp->panel_power_down_delay);
}
3893
/*
 * Read and cache the sink's DPCD (DisplayPort Configuration Data) and
 * derive per-sink feature state from it: PSR/PSR2 support (eDP only),
 * TPS3 training-pattern support, eDP 1.4+ intermediate link rates, and
 * the downstream-port descriptors.
 *
 * Returns false if the initial AUX transfer fails, no DPCD is present,
 * or the downstream-port info cannot be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ sources */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			/* The rate table is zero-terminated */
			if (val == 0)
				break;

			/* DPCD link rates are in 200 kHz units, while drm
			 * stores link rates in deca-kHz (10 kHz) units:
			 * val * 200 kHz / 10. */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3988
3989 static void
3990 intel_dp_probe_oui(struct intel_dp *intel_dp)
3991 {
3992         u8 buf[3];
3993
3994         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3995                 return;
3996
3997         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3998                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3999                               buf[0], buf[1], buf[2]);
4000
4001         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4002                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4003                               buf[0], buf[1], buf[2]);
4004 }
4005
4006 static bool
4007 intel_dp_probe_mst(struct intel_dp *intel_dp)
4008 {
4009         u8 buf[1];
4010
4011         if (!intel_dp->can_mst)
4012                 return false;
4013
4014         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4015                 return false;
4016
4017         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4018                 if (buf[0] & DP_MST_CAP) {
4019                         DRM_DEBUG_KMS("Sink is MST capable\n");
4020                         intel_dp->is_mst = true;
4021                 } else {
4022                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4023                         intel_dp->is_mst = false;
4024                 }
4025         }
4026
4027         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4028         return intel_dp->is_mst;
4029 }
4030
/*
 * Ask the sink to compute a frame CRC and read it back over AUX.
 *
 * @crc: receives 6 bytes from DP_TEST_CRC_R_CR onward.
 *
 * Returns 0 on success, -ENOTTY when the sink lacks CRC support,
 * -ETIMEDOUT when the sink never produced a new CRC within 6 vblanks,
 * or -EIO on any AUX transfer failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;
	int ret = 0;

	/* NOTE(review): IPS is bracketed off around the measurement —
	 * presumably it perturbs the pipe output; confirm. */
	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	if (!(buf & DP_TEST_CRC_SUPPORTED)) {
		ret = -ENOTTY;
		goto out;
	}

	/* Start CRC calculation in the sink (read-modify-write TEST_SINK). */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0) {
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Snapshot the sink's CRC count... */
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* ...and wait, up to 6 vblanks, for it to change (a fresh CRC). */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Stop the sink's CRC calculation again. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		ret = -EIO;
		goto out;
	}
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		ret = -EIO;
		goto out;
	}
out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4105
4106 static bool
4107 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4108 {
4109         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4110                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4111                                        sink_irq_vector, 1) == 1;
4112 }
4113
4114 static bool
4115 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4116 {
4117         int ret;
4118
4119         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4120                                              DP_SINK_COUNT_ESI,
4121                                              sink_irq_vector, 14);
4122         if (ret != 14)
4123                 return false;
4124
4125         return true;
4126 }
4127
4128 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4129 {
4130         uint8_t test_result = DP_TEST_ACK;
4131         return test_result;
4132 }
4133
4134 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4135 {
4136         uint8_t test_result = DP_TEST_NAK;
4137         return test_result;
4138 }
4139
4140 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4141 {
4142         uint8_t test_result = DP_TEST_NAK;
4143         struct intel_connector *intel_connector = intel_dp->attached_connector;
4144         struct drm_connector *connector = &intel_connector->base;
4145
4146         if (intel_connector->detect_edid == NULL ||
4147             connector->edid_corrupt ||
4148             intel_dp->aux.i2c_defer_count > 6) {
4149                 /* Check EDID read for NACKs, DEFERs and corruption
4150                  * (DP CTS 1.2 Core r1.1)
4151                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4152                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4153                  *    4.2.2.6 : EDID corruption detected
4154                  * Use failsafe mode for all cases
4155                  */
4156                 if (intel_dp->aux.i2c_nack_count > 0 ||
4157                         intel_dp->aux.i2c_defer_count > 0)
4158                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4159                                       intel_dp->aux.i2c_nack_count,
4160                                       intel_dp->aux.i2c_defer_count);
4161                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4162         } else {
4163                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4164                                         DP_TEST_EDID_CHECKSUM,
4165                                         &intel_connector->detect_edid->checksum,
4166                                         1))
4167                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4168
4169                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4170                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4171         }
4172
4173         /* Set test active flag here so userspace doesn't interrupt things */
4174         intel_dp->compliance_test_active = 1;
4175
4176         return test_result;
4177 }
4178
4179 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4180 {
4181         uint8_t test_result = DP_TEST_NAK;
4182         return test_result;
4183 }
4184
4185 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4186 {
4187         uint8_t response = DP_TEST_NAK;
4188         uint8_t rxdata = 0;
4189         int status = 0;
4190
4191         intel_dp->compliance_test_active = 0;
4192         intel_dp->compliance_test_type = 0;
4193         intel_dp->compliance_test_data = 0;
4194
4195         intel_dp->aux.i2c_nack_count = 0;
4196         intel_dp->aux.i2c_defer_count = 0;
4197
4198         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4199         if (status <= 0) {
4200                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4201                 goto update_status;
4202         }
4203
4204         switch (rxdata) {
4205         case DP_TEST_LINK_TRAINING:
4206                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4207                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4208                 response = intel_dp_autotest_link_training(intel_dp);
4209                 break;
4210         case DP_TEST_LINK_VIDEO_PATTERN:
4211                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4212                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4213                 response = intel_dp_autotest_video_pattern(intel_dp);
4214                 break;
4215         case DP_TEST_LINK_EDID_READ:
4216                 DRM_DEBUG_KMS("EDID test requested\n");
4217                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4218                 response = intel_dp_autotest_edid(intel_dp);
4219                 break;
4220         case DP_TEST_LINK_PHY_TEST_PATTERN:
4221                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4222                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4223                 response = intel_dp_autotest_phy_pattern(intel_dp);
4224                 break;
4225         default:
4226                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4227                 break;
4228         }
4229
4230 update_status:
4231         status = drm_dp_dpcd_write(&intel_dp->aux,
4232                                    DP_TEST_RESPONSE,
4233                                    &response, 1);
4234         if (status <= 0)
4235                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4236 }
4237
/*
 * Handle an HPD IRQ while in MST mode: read the ESI block, retrain the
 * link when channel EQ is lost, forward the event to the MST topology
 * manager, ack the serviced bits, and loop while the sink keeps raising
 * new events.
 *
 * Returns the topology manager's result (or 0 when nothing was handled);
 * -EINVAL when not in MST mode or the ESI read failed, in which case MST
 * is torn down and a hotplug event is generated.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits back to the
				 * sink; retry short writes a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* New events may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4294
4295 /*
4296  * According to DP spec
4297  * 5.1.2:
4298  *  1. Read DPCD
4299  *  2. Configure link according to Receiver Capabilities
4300  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4301  *  4. Check link status on receipt of hot-plug interrupt
4302  */
4303 static void
4304 intel_dp_check_link_status(struct intel_dp *intel_dp)
4305 {
4306         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4307         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4308         u8 sink_irq_vector;
4309         u8 link_status[DP_LINK_STATUS_SIZE];
4310
4311         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4312
4313         if (!intel_encoder->connectors_active)
4314                 return;
4315
4316         if (WARN_ON(!intel_encoder->base.crtc))
4317                 return;
4318
4319         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4320                 return;
4321
4322         /* Try to read receiver status if the link appears to be up */
4323         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4324                 return;
4325         }
4326
4327         /* Now read the DPCD to see if it's actually running */
4328         if (!intel_dp_get_dpcd(intel_dp)) {
4329                 return;
4330         }
4331
4332         /* Try to read the source of the interrupt */
4333         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4334             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4335                 /* Clear interrupt source */
4336                 drm_dp_dpcd_writeb(&intel_dp->aux,
4337                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4338                                    sink_irq_vector);
4339
4340                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4341                         DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4342                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4343                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4344         }
4345
4346         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4347                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4348                               intel_encoder->base.name);
4349                 intel_dp_start_link_train(intel_dp);
4350                 intel_dp_complete_link_train(intel_dp);
4351                 intel_dp_stop_link_train(intel_dp);
4352         }
4353 }
4354
4355 /* XXX this is probably wrong for multiple downstream ports */
4356 static enum drm_connector_status
4357 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4358 {
4359         uint8_t *dpcd = intel_dp->dpcd;
4360         uint8_t type;
4361
4362         if (!intel_dp_get_dpcd(intel_dp))
4363                 return connector_status_disconnected;
4364
4365         /* if there's no downstream port, we're done */
4366         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4367                 return connector_status_connected;
4368
4369         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4370         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4371             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4372                 uint8_t reg;
4373
4374                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4375                                             &reg, 1) < 0)
4376                         return connector_status_unknown;
4377
4378                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4379                                               : connector_status_disconnected;
4380         }
4381
4382         /* If no HPD, poke DDC gently */
4383         if (drm_probe_ddc(&intel_dp->aux.ddc))
4384                 return connector_status_connected;
4385
4386         /* Well we tried, say unknown for unreliable port types */
4387         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4388                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4389                 if (type == DP_DS_PORT_TYPE_VGA ||
4390                     type == DP_DS_PORT_TYPE_NON_EDID)
4391                         return connector_status_unknown;
4392         } else {
4393                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4394                         DP_DWN_STRM_PORT_TYPE_MASK;
4395                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4396                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4397                         return connector_status_unknown;
4398         }
4399
4400         /* Anything else is out of spec, warn and ignore */
4401         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4402         return connector_status_disconnected;
4403 }
4404
4405 static enum drm_connector_status
4406 edp_detect(struct intel_dp *intel_dp)
4407 {
4408         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4409         enum drm_connector_status status;
4410
4411         status = intel_panel_detect(dev);
4412         if (status == connector_status_unknown)
4413                 status = connector_status_connected;
4414
4415         return status;
4416 }
4417
4418 static enum drm_connector_status
4419 ironlake_dp_detect(struct intel_dp *intel_dp)
4420 {
4421         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4422         struct drm_i915_private *dev_priv = dev->dev_private;
4423         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4424
4425         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4426                 return connector_status_disconnected;
4427
4428         return intel_dp_detect_dpcd(intel_dp);
4429 }
4430
4431 static int g4x_digital_port_connected(struct drm_device *dev,
4432                                        struct intel_digital_port *intel_dig_port)
4433 {
4434         struct drm_i915_private *dev_priv = dev->dev_private;
4435         uint32_t bit;
4436
4437         if (IS_VALLEYVIEW(dev)) {
4438                 switch (intel_dig_port->port) {
4439                 case PORT_B:
4440                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4441                         break;
4442                 case PORT_C:
4443                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4444                         break;
4445                 case PORT_D:
4446                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4447                         break;
4448                 default:
4449                         return -EINVAL;
4450                 }
4451         } else {
4452                 switch (intel_dig_port->port) {
4453                 case PORT_B:
4454                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4455                         break;
4456                 case PORT_C:
4457                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4458                         break;
4459                 case PORT_D:
4460                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4461                         break;
4462                 default:
4463                         return -EINVAL;
4464                 }
4465         }
4466
4467         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4468                 return 0;
4469         return 1;
4470 }
4471
4472 static enum drm_connector_status
4473 g4x_dp_detect(struct intel_dp *intel_dp)
4474 {
4475         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4476         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4477         int ret;
4478
4479         /* Can't disconnect eDP, but you can close the lid... */
4480         if (is_edp(intel_dp)) {
4481                 enum drm_connector_status status;
4482
4483                 status = intel_panel_detect(dev);
4484                 if (status == connector_status_unknown)
4485                         status = connector_status_connected;
4486                 return status;
4487         }
4488
4489         ret = g4x_digital_port_connected(dev, intel_dig_port);
4490         if (ret == -EINVAL)
4491                 return connector_status_unknown;
4492         else if (ret == 0)
4493                 return connector_status_disconnected;
4494
4495         return intel_dp_detect_dpcd(intel_dp);
4496 }
4497
4498 static struct edid *
4499 intel_dp_get_edid(struct intel_dp *intel_dp)
4500 {
4501         struct intel_connector *intel_connector = intel_dp->attached_connector;
4502
4503         /* use cached edid if we have one */
4504         if (intel_connector->edid) {
4505                 /* invalid edid */
4506                 if (IS_ERR(intel_connector->edid))
4507                         return NULL;
4508
4509                 return drm_edid_duplicate(intel_connector->edid);
4510         } else
4511                 return drm_get_edid(&intel_connector->base,
4512                                     &intel_dp->aux.ddc);
4513 }
4514
4515 static void
4516 intel_dp_set_edid(struct intel_dp *intel_dp)
4517 {
4518         struct intel_connector *intel_connector = intel_dp->attached_connector;
4519         struct edid *edid;
4520
4521         edid = intel_dp_get_edid(intel_dp);
4522         intel_connector->detect_edid = edid;
4523
4524         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4525                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4526         else
4527                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4528 }
4529
4530 static void
4531 intel_dp_unset_edid(struct intel_dp *intel_dp)
4532 {
4533         struct intel_connector *intel_connector = intel_dp->attached_connector;
4534
4535         kfree(intel_connector->detect_edid);
4536         intel_connector->detect_edid = NULL;
4537
4538         intel_dp->has_audio = false;
4539 }
4540
4541 static enum intel_display_power_domain
4542 intel_dp_power_get(struct intel_dp *dp)
4543 {
4544         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4545         enum intel_display_power_domain power_domain;
4546
4547         power_domain = intel_display_port_power_domain(encoder);
4548         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4549
4550         return power_domain;
4551 }
4552
4553 static void
4554 intel_dp_power_put(struct intel_dp *dp,
4555                    enum intel_display_power_domain power_domain)
4556 {
4557         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4558         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4559 }
4560
/*
 * Connector ->detect() hook: determine whether a sink is present.
 *
 * Holds the port's power domain for the duration of the probe. For a
 * connected SST sink this also reads the OUIs, caches the EDID (which
 * sets has_audio) and services a pending automated test request; sinks
 * operating in MST mode report disconnected here, since their monitors
 * are exposed through separate MST connectors.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Keep the port powered while we poke at it below. */
	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4632
4633 static void
4634 intel_dp_force(struct drm_connector *connector)
4635 {
4636         struct intel_dp *intel_dp = intel_attached_dp(connector);
4637         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4638         enum intel_display_power_domain power_domain;
4639
4640         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4641                       connector->base.id, connector->name);
4642         intel_dp_unset_edid(intel_dp);
4643
4644         if (connector->status != connector_status_connected)
4645                 return;
4646
4647         power_domain = intel_dp_power_get(intel_dp);
4648
4649         intel_dp_set_edid(intel_dp);
4650
4651         intel_dp_power_put(intel_dp, power_domain);
4652
4653         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4654                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4655 }
4656
4657 static int intel_dp_get_modes(struct drm_connector *connector)
4658 {
4659         struct intel_connector *intel_connector = to_intel_connector(connector);
4660         struct edid *edid;
4661
4662         edid = intel_connector->detect_edid;
4663         if (edid) {
4664                 int ret = intel_connector_update_modes(connector, edid);
4665                 if (ret)
4666                         return ret;
4667         }
4668
4669         /* if eDP has no EDID, fall back to fixed mode */
4670         if (is_edp(intel_attached_dp(connector)) &&
4671             intel_connector->panel.fixed_mode) {
4672                 struct drm_display_mode *mode;
4673
4674                 mode = drm_mode_duplicate(connector->dev,
4675                                           intel_connector->panel.fixed_mode);
4676                 if (mode) {
4677                         drm_mode_probed_add(connector, mode);
4678                         return 1;
4679                 }
4680         }
4681
4682         return 0;
4683 }
4684
4685 static bool
4686 intel_dp_detect_audio(struct drm_connector *connector)
4687 {
4688         bool has_audio = false;
4689         struct edid *edid;
4690
4691         edid = to_intel_connector(connector)->detect_edid;
4692         if (edid)
4693                 has_audio = drm_detect_monitor_audio(edid);
4694
4695         return has_audio;
4696 }
4697
/*
 * drm_connector .set_property hook: handle the force-audio, broadcast-RGB
 * and (eDP only) scaling-mode properties. Returns 0 on success or a
 * negative errno. When driver state actually changed and the encoder has
 * a CRTC, a modeset restore is triggered to apply the new value.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object before touching our state. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No change requested -> nothing to do. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO defers to the sink's EDID audio capability. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Effective audio state unchanged -> no modeset needed. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		/* Unscaled output is not supported on eDP. */
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	/* Property not handled by this connector. */
	return -EINVAL;

done:
	/* Apply the new state by redoing the modeset on the active CRTC. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4785
/*
 * drm_connector .destroy hook: free cached EDIDs, tear down the eDP panel
 * state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* EDID cached by the last detect; kfree(NULL) is a no-op. */
	kfree(intel_connector->detect_edid);

	/* intel_connector->edid may hold an ERR_PTR sentinel - never free that. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4804
/*
 * drm_encoder .destroy hook: unregister the AUX channel and MST state,
 * quiesce eDP panel power (VDD) and the reboot notifier, then free the
 * digital port. Ordering matters: the delayed VDD-off work must be
 * cancelled before forcing VDD off under the pps lock.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		/* Tear down the reboot notifier registered for eDP panels. */
		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4830
4831 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4832 {
4833         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4834
4835         if (!is_edp(intel_dp))
4836                 return;
4837
4838         /*
4839          * vdd might still be enabled do to the delayed vdd off.
4840          * Make sure vdd is actually turned off here.
4841          */
4842         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4843         pps_lock(intel_dp);
4844         edp_panel_vdd_off_sync(intel_dp);
4845         pps_unlock(intel_dp);
4846 }
4847
/*
 * Reconcile our VDD state tracking with what the BIOS left behind: if VDD
 * is already on at boot/resume, take the matching power domain reference
 * and schedule a delayed VDD off so the reference is eventually dropped.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to sanitize if VDD is already off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4872
/*
 * drm_encoder .reset hook: re-sync eDP power sequencer state after a
 * reset/resume, since the BIOS may have reassigned the power sequencer
 * or left VDD enabled. Non-eDP encoders have nothing to do here.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4895
/* drm_connector callbacks shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4907
/* Probe helpers (mode enumeration/validation) for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4913
/* drm_encoder callbacks for the DP encoder. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4918
/*
 * Intentionally a no-op: DP hotplug processing is done elsewhere
 * (see intel_dp_hpd_pulse()), but the hook must still exist.
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
4924
/*
 * HPD interrupt handler for a DP digital port. A long pulse means the
 * sink may have been (un)plugged: re-check presence, re-read the DPCD and
 * re-probe MST. A short pulse means the sink raised an event: service MST
 * messages or re-check link status. Returns IRQ_HANDLED when the pulse
 * was consumed, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Normalize the encoder type for non-eDP ports. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		/* Confirm something is actually connected to the port. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		/* Re-read sink capabilities after the (re)plug. */
		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		/* Short pulse: service MST sideband messages first. */
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	/* Balance the intel_display_power_get() above on every exit path. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5010
5011 /* Return which DP Port should be selected for Transcoder DP control */
5012 int
5013 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5014 {
5015         struct drm_device *dev = crtc->dev;
5016         struct intel_encoder *intel_encoder;
5017         struct intel_dp *intel_dp;
5018
5019         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5020                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5021
5022                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5023                     intel_encoder->type == INTEL_OUTPUT_EDP)
5024                         return intel_dp->output_reg;
5025         }
5026
5027         return -1;
5028 }
5029
5030 /* check the VBT to see whether the eDP is on DP-D port */
5031 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5032 {
5033         struct drm_i915_private *dev_priv = dev->dev_private;
5034         union child_device_config *p_child;
5035         int i;
5036         static const short port_mapping[] = {
5037                 [PORT_B] = PORT_IDPB,
5038                 [PORT_C] = PORT_IDPC,
5039                 [PORT_D] = PORT_IDPD,
5040         };
5041
5042         if (port == PORT_A)
5043                 return true;
5044
5045         if (!dev_priv->vbt.child_dev_num)
5046                 return false;
5047
5048         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5049                 p_child = dev_priv->vbt.child_dev + i;
5050
5051                 if (p_child->common.dvo_port == port_mapping[port] &&
5052                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5053                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5054                         return true;
5055         }
5056         return false;
5057 }
5058
5059 void
5060 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5061 {
5062         struct intel_connector *intel_connector = to_intel_connector(connector);
5063
5064         intel_attach_force_audio_property(connector);
5065         intel_attach_broadcast_rgb_property(connector);
5066         intel_dp->color_range_auto = true;
5067
5068         if (is_edp(intel_dp)) {
5069                 drm_mode_create_scaling_mode_property(connector->dev);
5070                 drm_object_attach_property(
5071                         &connector->base,
5072                         connector->dev->mode_config.scaling_mode_property,
5073                         DRM_MODE_SCALE_ASPECT);
5074                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5075         }
5076 }
5077
5078 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5079 {
5080         intel_dp->last_power_cycle = jiffies;
5081         intel_dp->last_power_on = jiffies;
5082         intel_dp->last_backlight_off = jiffies;
5083 }
5084
/*
 * Compute the eDP panel power sequencing delays (pps_delays) by taking,
 * per field, the max of the current hardware register values and the VBT,
 * falling back to the eDP spec limits when both are zero. Also derives the
 * millisecond delay values used by the software waits. Runs once; caller
 * must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the register set: PCH platforms vs. VLV/CHV per-pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* Register value is in 100 ms units; scale to the common 100 us units. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert the 100 us hardware units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5187
/*
 * Program the previously computed panel power sequencing delays
 * (intel_dp->pps_delays) into the hardware registers, including the pp
 * clock divisor and the port-select bits where the platform has them.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the register set: PCH platforms vs. VLV/CHV per-pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	/* t11_t12 is in 100 us units; the register field wants 100 ms units. */
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read back what actually landed in the registers for debugging. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
5253
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclocked rate selects the low-RR state. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV) switch RR via the M/N link values ... */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* ... older gens toggle an RR-mode bit in PIPECONF instead. */
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5357
5358 /**
5359  * intel_edp_drrs_enable - init drrs struct if supported
5360  * @intel_dp: DP struct
5361  *
5362  * Initializes frontbuffer_bits and drrs.dp
5363  */
5364 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5365 {
5366         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5367         struct drm_i915_private *dev_priv = dev->dev_private;
5368         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5369         struct drm_crtc *crtc = dig_port->base.base.crtc;
5370         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5371
5372         if (!intel_crtc->config->has_drrs) {
5373                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5374                 return;
5375         }
5376
5377         mutex_lock(&dev_priv->drrs.mutex);
5378         if (WARN_ON(dev_priv->drrs.dp)) {
5379                 DRM_ERROR("DRRS already enabled\n");
5380                 goto unlock;
5381         }
5382
5383         dev_priv->drrs.busy_frontbuffer_bits = 0;
5384
5385         dev_priv->drrs.dp = intel_dp;
5386
5387 unlock:
5388         mutex_unlock(&dev_priv->drrs.mutex);
5389 }
5390
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	/* Already disabled (or never enabled) - nothing to do. */
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Restore the panel's full (fixed-mode) refresh rate before quitting. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Flush the downclock work after dropping the mutex - the work
	 * itself takes drrs.mutex, so cancelling while holding it could
	 * deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5423
5424 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5425 {
5426         struct drm_i915_private *dev_priv =
5427                 container_of(work, typeof(*dev_priv), drrs.work.work);
5428         struct intel_dp *intel_dp;
5429
5430         mutex_lock(&dev_priv->drrs.mutex);
5431
5432         intel_dp = dev_priv->drrs.dp;
5433
5434         if (!intel_dp)
5435                 goto unlock;
5436
5437         /*
5438          * The delayed work can race with an invalidate hence we need to
5439          * recheck.
5440          */
5441
5442         if (dev_priv->drrs.busy_frontbuffer_bits)
5443                 goto unlock;
5444
5445         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5446                 intel_dp_set_drrs_state(dev_priv->dev,
5447                         intel_dp->attached_connector->panel.
5448                         downclock_mode->vrefresh);
5449
5450 unlock:
5451         mutex_unlock(&dev_priv->drrs.mutex);
5452 }
5453
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* The screen is about to get busy; stop any pending downclock. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS is not enabled on any port. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Activity detected: make sure we run at the high refresh rate. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);
	}

	/* Only planes on the DRRS pipe are relevant for busy tracking. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
5497
/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart the idle timer: any pending downclock is stale now. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS is not enabled on any port. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	/* Flushed planes are no longer busy. */
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/*
	 * If nothing is busy and we are still at the high refresh rate,
	 * arm the 1 second idle timeout that downclocks the panel.
	 */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
			!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5537
5538 /**
5539  * DOC: Display Refresh Rate Switching (DRRS)
5540  *
5541  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5543  * dynamically, based on the usage scenario. This feature is applicable
5544  * for internal panels.
5545  *
5546  * Indication that the panel supports DRRS is given by the panel EDID, which
5547  * would list multiple refresh rates for one resolution.
5548  *
5549  * DRRS is of 2 types - static and seamless.
5550  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5551  * (may appear as a blink on screen) and is used in dock-undock scenario.
5552  * Seamless DRRS involves changing RR without any visual effect to the user
5553  * and can be used during normal system usage. This is done by programming
5554  * certain registers.
5555  *
5556  * Support for static/seamless DRRS may be indicated in the VBT based on
5557  * inputs from the panel spec.
5558  *
5559  * DRRS saves power by switching to low RR based on usage scenarios.
5560  *
5561  * eDP DRRS:-
5562  *        The implementation is based on frontbuffer tracking implementation.
5563  * When there is a disturbance on the screen triggered by user activity or a
5564  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5565  * When there is no movement on screen, after a timeout of 1 second, a switch
5566  * to low RR is made.
5567  *        For integration with frontbuffer tracking code,
5568  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5569  *
5570  * DRRS can be further extended to support other internal panels and also
5571  * the scenario of video playback wherein RR is set based on the rate
5572  * requested by userspace.
5573  */
5574
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Always set up work/mutex so later teardown paths stay safe. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	/* Seamless DRRS is only implemented for Gen7 and newer. */
	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* A downclocked panel mode is what actually makes DRRS usable. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Start at the high (fixed mode) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5624
/*
 * Finish connector setup for eDP: validate the panel via DPCD, cache the
 * EDID, pick the fixed (and optional downclock) mode, and set up the
 * panel/backlight. Returns false if the panel looks like a ghost (no
 * DPCD), true otherwise. No-op (returns true) for non-eDP ports.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	/* Clean up any VDD state left over from the BIOS. */
	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID was read but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS needs the fixed mode to find a downclock mode. */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5731
/*
 * Initialize the DP/eDP connector for a digital port: set up AUX vfuncs,
 * connector/encoder wiring, hotplug pin, panel power sequencing (eDP),
 * AUX channel and MST. Returns false (and tears everything down) if the
 * port is invalid or the eDP panel turns out to be a ghost.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	/* DDI platforms need their own hw-state readout for connectors. */
	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* Panel power sequencing must be set up before any AUX traffic. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Ghost eDP panel: undo everything done above. */
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
5875
/*
 * Allocate and register a DP encoder+connector pair for @port, wiring up
 * the platform-specific enable/disable vfuncs. On connector-init failure
 * the encoder and allocations are cleaned up; nothing is reported to the
 * caller (the port is simply absent).
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific modeset sequence hooks. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D can only drive pipe C; B/C drive pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hpd_irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}
5947
5948 void intel_dp_mst_suspend(struct drm_device *dev)
5949 {
5950         struct drm_i915_private *dev_priv = dev->dev_private;
5951         int i;
5952
5953         /* disable MST */
5954         for (i = 0; i < I915_MAX_PORTS; i++) {
5955                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5956                 if (!intel_dig_port)
5957                         continue;
5958
5959                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5960                         if (!intel_dig_port->dp.can_mst)
5961                                 continue;
5962                         if (intel_dig_port->dp.is_mst)
5963                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5964                 }
5965         }
5966 }
5967
5968 void intel_dp_mst_resume(struct drm_device *dev)
5969 {
5970         struct drm_i915_private *dev_priv = dev->dev_private;
5971         int i;
5972
5973         for (i = 0; i < I915_MAX_PORTS; i++) {
5974                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5975                 if (!intel_dig_port)
5976                         continue;
5977                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5978                         int ret;
5979
5980                         if (!intel_dig_port->dp.can_mst)
5981                                 continue;
5982
5983                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5984                         if (ret != 0) {
5985                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5986                         }
5987                 }
5988         }
5989 }