OSDN Git Service

drm/i915: Remove WA for swapped HPD pins in broxton A stepping
[uclinux-h8/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/types.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <asm/byteorder.h>
35 #include <drm/drmP.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_edid.h>
40 #include "intel_drv.h"
41 #include <drm/i915_drm.h>
42 #include "i915_drv.h"
43
#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)

/* Compliance test status bits — encode the requested test resolution
 * class in the low bits of the compliance state. */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51
/*
 * Maps a DP link clock to the DPLL divider settings that produce it on a
 * given platform; see the per-platform tables below.
 */
struct dp_link_dpll {
	int clock;		/* link clock in kHz (e.g. 162000, 270000) */
	struct dpll dpll;	/* divider values producing that clock */
};
56
/* gen4 DPLL settings for the standard DP link clocks */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
63
/* PCH DPLL settings for the standard DP link clocks */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
70
/* Valleyview DPLL settings for the standard DP link clocks */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
77
/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
95
/* Source-side supported link rates in kHz, ascending; the table used is
 * selected per platform in intel_dp_source_rates(). The highest entry of
 * each table is the HBR2 rate (540000). */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
101
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * will return true, and false otherwise.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119
120         return intel_dig_port->base.base.dev;
121 }
122
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133                                       enum pipe pipe);
134 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
135
136 static int
137 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
138 {
139         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
140
141         switch (max_link_bw) {
142         case DP_LINK_BW_1_62:
143         case DP_LINK_BW_2_7:
144         case DP_LINK_BW_5_4:
145                 break;
146         default:
147                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
148                      max_link_bw);
149                 max_link_bw = DP_LINK_BW_1_62;
150                 break;
151         }
152         return max_link_bw;
153 }
154
155 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
156 {
157         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
158         u8 source_max, sink_max;
159
160         source_max = intel_dig_port->max_lanes;
161         sink_max = intel_dp->max_sink_lane_count;
162
163         return min(source_max, sink_max);
164 }
165
/*
 * Bandwidth (in kB units matching the link clock) needed to carry a mode:
 * @pixel_clock is in kHz; bits are converted to bytes, rounding up.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int total_bits = pixel_clock * bpp;

	return DIV_ROUND_UP(total_bits, 8);
}
172
/*
 * Maximum data rate the link can carry.
 *
 * @max_link_clock is the link symbol clock (LS_Clk) in kHz, not the link
 * rate usually quoted in Gbps. Each lane carries 8 data bits per LS_Clk,
 * so the channel-encoding overhead handled in the PHY layer does not
 * appear here.
 */
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total = max_link_clock * max_lanes;

	return total;
}
184
185 static int
186 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
187 {
188         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
189         struct intel_encoder *encoder = &intel_dig_port->base;
190         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
191         int max_dotclk = dev_priv->max_dotclk_freq;
192         int ds_max_dotclk;
193
194         int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
195
196         if (type != DP_DS_PORT_TYPE_VGA)
197                 return max_dotclk;
198
199         ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
200                                                     intel_dp->downstream_ports);
201
202         if (ds_max_dotclk != 0)
203                 max_dotclk = min(max_dotclk, ds_max_dotclk);
204
205         return max_dotclk;
206 }
207
208 static int
209 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
210 {
211         if (intel_dp->num_sink_rates) {
212                 *sink_rates = intel_dp->sink_rates;
213                 return intel_dp->num_sink_rates;
214         }
215
216         *sink_rates = default_rates;
217
218         return (intel_dp->max_sink_link_bw >> 3) + 1;
219 }
220
221 static int
222 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
223 {
224         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
225         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
226         int size;
227
228         if (IS_GEN9_LP(dev_priv)) {
229                 *source_rates = bxt_rates;
230                 size = ARRAY_SIZE(bxt_rates);
231         } else if (IS_GEN9_BC(dev_priv)) {
232                 *source_rates = skl_rates;
233                 size = ARRAY_SIZE(skl_rates);
234         } else {
235                 *source_rates = default_rates;
236                 size = ARRAY_SIZE(default_rates);
237         }
238
239         /* This depends on the fact that 5.4 is last value in the array */
240         if (!intel_dp_source_supports_hbr2(intel_dp))
241                 size--;
242
243         return size;
244 }
245
246 static int intersect_rates(const int *source_rates, int source_len,
247                            const int *sink_rates, int sink_len,
248                            int *common_rates)
249 {
250         int i = 0, j = 0, k = 0;
251
252         while (i < source_len && j < sink_len) {
253                 if (source_rates[i] == sink_rates[j]) {
254                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
255                                 return k;
256                         common_rates[k] = source_rates[i];
257                         ++k;
258                         ++i;
259                         ++j;
260                 } else if (source_rates[i] < sink_rates[j]) {
261                         ++i;
262                 } else {
263                         ++j;
264                 }
265         }
266         return k;
267 }
268
/* Fill @common_rates with the rates both source and sink support. */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *src, *snk;
	int src_len, snk_len;

	snk_len = intel_dp_sink_rates(intel_dp, &snk);
	src_len = intel_dp_source_rates(intel_dp, &src);

	return intersect_rates(src, src_len, snk, snk_len, common_rates);
}
282
/*
 * Fill @common_rates and return the index of @link_rate within it,
 * or -1 when the rate is not common to source and sink.
 */
static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
				    int *common_rates, int link_rate)
{
	int common_len = intel_dp_common_rates(intel_dp, common_rates);
	int i;

	/* scan from the highest rate downwards */
	for (i = common_len - 1; i >= 0; i--) {
		if (common_rates[i] == link_rate)
			return i;
	}

	return -1;
}
297
/*
 * Reduce the link parameters after a failed link training attempt.
 * @link_rate: link rate (kHz) that failed to train
 * @lane_count: lane count that failed to train
 *
 * Fallback policy: first step down through the common link rates at the
 * same lane count; once the lowest common rate has failed, go back to the
 * maximum bw code but halve the lane count. Returns 0 on success, -1 when
 * no further fallback is possible (lowest rate at a single lane failed).
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, uint8_t lane_count)
{
	int common_rates[DP_MAX_SUPPORTED_RATES];
	int link_rate_index;

	link_rate_index = intel_dp_link_rate_index(intel_dp,
						   common_rates,
						   link_rate);
	if (link_rate_index > 0) {
		/* try the next lower common rate, keeping the lane count */
		intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
		intel_dp->max_sink_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* lowest rate exhausted: back to max bw with half the lanes */
		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
		intel_dp->max_sink_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
320
/*
 * Validate a display mode against panel limits, link bandwidth and
 * dotclock constraints for this connector.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP panels can't exceed the fixed panel mode's dimensions */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* the panel is always driven at its fixed clock */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* NOTE(review): checked at 18 bpp — presumably the lowest bpp the
	 * link could fall back to; confirm against compute_config */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* double-clocked modes are not supported on DP */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
361
/* Pack up to four bytes, MSB first, into one 32-bit AUX data word. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t word = 0;
	int n = src_bytes > 4 ? 4 : src_bytes;
	int i;

	for (i = 0; i < n; i++)
		word |= ((uint32_t)src[i]) << ((3 - i) * 8);

	return word;
}
373
/* Unpack a 32-bit AUX data word, MSB first, into up to four bytes. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int n = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < n; i++)
		dst[i] = src >> ((3 - i) * 8);
}
382
383 static void
384 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
385                                     struct intel_dp *intel_dp);
386 static void
387 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
388                                               struct intel_dp *intel_dp,
389                                               bool force_disable_vdd);
390 static void
391 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
392
/*
 * Acquire pps_mutex for @intel_dp. A reference on the port's AUX power
 * domain is taken *before* the mutex and released after it (see
 * pps_unlock()); the ordering rules are described in
 * intel_power_sequencer_reset().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
410
/*
 * Release pps_mutex and then drop the AUX power domain reference taken
 * in pps_lock(), mirroring its acquisition order.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	/* drop the reference taken in pps_lock() */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
424
425 static void
426 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
427 {
428         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
429         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
430         enum pipe pipe = intel_dp->pps_pipe;
431         bool pll_enabled, release_cl_override = false;
432         enum dpio_phy phy = DPIO_PHY(pipe);
433         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
434         uint32_t DP;
435
436         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
437                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
438                  pipe_name(pipe), port_name(intel_dig_port->port)))
439                 return;
440
441         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
442                       pipe_name(pipe), port_name(intel_dig_port->port));
443
444         /* Preserve the BIOS-computed detected bit. This is
445          * supposed to be read-only.
446          */
447         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
448         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
449         DP |= DP_PORT_WIDTH(1);
450         DP |= DP_LINK_TRAIN_PAT_1;
451
452         if (IS_CHERRYVIEW(dev_priv))
453                 DP |= DP_PIPE_SELECT_CHV(pipe);
454         else if (pipe == PIPE_B)
455                 DP |= DP_PIPEB_SELECT;
456
457         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
458
459         /*
460          * The DPLL for the pipe must be enabled for this to work.
461          * So enable temporarily it if it's not already enabled.
462          */
463         if (!pll_enabled) {
464                 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
465                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
466
467                 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
468                                      &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
469                         DRM_ERROR("Failed to force on pll for pipe %c!\n",
470                                   pipe_name(pipe));
471                         return;
472                 }
473         }
474
475         /*
476          * Similar magic as in intel_dp_enable_port().
477          * We _must_ do this port enable + disable trick
478          * to make this power seqeuencer lock onto the port.
479          * Otherwise even VDD force bit won't work.
480          */
481         I915_WRITE(intel_dp->output_reg, DP);
482         POSTING_READ(intel_dp->output_reg);
483
484         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
485         POSTING_READ(intel_dp->output_reg);
486
487         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
488         POSTING_READ(intel_dp->output_reg);
489
490         if (!pll_enabled) {
491                 vlv_force_pll_off(dev_priv, pipe);
492
493                 if (release_cl_override)
494                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
495         }
496 }
497
/*
 * Find a panel power sequencer pipe (A or B) not claimed by any other
 * port. Returns INVALID_PIPE when both are taken.
 */
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			/* eDP: an active pipe must match the claimed PPS pipe */
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			/* regular DP ports never claim a PPS pipe... */
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			/* ...but exclude the pipe an active DP port uses */
			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
535
/*
 * Return the pipe whose power sequencer drives this eDP port, claiming
 * (stealing if needed) and kicking a sequencer first when none is
 * assigned yet. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	/* take the sequencer away from whatever port held it before */
	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
583
/*
 * Return the BXT PPS instance index for this eDP port, reprogramming the
 * hardware first when a reset was flagged by intel_power_sequencer_reset().
 * Caller must hold pps_mutex.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/*
	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
	 * mapping needs to be retrieved from VBT, for now just hard-code to
	 * use instance #0 always.
	 */
	if (!intel_dp->pps_reset)
		return 0;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);

	return 0;
}
614
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Panel power is currently on for this pipe's sequencer. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

/* VDD force is currently asserted for this pipe's sequencer. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accepts any pipe; used as the final, unconditional fallback check. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
635
636 static enum pipe
637 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
638                      enum port port,
639                      vlv_pipe_check pipe_check)
640 {
641         enum pipe pipe;
642
643         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
644                 u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
645                         PANEL_PORT_SELECT_MASK;
646
647                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
648                         continue;
649
650                 if (!pipe_check(dev_priv, pipe))
651                         continue;
652
653                 return pipe;
654         }
655
656         return INVALID_PIPE;
657 }
658
/*
 * At init time, figure out which pipe's power sequencer the BIOS left
 * driving this port, preferring one that is actively powering the panel.
 * Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
695
/*
 * Invalidate cached PPS state for all eDP ports after a power-well reset:
 * BXT flags lazy HW reprogramming, VLV/CHV forget the claimed pipe.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	/* PPS tracking below only exists on VLV/CHV/GEN9_LP hardware */
	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		/* no port should be active while we reset the PPS state */
		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
735
/* PPS register offsets for one sequencer instance, resolved by
 * intel_pps_get_registers(). */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* panel power control */
	i915_reg_t pp_stat;	/* panel power status */
	i915_reg_t pp_on;	/* power-on delays */
	i915_reg_t pp_off;	/* power-off delays */
	i915_reg_t pp_div;	/* divisor; left zero on GEN9_LP */
};
743
/*
 * Fill @regs with the PPS register offsets for the sequencer instance
 * used by @intel_dp: the BXT instance on GEN9_LP, the claimed pipe on
 * VLV/CHV, instance 0 elsewhere.
 */
static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
				    struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
	/* the divisor register is not programmed on GEN9_LP */
	if (!IS_GEN9_LP(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}
764
765 static i915_reg_t
766 _pp_ctrl_reg(struct intel_dp *intel_dp)
767 {
768         struct pps_registers regs;
769
770         intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
771                                 &regs);
772
773         return regs.pp_ctrl;
774 }
775
776 static i915_reg_t
777 _pp_stat_reg(struct intel_dp *intel_dp)
778 {
779         struct pps_registers regs;
780
781         intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
782                                 &regs);
783
784         return regs.pp_stat;
785 }
786
/*
 * Reboot notifier handler: shuts panel power down and waits out the power
 * cycle delay so the panel's T12 timing is guaranteed across the restart.
 * Only applicable when panel PM state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* only eDP panels on an actual restart are affected */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* block the reboot until the full power cycle has elapsed */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
822
823 static bool edp_have_panel_power(struct intel_dp *intel_dp)
824 {
825         struct drm_device *dev = intel_dp_to_dev(intel_dp);
826         struct drm_i915_private *dev_priv = to_i915(dev);
827
828         lockdep_assert_held(&dev_priv->pps_mutex);
829
830         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
831             intel_dp->pps_pipe == INVALID_PIPE)
832                 return false;
833
834         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
835 }
836
837 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
838 {
839         struct drm_device *dev = intel_dp_to_dev(intel_dp);
840         struct drm_i915_private *dev_priv = to_i915(dev);
841
842         lockdep_assert_held(&dev_priv->pps_mutex);
843
844         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
845             intel_dp->pps_pipe == INVALID_PIPE)
846                 return false;
847
848         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
849 }
850
/*
 * Sanity check before AUX traffic on an eDP panel: warn loudly if the
 * panel has neither power nor forced VDD, since the transfer will fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!is_edp(intel_dp))
                return;

        /* Either panel power or forced VDD is enough for AUX to work. */
        if (edp_have_panel_power(intel_dp) || edp_have_panel_vdd(intel_dp))
                return;

        WARN(1, "eDP powered off while attempting aux channel communication.\n");
        DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                      I915_READ(_pp_stat_reg(intel_dp)),
                      I915_READ(_pp_ctrl_reg(intel_dp)));
}
867
/*
 * Wait (up to 10ms) for the in-flight AUX transfer to complete and return
 * the last value read from the AUX_CH_CTL register.  With AUX interrupts
 * we sleep on gmbus_wait_queue (presumably woken from the AUX interrupt
 * handler — confirm against i915_irq); otherwise SEND_BUSY is polled.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C: transfer finished, i.e. SEND_BUSY deasserted (also refreshes status) */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
891
892 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
893 {
894         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
895         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
896
897         if (index)
898                 return 0;
899
900         /*
901          * The clock divider is based off the hrawclk, and would like to run at
902          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
903          */
904         return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
905 }
906
907 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
908 {
909         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
910         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
911
912         if (index)
913                 return 0;
914
915         /*
916          * The clock divider is based off the cdclk or PCH rawclk, and would
917          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
918          * divide by 2000 and use that
919          */
920         if (intel_dig_port->port == PORT_A)
921                 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
922         else
923                 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
924 }
925
926 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
927 {
928         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
929         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
930
931         if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
932                 /* Workaround for non-ULT HSW */
933                 switch (index) {
934                 case 0: return 63;
935                 case 1: return 72;
936                 default: return 0;
937                 }
938         }
939
940         return ilk_get_aux_clock_divider(intel_dp, index);
941 }
942
943 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
944 {
945         /*
946          * SKL doesn't need us to program the AUX clock divider (Hardware will
947          * derive the clock from CDCLK automatically). We still implement the
948          * get_aux_clock_divider vfunc to plug-in into the existing code.
949          */
950         return index ? 0 : 1;
951 }
952
953 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
954                                      bool has_aux_irq,
955                                      int send_bytes,
956                                      uint32_t aux_clock_divider)
957 {
958         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
959         struct drm_i915_private *dev_priv =
960                         to_i915(intel_dig_port->base.base.dev);
961         uint32_t precharge, timeout;
962
963         if (IS_GEN6(dev_priv))
964                 precharge = 3;
965         else
966                 precharge = 5;
967
968         if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
969                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
970         else
971                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
972
973         return DP_AUX_CH_CTL_SEND_BUSY |
974                DP_AUX_CH_CTL_DONE |
975                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
976                DP_AUX_CH_CTL_TIME_OUT_ERROR |
977                timeout |
978                DP_AUX_CH_CTL_RECEIVE_ERROR |
979                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
980                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
981                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
982 }
983
984 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
985                                       bool has_aux_irq,
986                                       int send_bytes,
987                                       uint32_t unused)
988 {
989         return DP_AUX_CH_CTL_SEND_BUSY |
990                DP_AUX_CH_CTL_DONE |
991                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
992                DP_AUX_CH_CTL_TIME_OUT_ERROR |
993                DP_AUX_CH_CTL_TIME_OUT_1600us |
994                DP_AUX_CH_CTL_RECEIVE_ERROR |
995                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
996                DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
997                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
998 }
999
/*
 * Perform one raw AUX channel transfer: write @send_bytes from @send, then
 * read back up to @recv_size bytes into @recv.
 *
 * Returns the number of bytes received, or a negative error code:
 * -EBUSY when the channel never became free or never completed, -EIO on a
 * receive error, -ETIMEDOUT when the sink did not reply, -E2BIG for
 * oversized messages.  Takes pps_lock and (for eDP) VDD internally.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for eg. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                /* Channel stuck busy: warn once per distinct status value. */
                static u32 last_status = -1;
                const u32 status = I915_READ(ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        /* Retry the transfer with each divider the platform provides. */
        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

        /*
         * By BSpec: "Message sizes of 0 or >20 are not allowed."
         * We have no idea of what happened so we return -EBUSY so
         * drm layer takes care for the necessary retries.
         */
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
                /*
                 * FIXME: This patch was created on top of a series that
                 * organize the retries at drm level. There EBUSY should
                 * also take care for 1ms wait before retrying.
                 * That aux retries re-org is still needed and after that is
                 * merged we remove this sleep from here.
                 */
                usleep_range(1000, 1500);
                ret = -EBUSY;
                goto out;
        }

        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}
1171
/* AUX wire header: 3 address/command bytes plus one length byte. */
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer hook: marshal a drm_dp_aux_msg into the raw AUX wire
 * format, run it through intel_dp_aux_ch() and decode the reply.
 *
 * Returns the payload size on success or a negative error code; the 4-bit
 * AUX reply code is stored in msg->reply for the caller to check.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        /* Build the header: request nibble, 20-bit address, length-1. */
        txbuf[0] = (msg->request << 4) |
                ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
        case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                /* A zero-size message is an address-only transaction. */
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                WARN_ON(!msg->buffer != !msg->size);

                if (msg->buffer)
                        memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;

                        if (ret > 1) {
                                /* Number of bytes written in a short write. */
                                ret = clamp_t(int, rxbuf[1], 0, msg->size);
                        } else {
                                /* Return payload size. */
                                ret = msg->size;
                        }
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                /* One extra byte for the reply code before the payload. */
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
1246
1247 static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1248                                 enum port port)
1249 {
1250         const struct ddi_vbt_port_info *info =
1251                 &dev_priv->vbt.ddi_port_info[port];
1252         enum port aux_port;
1253
1254         if (!info->alternate_aux_channel) {
1255                 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1256                               port_name(port), port_name(port));
1257                 return port;
1258         }
1259
1260         switch (info->alternate_aux_channel) {
1261         case DP_AUX_A:
1262                 aux_port = PORT_A;
1263                 break;
1264         case DP_AUX_B:
1265                 aux_port = PORT_B;
1266                 break;
1267         case DP_AUX_C:
1268                 aux_port = PORT_C;
1269                 break;
1270         case DP_AUX_D:
1271                 aux_port = PORT_D;
1272                 break;
1273         default:
1274                 MISSING_CASE(info->alternate_aux_channel);
1275                 aux_port = PORT_A;
1276                 break;
1277         }
1278
1279         DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1280                       port_name(aux_port), port_name(port));
1281
1282         return aux_port;
1283 }
1284
1285 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1286                                   enum port port)
1287 {
1288         switch (port) {
1289         case PORT_B:
1290         case PORT_C:
1291         case PORT_D:
1292                 return DP_AUX_CH_CTL(port);
1293         default:
1294                 MISSING_CASE(port);
1295                 return DP_AUX_CH_CTL(PORT_B);
1296         }
1297 }
1298
1299 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1300                                    enum port port, int index)
1301 {
1302         switch (port) {
1303         case PORT_B:
1304         case PORT_C:
1305         case PORT_D:
1306                 return DP_AUX_CH_DATA(port, index);
1307         default:
1308                 MISSING_CASE(port);
1309                 return DP_AUX_CH_DATA(PORT_B, index);
1310         }
1311 }
1312
1313 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1314                                   enum port port)
1315 {
1316         switch (port) {
1317         case PORT_A:
1318                 return DP_AUX_CH_CTL(port);
1319         case PORT_B:
1320         case PORT_C:
1321         case PORT_D:
1322                 return PCH_DP_AUX_CH_CTL(port);
1323         default:
1324                 MISSING_CASE(port);
1325                 return DP_AUX_CH_CTL(PORT_A);
1326         }
1327 }
1328
1329 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1330                                    enum port port, int index)
1331 {
1332         switch (port) {
1333         case PORT_A:
1334                 return DP_AUX_CH_DATA(port, index);
1335         case PORT_B:
1336         case PORT_C:
1337         case PORT_D:
1338                 return PCH_DP_AUX_CH_DATA(port, index);
1339         default:
1340                 MISSING_CASE(port);
1341                 return DP_AUX_CH_DATA(PORT_A, index);
1342         }
1343 }
1344
1345 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1346                                   enum port port)
1347 {
1348         switch (port) {
1349         case PORT_A:
1350         case PORT_B:
1351         case PORT_C:
1352         case PORT_D:
1353                 return DP_AUX_CH_CTL(port);
1354         default:
1355                 MISSING_CASE(port);
1356                 return DP_AUX_CH_CTL(PORT_A);
1357         }
1358 }
1359
1360 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1361                                    enum port port, int index)
1362 {
1363         switch (port) {
1364         case PORT_A:
1365         case PORT_B:
1366         case PORT_C:
1367         case PORT_D:
1368                 return DP_AUX_CH_DATA(port, index);
1369         default:
1370                 MISSING_CASE(port);
1371                 return DP_AUX_CH_DATA(PORT_A, index);
1372         }
1373 }
1374
1375 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1376                                     enum port port)
1377 {
1378         if (INTEL_INFO(dev_priv)->gen >= 9)
1379                 return skl_aux_ctl_reg(dev_priv, port);
1380         else if (HAS_PCH_SPLIT(dev_priv))
1381                 return ilk_aux_ctl_reg(dev_priv, port);
1382         else
1383                 return g4x_aux_ctl_reg(dev_priv, port);
1384 }
1385
1386 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1387                                      enum port port, int index)
1388 {
1389         if (INTEL_INFO(dev_priv)->gen >= 9)
1390                 return skl_aux_data_reg(dev_priv, port, index);
1391         else if (HAS_PCH_SPLIT(dev_priv))
1392                 return ilk_aux_data_reg(dev_priv, port, index);
1393         else
1394                 return g4x_aux_data_reg(dev_priv, port, index);
1395 }
1396
1397 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1398 {
1399         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1400         enum port port = intel_aux_port(dev_priv,
1401                                         dp_to_dig_port(intel_dp)->port);
1402         int i;
1403
1404         intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1405         for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1406                 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1407 }
1408
/* Release resources allocated by intel_dp_aux_init() (the AUX name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
        kfree(intel_dp->aux.name);
}
1414
1415 static void
1416 intel_dp_aux_init(struct intel_dp *intel_dp)
1417 {
1418         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1419         enum port port = intel_dig_port->port;
1420
1421         intel_aux_reg_init(intel_dp);
1422         drm_dp_aux_init(&intel_dp->aux);
1423
1424         /* Failure to allocate our preferred name is not critical */
1425         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1426         intel_dp->aux.transfer = intel_dp_aux_transfer;
1427 }
1428
1429 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1430 {
1431         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1432         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1433
1434         if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
1435             IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
1436                 return true;
1437         else
1438                 return false;
1439 }
1440
1441 static void
1442 intel_dp_set_clock(struct intel_encoder *encoder,
1443                    struct intel_crtc_state *pipe_config)
1444 {
1445         struct drm_device *dev = encoder->base.dev;
1446         struct drm_i915_private *dev_priv = to_i915(dev);
1447         const struct dp_link_dpll *divisor = NULL;
1448         int i, count = 0;
1449
1450         if (IS_G4X(dev_priv)) {
1451                 divisor = gen4_dpll;
1452                 count = ARRAY_SIZE(gen4_dpll);
1453         } else if (HAS_PCH_SPLIT(dev_priv)) {
1454                 divisor = pch_dpll;
1455                 count = ARRAY_SIZE(pch_dpll);
1456         } else if (IS_CHERRYVIEW(dev_priv)) {
1457                 divisor = chv_dpll;
1458                 count = ARRAY_SIZE(chv_dpll);
1459         } else if (IS_VALLEYVIEW(dev_priv)) {
1460                 divisor = vlv_dpll;
1461                 count = ARRAY_SIZE(vlv_dpll);
1462         }
1463
1464         if (divisor && count) {
1465                 for (i = 0; i < count; i++) {
1466                         if (pipe_config->port_clock == divisor[i].clock) {
1467                                 pipe_config->dpll = divisor[i].dpll;
1468                                 pipe_config->clock_set = true;
1469                                 break;
1470                         }
1471                 }
1472         }
1473 }
1474
/*
 * Format @nelem integers from @array into @str as a comma-separated list,
 * truncating silently if the buffer is too small.
 *
 * Fix: snprintf() returns a (possibly negative) int; the old check
 * "r >= len" compared it against a size_t, relying on implicit conversion.
 * Now a negative return (encoding error) and truncation are both handled
 * explicitly.
 */
static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

                /* Stop on output error or once the buffer is exhausted. */
                if (r < 0 || (size_t)r >= len)
                        return;
                str += r;
                len -= r;
        }
}
1490
/* Dump source, sink and common link rates to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
        const int *source_rates, *sink_rates;
        int source_len, sink_len, common_len;
        int common_rates[DP_MAX_SUPPORTED_RATES];
        char str[128]; /* FIXME: too big for stack? */

        /* Skip all the formatting work unless KMS debugging is enabled. */
        if ((drm_debug & DRM_UT_KMS) == 0)
                return;

        source_len = intel_dp_source_rates(intel_dp, &source_rates);
        snprintf_int_array(str, sizeof(str), source_rates, source_len);
        DRM_DEBUG_KMS("source rates: %s\n", str);

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
        DRM_DEBUG_KMS("sink rates: %s\n", str);

        common_len = intel_dp_common_rates(intel_dp, common_rates);
        snprintf_int_array(str, sizeof(str), common_rates, common_len);
        DRM_DEBUG_KMS("common rates: %s\n", str);
}
1513
1514 bool
1515 __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
1516 {
1517         u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
1518                                                       DP_SINK_OUI;
1519
1520         return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
1521                sizeof(*desc);
1522 }
1523
/*
 * Read the sink/branch descriptor into intel_dp->desc and log its OUI,
 * device ID and HW/SW revisions.  Returns false if the DPCD read failed.
 */
bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
        struct intel_dp_desc *desc = &intel_dp->desc;
        /* Does the device claim OUI support in DP_DOWN_STREAM_PORT_COUNT? */
        bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
                       DP_OUI_SUPPORT;
        int dev_id_len;

        if (!__intel_dp_read_desc(intel_dp, desc))
                return false;

        /* device_id is not necessarily NUL-terminated. */
        dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
        DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
                      drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
                      (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
                      dev_id_len, desc->device_id,
                      desc->hw_rev >> 4, desc->hw_rev & 0xf,
                      desc->sw_major_rev, desc->sw_minor_rev);

        return true;
}
1544
1545 static int rate_to_index(int find, const int *rates)
1546 {
1547         int i = 0;
1548
1549         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1550                 if (find == rates[i])
1551                         break;
1552
1553         return i;
1554 }
1555
1556 int
1557 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1558 {
1559         int rates[DP_MAX_SUPPORTED_RATES] = {};
1560         int len;
1561
1562         len = intel_dp_common_rates(intel_dp, rates);
1563         if (WARN_ON(len <= 0))
1564                 return 162000;
1565
1566         return rates[len - 1];
1567 }
1568
/*
 * Index of @rate in the sink's advertised rate table
 * (DP_MAX_SUPPORTED_RATES when the rate is not in the table).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
        return rate_to_index(rate, intel_dp->sink_rates);
}
1573
1574 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1575                            uint8_t *link_bw, uint8_t *rate_select)
1576 {
1577         if (intel_dp->num_sink_rates) {
1578                 *link_bw = 0;
1579                 *rate_select =
1580                         intel_dp_rate_select(intel_dp, port_clock);
1581         } else {
1582                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1583                 *rate_select = 0;
1584         }
1585 }
1586
1587 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1588                                 struct intel_crtc_state *pipe_config)
1589 {
1590         int bpp, bpc;
1591
1592         bpp = pipe_config->pipe_bpp;
1593         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1594
1595         if (bpc > 0)
1596                 bpp = min(bpp, 3*bpc);
1597
1598         /* For DP Compliance we override the computed bpp for the pipe */
1599         if (intel_dp->compliance.test_data.bpc != 0) {
1600                 pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
1601                 pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
1602                 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
1603                               pipe_config->pipe_bpp);
1604         }
1605         return bpp;
1606 }
1607
1608 bool
1609 intel_dp_compute_config(struct intel_encoder *encoder,
1610                         struct intel_crtc_state *pipe_config,
1611                         struct drm_connector_state *conn_state)
1612 {
1613         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1614         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1615         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1616         enum port port = dp_to_dig_port(intel_dp)->port;
1617         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1618         struct intel_connector *intel_connector = intel_dp->attached_connector;
1619         int lane_count, clock;
1620         int min_lane_count = 1;
1621         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1622         /* Conveniently, the link BW constants become indices with a shift...*/
1623         int min_clock = 0;
1624         int max_clock;
1625         int link_rate_index;
1626         int bpp, mode_rate;
1627         int link_avail, link_clock;
1628         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1629         int common_len;
1630         uint8_t link_bw, rate_select;
1631
1632         common_len = intel_dp_common_rates(intel_dp, common_rates);
1633
1634         /* No common link rates between source and sink */
1635         WARN_ON(common_len <= 0);
1636
1637         max_clock = common_len - 1;
1638
1639         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1640                 pipe_config->has_pch_encoder = true;
1641
1642         pipe_config->has_drrs = false;
1643         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1644
1645         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1646                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1647                                        adjusted_mode);
1648
1649                 if (INTEL_GEN(dev_priv) >= 9) {
1650                         int ret;
1651                         ret = skl_update_scaler_crtc(pipe_config);
1652                         if (ret)
1653                                 return ret;
1654                 }
1655
1656                 if (HAS_GMCH_DISPLAY(dev_priv))
1657                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1658                                                  intel_connector->panel.fitting_mode);
1659                 else
1660                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1661                                                 intel_connector->panel.fitting_mode);
1662         }
1663
1664         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1665                 return false;
1666
1667         /* Use values requested by Compliance Test Request */
1668         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1669                 link_rate_index = intel_dp_link_rate_index(intel_dp,
1670                                                            common_rates,
1671                                                            intel_dp->compliance.test_link_rate);
1672                 if (link_rate_index >= 0)
1673                         min_clock = max_clock = link_rate_index;
1674                 min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
1675         }
1676         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1677                       "max bw %d pixel clock %iKHz\n",
1678                       max_lane_count, common_rates[max_clock],
1679                       adjusted_mode->crtc_clock);
1680
1681         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1682          * bpc in between. */
1683         bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1684         if (is_edp(intel_dp)) {
1685
1686                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1687                 if (intel_connector->base.display_info.bpc == 0 &&
1688                         (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1689                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1690                                       dev_priv->vbt.edp.bpp);
1691                         bpp = dev_priv->vbt.edp.bpp;
1692                 }
1693
1694                 /*
1695                  * Use the maximum clock and number of lanes the eDP panel
1696                  * advertizes being capable of. The panels are generally
1697                  * designed to support only a single clock and lane
1698                  * configuration, and typically these values correspond to the
1699                  * native resolution of the panel.
1700                  */
1701                 min_lane_count = max_lane_count;
1702                 min_clock = max_clock;
1703         }
1704
1705         for (; bpp >= 6*3; bpp -= 2*3) {
1706                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1707                                                    bpp);
1708
1709                 for (clock = min_clock; clock <= max_clock; clock++) {
1710                         for (lane_count = min_lane_count;
1711                                 lane_count <= max_lane_count;
1712                                 lane_count <<= 1) {
1713
1714                                 link_clock = common_rates[clock];
1715                                 link_avail = intel_dp_max_data_rate(link_clock,
1716                                                                     lane_count);
1717
1718                                 if (mode_rate <= link_avail) {
1719                                         goto found;
1720                                 }
1721                         }
1722                 }
1723         }
1724
1725         return false;
1726
1727 found:
1728         if (intel_dp->color_range_auto) {
1729                 /*
1730                  * See:
1731                  * CEA-861-E - 5.1 Default Encoding Parameters
1732                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1733                  */
1734                 pipe_config->limited_color_range =
1735                         bpp != 18 &&
1736                         drm_default_rgb_quant_range(adjusted_mode) ==
1737                         HDMI_QUANTIZATION_RANGE_LIMITED;
1738         } else {
1739                 pipe_config->limited_color_range =
1740                         intel_dp->limited_color_range;
1741         }
1742
1743         pipe_config->lane_count = lane_count;
1744
1745         pipe_config->pipe_bpp = bpp;
1746         pipe_config->port_clock = common_rates[clock];
1747
1748         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1749                               &link_bw, &rate_select);
1750
1751         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1752                       link_bw, rate_select, pipe_config->lane_count,
1753                       pipe_config->port_clock, bpp);
1754         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1755                       mode_rate, link_avail);
1756
1757         intel_link_compute_m_n(bpp, lane_count,
1758                                adjusted_mode->crtc_clock,
1759                                pipe_config->port_clock,
1760                                &pipe_config->dp_m_n);
1761
1762         if (intel_connector->panel.downclock_mode != NULL &&
1763                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1764                         pipe_config->has_drrs = true;
1765                         intel_link_compute_m_n(bpp, lane_count,
1766                                 intel_connector->panel.downclock_mode->clock,
1767                                 pipe_config->port_clock,
1768                                 &pipe_config->dp_m2_n2);
1769         }
1770
1771         /*
1772          * DPLL0 VCO may need to be adjusted to get the correct
1773          * clock for eDP. This will affect cdclk as well.
1774          */
1775         if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
1776                 int vco;
1777
1778                 switch (pipe_config->port_clock / 2) {
1779                 case 108000:
1780                 case 216000:
1781                         vco = 8640000;
1782                         break;
1783                 default:
1784                         vco = 8100000;
1785                         break;
1786                 }
1787
1788                 to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
1789         }
1790
1791         if (!HAS_DDI(dev_priv))
1792                 intel_dp_set_clock(encoder, pipe_config);
1793
1794         return true;
1795 }
1796
1797 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1798                               int link_rate, uint8_t lane_count,
1799                               bool link_mst)
1800 {
1801         intel_dp->link_rate = link_rate;
1802         intel_dp->lane_count = lane_count;
1803         intel_dp->link_mst = link_mst;
1804 }
1805
/*
 * Compute the value of the port's DP control register for the upcoming
 * modeset (lane count, sync polarities, training state, pipe select)
 * and cache the link parameters in @intel_dp. The enable bit is not
 * set here. The register layout differs per platform; see the comment
 * in the body.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		/* Pipe select field lives at bit 29 for this layout. */
		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT, enhanced framing is a TRANS_DP_CTL bit instead. */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1890
/*
 * Mask/value pairs for wait_panel_status(): each pair selects which
 * PP_STATUS bits to watch and the values indicating that the panel
 * power sequencer has reached the desired state (fully on, fully off,
 * or idle and ready for a new power cycle).
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1899
1900 static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
1901                                    struct intel_dp *intel_dp);
1902
/*
 * Poll the panel power status register until (status & @mask) == @value,
 * with a 5 second timeout. Logs an error (but does not fail) on timeout.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(dev_priv, intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (intel_wait_for_register(dev_priv,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
1932
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1938
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1944
/*
 * Enforce the panel power-cycle (T11/T12) delay before powering the
 * panel back on: first wait out any remainder of the software-tracked
 * delay since the last power off, then wait for the sequencer itself
 * to report it is idle and ready for a new cycle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1965
/* Honor the panel-power-on -> backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1971
/* Honor the backlight-off -> panel-power-off delay (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1977
1978 /* Read the current pp_control value, unlocking the register if it
1979  * is locked
1980  */
1981
1982 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1983 {
1984         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1985         struct drm_i915_private *dev_priv = to_i915(dev);
1986         u32 control;
1987
1988         lockdep_assert_held(&dev_priv->pps_mutex);
1989
1990         control = I915_READ(_pp_ctrl_reg(intel_dp));
1991         if (WARN_ON(!HAS_DDI(dev_priv) &&
1992                     (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
1993                 control &= ~PANEL_UNLOCK_MASK;
1994                 control |= PANEL_UNLOCK_REGS;
1995         }
1996         return control;
1997 }
1998
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on (taking an aux power domain reference) so the
 * AUX channel can be used before the panel is fully powered. Returns
 * true iff VDD was not already requested, i.e. the caller owes a
 * balancing vdd-off call.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Keep any pending delayed vdd-off from racing with this request. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
2056
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 *
 * Locked wrapper around edp_panel_vdd_on(); warns if VDD had already
 * been requested (i.e. the call did not actually take ownership).
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
2078
/*
 * Immediately drop the VDD force bit (no delayed work). Records the
 * panel power-off timestamp if the panel is now fully off, and releases
 * the aux power domain reference taken by edp_panel_vdd_on(). Warns if
 * someone still wants VDD. Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Start the T11/T12 power-cycle clock only when the panel is off. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2119
/*
 * Delayed-work callback: turn VDD off unless somebody re-requested it
 * while the work was pending.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
2130
/* Schedule the delayed VDD-off work instead of dropping VDD right away. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2143
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Drops this caller's VDD request. With @sync the force bit is cleared
 * immediately; otherwise the delayed work handles it later.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2168
/*
 * Run the panel power-on sequence: wait out the power cycle delay, set
 * PANEL_POWER_ON, and wait until the sequencer reports the panel on.
 * No-op if the panel is already powered. Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN5(dev_priv))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used to enforce the backlight-on delay later. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2216
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2226
2227
/*
 * Run the panel power-off sequence, clearing panel power, VDD force
 * and backlight enable in a single write, then wait for the sequencer
 * to report the panel off and drop the aux power domain reference.
 * Caller must hold pps_mutex and must still have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Start the T11/T12 power-cycle clock. */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2269
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2279
/*
 * Enable backlight in the panel power control. Waits out the
 * backlight-on delay first, then sets EDP_BLC_ENABLE under pps_lock.
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2309
/*
 * Enable backlight PWM and backlight PP control.
 * Ordering: the PWM is brought up before the panel power control's
 * backlight enable bit is set.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2321
/*
 * Disable backlight in the panel power control: clear EDP_BLC_ENABLE
 * under pps_lock, record the timestamp, and wait out the backlight-off
 * delay before returning.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2348
/*
 * Disable backlight PP control and backlight PWM.
 * Ordering mirrors intel_edp_backlight_on(): the panel power control's
 * backlight bit is cleared before the PWM is shut down.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2360
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 *
 * Reads the current EDP_BLC_ENABLE state and only toggles the backlight
 * when it actually differs from the requested @enable.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	bool is_enabled;

	pps_lock(intel_dp);
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
2386
/*
 * State assertion: warn if the DP port enable bit does not match the
 * expected @state.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2399
/*
 * State assertion: warn if the eDP PLL enable bit in DP_A does not
 * match the expected @state.
 */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2410
/*
 * Enable the eDP PLL in DP_A: select the 162 or 270 MHz frequency from
 * the computed port clock, then set the PLL enable bit. Must be called
 * with the pipe disabled, the DP port disabled and the PLL off (asserted
 * below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2450
/*
 * Disable the eDP PLL (DP_A). Counterpart of ironlake_edp_pll_on(); must be
 * called with the pipe and DP port already disabled but the PLL still
 * enabled (asserted below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2469
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	/* DPCD revisions before 1.1 have no DP_SET_POWER register. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Power down: a single write attempt, no retries. */
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		/* Up to 3 attempts, sleeping 1 ms between failed writes. */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		/* LSPCON adapters additionally need to settle in PCON mode. */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the byte count (1) on success. */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2505
/*
 * Read back whether the DP port is enabled in hardware and, if so, which
 * pipe drives it (returned via @pipe). Returns false when the port's power
 * domain is off or DP_PORT_EN is clear. The pipe encoding in the port
 * register differs per platform, hence the branches below.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	/* Bail out (without touching registers) if the domain is powered down. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		enum pipe p;

		/*
		 * On CPT the pipe<->port routing lives in the transcoder
		 * registers, so scan them for the one selecting this port.
		 */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev_priv)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	/* Balance the conditional power reference taken above. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2558
/*
 * Fill @pipe_config with the DP state read back from hardware: sync
 * polarity flags, audio enable, color range, lane count, M/N values and
 * the port clock (for port A, derived from the eDP PLL frequency).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarities live in the transcoder register. */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	/* DP_PORT_WIDTH encodes lane count minus one. */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A's link clock comes from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2638
/*
 * Common DP disable path: stop audio and PSR, then sequence the eDP panel
 * off (backlight -> sink D3 -> panel power) while holding VDD. The ordering
 * of the panel-power steps is required by the eDP panel power sequencing.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     struct intel_crtc_state *old_crtc_state,
			     struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_GEN(dev_priv) < 5)
		intel_dp_link_down(intel_dp);
}
2663
2664 static void ilk_post_disable_dp(struct intel_encoder *encoder,
2665                                 struct intel_crtc_state *old_crtc_state,
2666                                 struct drm_connector_state *old_conn_state)
2667 {
2668         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2669         enum port port = dp_to_dig_port(intel_dp)->port;
2670
2671         intel_dp_link_down(intel_dp);
2672
2673         /* Only ilk+ has port A */
2674         if (port == PORT_A)
2675                 ironlake_edp_pll_off(intel_dp);
2676 }
2677
2678 static void vlv_post_disable_dp(struct intel_encoder *encoder,
2679                                 struct intel_crtc_state *old_crtc_state,
2680                                 struct drm_connector_state *old_conn_state)
2681 {
2682         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2683
2684         intel_dp_link_down(intel_dp);
2685 }
2686
2687 static void chv_post_disable_dp(struct intel_encoder *encoder,
2688                                 struct intel_crtc_state *old_crtc_state,
2689                                 struct drm_connector_state *old_conn_state)
2690 {
2691         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2692         struct drm_device *dev = encoder->base.dev;
2693         struct drm_i915_private *dev_priv = to_i915(dev);
2694
2695         intel_dp_link_down(intel_dp);
2696
2697         mutex_lock(&dev_priv->sb_lock);
2698
2699         /* Assert data lane reset */
2700         chv_data_lane_soft_reset(encoder, true);
2701
2702         mutex_unlock(&dev_priv->sb_lock);
2703 }
2704
/*
 * Translate the DP link training pattern in @dp_train_pat into the port
 * register bits, either writing DP_TP_CTL directly (DDI platforms) or
 * updating the caller's *@DP value (pre-DDI; the caller writes the port
 * register). Three register layouts exist: DDI, CPT/IVB port A, and the
 * legacy/CHV encoding.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & DP_TRAINING_PATTERN_MASK);

	if (HAS_DDI(dev_priv)) {
		/* DDI: training pattern lives in DP_TP_CTL, written here. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		/* CPT-style encoding in the port register itself. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy encoding; CHV uses a wider mask (supports TPS3). */
		if (IS_CHERRYVIEW(dev_priv))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev_priv)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2792
/*
 * Program the port register and set DP_PORT_EN, starting with training
 * pattern 1 as required by the DP spec.
 *
 * NOTE(review): despite its name, @old_crtc_state receives the state being
 * enabled — intel_enable_dp() passes its pipe_config here. Consider
 * renaming it in a follow-up.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2816
/*
 * Common DP enable path: set up the power sequencer (VLV/CHV), enable the
 * port, run the eDP panel power-on sequence under the pps lock, wake the
 * sink, perform link training and finally enable audio if configured.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    struct intel_crtc_state *pipe_config,
			    struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;

	/* The port must not already be enabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp, pipe_config);

	/* eDP panel power-on sequence, bracketed by VDD force on/off. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		/* CHV must not wait on lanes the link doesn't use. */
		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
2864
2865 static void g4x_enable_dp(struct intel_encoder *encoder,
2866                           struct intel_crtc_state *pipe_config,
2867                           struct drm_connector_state *conn_state)
2868 {
2869         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2870
2871         intel_enable_dp(encoder, pipe_config, conn_state);
2872         intel_edp_backlight_on(intel_dp);
2873 }
2874
2875 static void vlv_enable_dp(struct intel_encoder *encoder,
2876                           struct intel_crtc_state *pipe_config,
2877                           struct drm_connector_state *conn_state)
2878 {
2879         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2880
2881         intel_edp_backlight_on(intel_dp);
2882         intel_psr_enable(intel_dp);
2883 }
2884
2885 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
2886                               struct intel_crtc_state *pipe_config,
2887                               struct drm_connector_state *conn_state)
2888 {
2889         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2890         enum port port = dp_to_dig_port(intel_dp)->port;
2891
2892         intel_dp_prepare(encoder, pipe_config);
2893
2894         /* Only ilk+ has port A */
2895         if (port == PORT_A)
2896                 ironlake_edp_pll_on(intel_dp, pipe_config);
2897 }
2898
/*
 * Logically disconnect this port from its current power sequencer: sync
 * off any pending VDD, clear the sequencer's port select and mark the
 * port as having no pps pipe. Caller must hold pps_mutex.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2926
/*
 * Detach pipe @pipe's power sequencer from whichever DP/eDP encoder
 * currently owns it, so the caller can claim it. Caller must hold
 * pps_mutex (asserted below). Only pipes A and B have sequencers.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only DP/eDP encoders can own a power sequencer. */
		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		/* Stealing from a port that's actively using it is a bug. */
		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2963
/*
 * Bind the power sequencer of the crtc's pipe to this port: detach any
 * previously-used sequencer, steal the target one from other encoders if
 * needed, and (for eDP only) claim it and program its registers. Caller
 * must hold pps_mutex (asserted below).
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Only eDP actually needs a power sequencer bound. */
	if (!is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
3007
/* VLV pre-enable hook: program the DPIO PHY, then run the common enable. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder);
	intel_enable_dp(encoder, pipe_config, conn_state);
}
3016
/* VLV pre-PLL hook: set up the port register state, then prime the PHY. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);
	vlv_phy_pre_pll_enable(encoder);
}
3025
/*
 * CHV pre-enable hook: program the PHY, run the common enable, then drop
 * the second common lane override.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
3037
/* CHV pre-PLL hook: set up the port register state, then prime the PHY. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);
	chv_phy_pre_pll_enable(encoder);
}
3046
/* CHV post-PLL-disable hook: just forward to the PHY teardown helper. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    struct intel_crtc_state *pipe_config,
				    struct drm_connector_state *conn_state)
{
	chv_phy_post_pll_disable(encoder);
}
3053
3054 /*
3055  * Fetch AUX CH registers 0x202 - 0x207 which contain
3056  * link status information
3057  */
3058 bool
3059 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3060 {
3061         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3062                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3063 }
3064
3065 static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
3066 {
3067         uint8_t psr_caps = 0;
3068
3069         drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
3070         return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
3071 }
3072
3073 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3074 {
3075         uint8_t dprx = 0;
3076
3077         drm_dp_dpcd_readb(&intel_dp->aux,
3078                         DP_DPRX_FEATURE_ENUMERATION_LIST,
3079                         &dprx);
3080         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3081 }
3082
3083 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
3084 {
3085         uint8_t alpm_caps = 0;
3086
3087         drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
3088         return alpm_caps & DP_ALPM_CAP;
3089 }
3090
/* These are source-specific values. */
/*
 * Maximum voltage swing level the source supports for this port.
 * NOTE: the platform checks are order-sensitive — IS_GEN9_LP must be
 * tested before the generic INTEL_GEN(dev_priv) >= 9 case.
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_GEN9_LP(dev_priv))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_GEN(dev_priv) >= 9) {
		/* eDP low-vswing panels (VBT) get the full range on port A. */
		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev_priv) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3113
/*
 * Maximum pre-emphasis level the source supports at the given
 * @voltage_swing level; per-platform tables, higher swing generally
 * allowing less pre-emphasis.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_GEN(dev_priv) >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3181
/*
 * vlv_signal_levels - program the VLV DPIO PHY for the requested drive levels
 *
 * Maps the voltage-swing / pre-emphasis combination requested in
 * intel_dp->train_set[0] onto Valleyview-specific DPIO register values
 * (de-emphasis, pre-emphasis and unique transition scale) and hands them to
 * vlv_set_phy_signal_level(). The hex constants are hardware tuning values;
 * treat them as opaque.
 *
 * Always returns 0: on VLV the levels live entirely in the PHY, so the
 * caller has nothing to OR into the DP port register. Invalid
 * swing/pre-emphasis combinations return early without touching the PHY.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	/* Outer switch: pre-emphasis level; inner switch: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		/* Max pre-emphasis is only valid at the lowest swing. */
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3267
/*
 * chv_signal_levels - program the CHV DPIO PHY for the requested drive levels
 *
 * Maps the voltage-swing / pre-emphasis combination in
 * intel_dp->train_set[0] onto Cherryview DPIO de-emphasis and margin
 * register values and hands them to chv_set_phy_signal_level(). The
 * unique-transition-scale tweak is only enabled for the maximum-swing,
 * no-pre-emphasis entry.
 *
 * Always returns 0: on CHV the levels live entirely in the PHY, so the
 * caller has nothing to OR into the DP port register. Invalid
 * swing/pre-emphasis combinations return early without touching the PHY.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	/* Outer switch: pre-emphasis level; inner switch: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		/* Max pre-emphasis is only valid at the lowest swing. */
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3350
3351 static uint32_t
3352 gen4_signal_levels(uint8_t train_set)
3353 {
3354         uint32_t        signal_levels = 0;
3355
3356         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3357         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3358         default:
3359                 signal_levels |= DP_VOLTAGE_0_4;
3360                 break;
3361         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3362                 signal_levels |= DP_VOLTAGE_0_6;
3363                 break;
3364         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3365                 signal_levels |= DP_VOLTAGE_0_8;
3366                 break;
3367         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3368                 signal_levels |= DP_VOLTAGE_1_2;
3369                 break;
3370         }
3371         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3372         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3373         default:
3374                 signal_levels |= DP_PRE_EMPHASIS_0;
3375                 break;
3376         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3377                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3378                 break;
3379         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3380                 signal_levels |= DP_PRE_EMPHASIS_6;
3381                 break;
3382         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3383                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3384                 break;
3385         }
3386         return signal_levels;
3387 }
3388
3389 /* Gen6's DP voltage swing and pre-emphasis control */
3390 static uint32_t
3391 gen6_edp_signal_levels(uint8_t train_set)
3392 {
3393         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3394                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3395         switch (signal_levels) {
3396         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3397         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3398                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3399         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3400                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3401         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3402         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3403                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3404         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3405         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3406                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3407         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3408         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3409                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3410         default:
3411                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3412                               "0x%x\n", signal_levels);
3413                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3414         }
3415 }
3416
3417 /* Gen7's DP voltage swing and pre-emphasis control */
3418 static uint32_t
3419 gen7_edp_signal_levels(uint8_t train_set)
3420 {
3421         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3422                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3423         switch (signal_levels) {
3424         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3425                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3426         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3427                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3428         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3429                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3430
3431         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3432                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3433         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3434                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3435
3436         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3437                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3438         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3439                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3440
3441         default:
3442                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3443                               "0x%x\n", signal_levels);
3444                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3445         }
3446 }
3447
3448 void
3449 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3450 {
3451         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3452         enum port port = intel_dig_port->port;
3453         struct drm_device *dev = intel_dig_port->base.base.dev;
3454         struct drm_i915_private *dev_priv = to_i915(dev);
3455         uint32_t signal_levels, mask = 0;
3456         uint8_t train_set = intel_dp->train_set[0];
3457
3458         if (HAS_DDI(dev_priv)) {
3459                 signal_levels = ddi_signal_levels(intel_dp);
3460
3461                 if (IS_GEN9_LP(dev_priv))
3462                         signal_levels = 0;
3463                 else
3464                         mask = DDI_BUF_EMP_MASK;
3465         } else if (IS_CHERRYVIEW(dev_priv)) {
3466                 signal_levels = chv_signal_levels(intel_dp);
3467         } else if (IS_VALLEYVIEW(dev_priv)) {
3468                 signal_levels = vlv_signal_levels(intel_dp);
3469         } else if (IS_GEN7(dev_priv) && port == PORT_A) {
3470                 signal_levels = gen7_edp_signal_levels(train_set);
3471                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3472         } else if (IS_GEN6(dev_priv) && port == PORT_A) {
3473                 signal_levels = gen6_edp_signal_levels(train_set);
3474                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3475         } else {
3476                 signal_levels = gen4_signal_levels(train_set);
3477                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3478         }
3479
3480         if (mask)
3481                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3482
3483         DRM_DEBUG_KMS("Using vswing level %d\n",
3484                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3485         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3486                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3487                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3488
3489         intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3490
3491         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3492         POSTING_READ(intel_dp->output_reg);
3493 }
3494
3495 void
3496 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3497                                        uint8_t dp_train_pat)
3498 {
3499         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3500         struct drm_i915_private *dev_priv =
3501                 to_i915(intel_dig_port->base.base.dev);
3502
3503         _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3504
3505         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3506         POSTING_READ(intel_dp->output_reg);
3507 }
3508
3509 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3510 {
3511         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3512         struct drm_device *dev = intel_dig_port->base.base.dev;
3513         struct drm_i915_private *dev_priv = to_i915(dev);
3514         enum port port = intel_dig_port->port;
3515         uint32_t val;
3516
3517         if (!HAS_DDI(dev_priv))
3518                 return;
3519
3520         val = I915_READ(DP_TP_CTL(port));
3521         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3522         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3523         I915_WRITE(DP_TP_CTL(port), val);
3524
3525         /*
3526          * On PORT_A we can have only eDP in SST mode. There the only reason
3527          * we need to set idle transmission mode is to work around a HW issue
3528          * where we enable the pipe while not in idle link-training mode.
3529          * In this case there is requirement to wait for a minimum number of
3530          * idle patterns to be sent.
3531          */
3532         if (port == PORT_A)
3533                 return;
3534
3535         if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3536                                     DP_TP_STATUS_IDLE_DONE,
3537                                     DP_TP_STATUS_IDLE_DONE,
3538                                     1))
3539                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3540 }
3541
/*
 * intel_dp_link_down - disable a non-DDI DP port
 *
 * Drops the link into the idle training pattern, then disables the port and
 * audio output, applying the IBX transcoder workaround where needed.
 * Only valid for the pre-DDI DP register layout (WARNs and bails on DDI
 * platforms) and expects the port to still be enabled on entry.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/*
	 * Gen7 PORT_A and CPT PCH ports use the _CPT link-train register
	 * layout; everything else uses the original layout (with a wider
	 * mask on CHV).
	 */
	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually turn the port (and audio) off. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Give the panel its full power-down delay before returning. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	/* VLV/CHV PPS bookkeeping: the port no longer owns any pipe. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		pps_lock(intel_dp);
		intel_dp->active_pipe = INVALID_PIPE;
		pps_unlock(intel_dp);
	}
}
3616
3617 bool
3618 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3619 {
3620         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3621                              sizeof(intel_dp->dpcd)) < 0)
3622                 return false; /* aux transfer failed */
3623
3624         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3625
3626         return intel_dp->dpcd[DP_DPCD_REV] != 0;
3627 }
3628
3629 static bool
3630 intel_edp_init_dpcd(struct intel_dp *intel_dp)
3631 {
3632         struct drm_i915_private *dev_priv =
3633                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3634
3635         /* this function is meant to be called only once */
3636         WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
3637
3638         if (!intel_dp_read_dpcd(intel_dp))
3639                 return false;
3640
3641         intel_dp_read_desc(intel_dp);
3642
3643         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3644                 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3645                         DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3646
3647         /* Check if the panel supports PSR */
3648         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
3649                          intel_dp->psr_dpcd,
3650                          sizeof(intel_dp->psr_dpcd));
3651         if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3652                 dev_priv->psr.sink_support = true;
3653                 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3654         }
3655
3656         if (INTEL_GEN(dev_priv) >= 9 &&
3657             (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3658                 uint8_t frame_sync_cap;
3659
3660                 dev_priv->psr.sink_support = true;
3661                 drm_dp_dpcd_read(&intel_dp->aux,
3662                                  DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3663                                  &frame_sync_cap, 1);
3664                 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3665                 /* PSR2 needs frame sync as well */
3666                 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3667                 DRM_DEBUG_KMS("PSR2 %s on sink",
3668                               dev_priv->psr.psr2_support ? "supported" : "not supported");
3669
3670                 if (dev_priv->psr.psr2_support) {
3671                         dev_priv->psr.y_cord_support =
3672                                 intel_dp_get_y_cord_status(intel_dp);
3673                         dev_priv->psr.colorimetry_support =
3674                                 intel_dp_get_colorimetry_status(intel_dp);
3675                         dev_priv->psr.alpm =
3676                                 intel_dp_get_alpm_status(intel_dp);
3677                 }
3678
3679         }
3680
3681         /* Read the eDP Display control capabilities registers */
3682         if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3683             drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3684                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3685                              sizeof(intel_dp->edp_dpcd))
3686                 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3687                               intel_dp->edp_dpcd);
3688
3689         /* Intermediate frequency support */
3690         if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
3691                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3692                 int i;
3693
3694                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3695                                 sink_rates, sizeof(sink_rates));
3696
3697                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3698                         int val = le16_to_cpu(sink_rates[i]);
3699
3700                         if (val == 0)
3701                                 break;
3702
3703                         /* Value read multiplied by 200kHz gives the per-lane
3704                          * link rate in kHz. The source rates are, however,
3705                          * stored in terms of LS_Clk kHz. The full conversion
3706                          * back to symbols is
3707                          * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
3708                          */
3709                         intel_dp->sink_rates[i] = (val * 200) / 10;
3710                 }
3711                 intel_dp->num_sink_rates = i;
3712         }
3713
3714         return true;
3715 }
3716
3717
3718 static bool
3719 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3720 {
3721         if (!intel_dp_read_dpcd(intel_dp))
3722                 return false;
3723
3724         if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
3725                              &intel_dp->sink_count, 1) < 0)
3726                 return false;
3727
3728         /*
3729          * Sink count can change between short pulse hpd hence
3730          * a member variable in intel_dp will track any changes
3731          * between short pulse interrupts.
3732          */
3733         intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
3734
3735         /*
3736          * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3737          * a dongle is present but no display. Unless we require to know
3738          * if a dongle is present or not, we don't need to update
3739          * downstream port information. So, an early return here saves
3740          * time from performing other operations which are not required.
3741          */
3742         if (!is_edp(intel_dp) && !intel_dp->sink_count)
3743                 return false;
3744
3745         if (!drm_dp_is_branch(intel_dp->dpcd))
3746                 return true; /* native DP sink */
3747
3748         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3749                 return true; /* no per-port downstream info */
3750
3751         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3752                              intel_dp->downstream_ports,
3753                              DP_MAX_DOWNSTREAM_PORTS) < 0)
3754                 return false; /* downstream port status fetch failed */
3755
3756         return true;
3757 }
3758
3759 static bool
3760 intel_dp_can_mst(struct intel_dp *intel_dp)
3761 {
3762         u8 buf[1];
3763
3764         if (!i915.enable_dp_mst)
3765                 return false;
3766
3767         if (!intel_dp->can_mst)
3768                 return false;
3769
3770         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3771                 return false;
3772
3773         if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
3774                 return false;
3775
3776         return buf[0] & DP_MST_CAP;
3777 }
3778
3779 static void
3780 intel_dp_configure_mst(struct intel_dp *intel_dp)
3781 {
3782         if (!i915.enable_dp_mst)
3783                 return;
3784
3785         if (!intel_dp->can_mst)
3786                 return;
3787
3788         intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3789
3790         if (intel_dp->is_mst)
3791                 DRM_DEBUG_KMS("Sink is MST capable\n");
3792         else
3793                 DRM_DEBUG_KMS("Sink is not MST capable\n");
3794
3795         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3796                                         intel_dp->is_mst);
3797 }
3798
/*
 * intel_dp_sink_crc_stop - ask the sink to stop CRC calculation
 *
 * Clears DP_TEST_SINK_START in the sink's DP_TEST_SINK register and waits
 * (up to 10 vblanks) for the sink's CRC test count to drop to zero.
 * Always re-enables IPS on the way out, pairing with the disable done in
 * intel_dp_sink_crc_start(). Returns 0 on success or a negative errno.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Read-modify-write so only the START bit is cleared. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait for the sink's CRC test counter to drain back to zero. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	/* Re-enable IPS; pairs with the disable in intel_dp_sink_crc_start(). */
	hsw_enable_ips(intel_crtc);
	return ret;
}
3842
/*
 * intel_dp_sink_crc_start - ask the sink to start CRC calculation
 *
 * Verifies the sink advertises CRC support (DP_TEST_CRC_SUPPORTED in
 * DP_TEST_SINK_MISC), stops any calculation already running, disables IPS
 * for the duration of the capture (re-enabled by intel_dp_sink_crc_stop(),
 * or immediately on failure below), sets DP_TEST_SINK_START and waits one
 * vblank. Returns 0 on success, -ENOTTY if CRCs are unsupported, -EIO on
 * aux failures.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous capture may still be running; stop it first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Give the sink one frame before CRCs are expected to be valid. */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return 0;
}
3877
/*
 * intel_dp_sink_crc - read one frame CRC from the sink
 * @intel_dp: port to read the CRC from
 * @crc: buffer receiving 6 bytes starting at DP_TEST_CRC_R_CR
 *
 * Starts sink CRC calculation, waits up to 6 vblanks for the sink's test
 * count to become non-zero, reads the 6 CRC bytes and stops the
 * calculation again. Returns 0 on success or a negative errno.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a computed CRC. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Best-effort stop; the error already captured in ret wins. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
3918
3919 static bool
3920 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3921 {
3922         return drm_dp_dpcd_read(&intel_dp->aux,
3923                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3924                                        sink_irq_vector, 1) == 1;
3925 }
3926
3927 static bool
3928 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3929 {
3930         int ret;
3931
3932         ret = drm_dp_dpcd_read(&intel_dp->aux,
3933                                              DP_SINK_COUNT_ESI,
3934                                              sink_irq_vector, 14);
3935         if (ret != 14)
3936                 return false;
3937
3938         return true;
3939 }
3940
/*
 * Handle an automated TEST_LINK_TRAINING request (DP CTS 1.2, 4.3.1.11).
 *
 * Reads the lane count and link rate requested by the test sink,
 * validates them against this source's limits, and stashes the accepted
 * values in intel_dp->compliance for the subsequent compliance modeset.
 *
 * Returns DP_TEST_ACK if the request is acceptable, DP_TEST_NAK on a
 * DPCD read failure or an out-of-range request.
 */
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int min_lane_count = 1;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int link_rate_index, test_link_rate;
	uint8_t test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;
	/* Validate the requested lane count */
	if (test_lane_count < min_lane_count ||
	    test_lane_count > intel_dp->max_sink_lane_count)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	/* Validate the requested link rate */
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
	/*
	 * common_rates is filled in by intel_dp_link_rate_index() —
	 * presumably the source/sink rate intersection; a negative index
	 * means the requested rate is unsupported. TODO confirm helper
	 * contract (defined elsewhere in this file).
	 */
	link_rate_index = intel_dp_link_rate_index(intel_dp,
						   common_rates,
						   test_link_rate);
	if (link_rate_index < 0)
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
3984
3985 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3986 {
3987         uint8_t test_pattern;
3988         uint16_t test_misc;
3989         __be16 h_width, v_height;
3990         int status = 0;
3991
3992         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
3993         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
3994                                   &test_pattern, 1);
3995         if (status <= 0) {
3996                 DRM_DEBUG_KMS("Test pattern read failed\n");
3997                 return DP_TEST_NAK;
3998         }
3999         if (test_pattern != DP_COLOR_RAMP)
4000                 return DP_TEST_NAK;
4001
4002         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4003                                   &h_width, 2);
4004         if (status <= 0) {
4005                 DRM_DEBUG_KMS("H Width read failed\n");
4006                 return DP_TEST_NAK;
4007         }
4008
4009         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4010                                   &v_height, 2);
4011         if (status <= 0) {
4012                 DRM_DEBUG_KMS("V Height read failed\n");
4013                 return DP_TEST_NAK;
4014         }
4015
4016         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
4017                                   &test_misc, 1);
4018         if (status <= 0) {
4019                 DRM_DEBUG_KMS("TEST MISC read failed\n");
4020                 return DP_TEST_NAK;
4021         }
4022         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4023                 return DP_TEST_NAK;
4024         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4025                 return DP_TEST_NAK;
4026         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4027         case DP_TEST_BIT_DEPTH_6:
4028                 intel_dp->compliance.test_data.bpc = 6;
4029                 break;
4030         case DP_TEST_BIT_DEPTH_8:
4031                 intel_dp->compliance.test_data.bpc = 8;
4032                 break;
4033         default:
4034                 return DP_TEST_NAK;
4035         }
4036
4037         intel_dp->compliance.test_data.video_pattern = test_pattern;
4038         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4039         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4040         /* Set test active flag here so userspace doesn't interrupt things */
4041         intel_dp->compliance.test_active = 1;
4042
4043         return DP_TEST_ACK;
4044 }
4045
4046 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4047 {
4048         uint8_t test_result = DP_TEST_ACK;
4049         struct intel_connector *intel_connector = intel_dp->attached_connector;
4050         struct drm_connector *connector = &intel_connector->base;
4051
4052         if (intel_connector->detect_edid == NULL ||
4053             connector->edid_corrupt ||
4054             intel_dp->aux.i2c_defer_count > 6) {
4055                 /* Check EDID read for NACKs, DEFERs and corruption
4056                  * (DP CTS 1.2 Core r1.1)
4057                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4058                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4059                  *    4.2.2.6 : EDID corruption detected
4060                  * Use failsafe mode for all cases
4061                  */
4062                 if (intel_dp->aux.i2c_nack_count > 0 ||
4063                         intel_dp->aux.i2c_defer_count > 0)
4064                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4065                                       intel_dp->aux.i2c_nack_count,
4066                                       intel_dp->aux.i2c_defer_count);
4067                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4068         } else {
4069                 struct edid *block = intel_connector->detect_edid;
4070
4071                 /* We have to write the checksum
4072                  * of the last block read
4073                  */
4074                 block += intel_connector->detect_edid->extensions;
4075
4076                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4077                                         DP_TEST_EDID_CHECKSUM,
4078                                         &block->checksum,
4079                                         1))
4080                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4081
4082                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4083                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4084         }
4085
4086         /* Set test active flag here so userspace doesn't interrupt things */
4087         intel_dp->compliance.test_active = 1;
4088
4089         return test_result;
4090 }
4091
4092 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4093 {
4094         uint8_t test_result = DP_TEST_NAK;
4095         return test_result;
4096 }
4097
/*
 * Dispatch a sink-initiated automated test (DP_AUTOMATED_TEST_REQUEST).
 *
 * Reads DP_TEST_REQUEST to learn which compliance test the sink wants,
 * hands it to the matching intel_dp_autotest_*() handler, and writes the
 * handler's ACK/NAK verdict back to DP_TEST_RESPONSE. On any failure the
 * default DP_TEST_NAK response is still written back.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
		break;
	}

	/* Remember which test was ACKed so later paths can act on it. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4140
/*
 * Service an MST short-pulse interrupt.
 *
 * Reads the ESI block, retrains the link if channel EQ has been lost,
 * lets the MST topology manager handle any hotplug/sideband IRQs, and
 * acks the serviced ESI bits back to the sink — looping while the sink
 * keeps reporting events. If the ESI read fails, MST mode is torn down
 * and a hotplug event is sent so userspace re-detects the port.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode (or after MST teardown).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits; retry the
				 * 3-byte DPCD write a few times since aux
				 * transfers can fail transiently. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Re-read: more events may have arrived
				 * while we were servicing these. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4197
/*
 * Retrain the DP link on an active CRTC.
 *
 * FIFO underrun reporting is disabled around the retrain (and until the
 * next vblank) because retraining is expected to cause transient
 * underruns that would otherwise spam the logs.
 */
static void
intel_dp_retrain_link(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);
}
4222
4223 static void
4224 intel_dp_check_link_status(struct intel_dp *intel_dp)
4225 {
4226         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4227         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4228         u8 link_status[DP_LINK_STATUS_SIZE];
4229
4230         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4231
4232         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4233                 DRM_ERROR("Failed to get link status\n");
4234                 return;
4235         }
4236
4237         if (!intel_encoder->base.crtc)
4238                 return;
4239
4240         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4241                 return;
4242
4243         /* FIXME: we need to synchronize this sort of stuff with hardware
4244          * readout. Currently fast link training doesn't work on boot-up. */
4245         if (!intel_dp->lane_count)
4246                 return;
4247
4248         /* Retrain if Channel EQ or CR not ok */
4249         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4250                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4251                               intel_encoder->base.name);
4252
4253                 intel_dp_retrain_link(intel_dp);
4254         }
4255 }
4256
4257 /*
4258  * According to DP spec
4259  * 5.1.2:
4260  *  1. Read DPCD
4261  *  2. Configure link according to Receiver Capabilities
4262  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4263  *  4. Check link status on receipt of hot-plug interrupt
4264  *
4265  * intel_dp_short_pulse -  handles short pulse interrupts
4266  * when full detection is not required.
4267  * Returns %true if short pulse is handled and full detection
4268  * is NOT required and %false otherwise.
4269  */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector = 0;
	/* Snapshot before the DPCD re-read below overwrites it. */
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Link status check requires connection_mutex (see the WARN_ON in
	 * intel_dp_check_link_status()). */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	intel_dp_check_link_status(intel_dp);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(intel_encoder->base.dev);
	}

	return true;
}
4324
4325 /* XXX this is probably wrong for multiple downstream ports */
4326 static enum drm_connector_status
4327 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4328 {
4329         uint8_t *dpcd = intel_dp->dpcd;
4330         uint8_t type;
4331
4332         if (!intel_dp_get_dpcd(intel_dp))
4333                 return connector_status_disconnected;
4334
4335         if (is_edp(intel_dp))
4336                 return connector_status_connected;
4337
4338         /* if there's no downstream port, we're done */
4339         if (!drm_dp_is_branch(dpcd))
4340                 return connector_status_connected;
4341
4342         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4343         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4344             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4345
4346                 return intel_dp->sink_count ?
4347                 connector_status_connected : connector_status_disconnected;
4348         }
4349
4350         if (intel_dp_can_mst(intel_dp))
4351                 return connector_status_connected;
4352
4353         /* If no HPD, poke DDC gently */
4354         if (drm_probe_ddc(&intel_dp->aux.ddc))
4355                 return connector_status_connected;
4356
4357         /* Well we tried, say unknown for unreliable port types */
4358         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4359                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4360                 if (type == DP_DS_PORT_TYPE_VGA ||
4361                     type == DP_DS_PORT_TYPE_NON_EDID)
4362                         return connector_status_unknown;
4363         } else {
4364                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4365                         DP_DWN_STRM_PORT_TYPE_MASK;
4366                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4367                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4368                         return connector_status_unknown;
4369         }
4370
4371         /* Anything else is out of spec, warn and ignore */
4372         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4373         return connector_status_disconnected;
4374 }
4375
4376 static enum drm_connector_status
4377 edp_detect(struct intel_dp *intel_dp)
4378 {
4379         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4380         struct drm_i915_private *dev_priv = to_i915(dev);
4381         enum drm_connector_status status;
4382
4383         status = intel_panel_detect(dev_priv);
4384         if (status == connector_status_unknown)
4385                 status = connector_status_connected;
4386
4387         return status;
4388 }
4389
/*
 * Live HPD state for IBX-class PCH platforms, read from the south
 * display engine ISR (SDEISR). Port A has no HPD bit here and is
 * reported as always connected.
 */
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4414
/*
 * Live HPD state for CPT/SPT-class PCH platforms, read from SDEISR
 * using the CPT bit layout (plus port E on SPT). Port A has no HPD bit
 * and is reported as always connected.
 */
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	case PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4442
4443 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4444                                        struct intel_digital_port *port)
4445 {
4446         u32 bit;
4447
4448         switch (port->port) {
4449         case PORT_B:
4450                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4451                 break;
4452         case PORT_C:
4453                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4454                 break;
4455         case PORT_D:
4456                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4457                 break;
4458         default:
4459                 MISSING_CASE(port->port);
4460                 return false;
4461         }
4462
4463         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4464 }
4465
/*
 * Live HPD state for GM45, which uses a different PORT_HOTPLUG_STAT
 * live-status bit layout than the rest of G4x.
 */
static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
					struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4488
/*
 * Live HPD state for Broxton/GEN9-LP, read from GEN8_DE_PORT_ISR. The
 * port is derived from the encoder's HPD pin rather than taken from
 * port->port directly.
 */
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	/* NOTE(review): the return value of intel_hpd_pin_to_port() is
	 * ignored here; if the pin-to-port lookup can fail, 'port' would
	 * be used uninitialized — confirm against the helper's contract. */
	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4514
4515 /*
4516  * intel_digital_port_connected - is the specified port connected?
4517  * @dev_priv: i915 private structure
4518  * @port: the port to test
4519  *
4520  * Return %true if @port is connected, %false otherwise.
4521  */
4522 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4523                                   struct intel_digital_port *port)
4524 {
4525         if (HAS_PCH_IBX(dev_priv))
4526                 return ibx_digital_port_connected(dev_priv, port);
4527         else if (HAS_PCH_SPLIT(dev_priv))
4528                 return cpt_digital_port_connected(dev_priv, port);
4529         else if (IS_GEN9_LP(dev_priv))
4530                 return bxt_digital_port_connected(dev_priv, port);
4531         else if (IS_GM45(dev_priv))
4532                 return gm45_digital_port_connected(dev_priv, port);
4533         else
4534                 return g4x_digital_port_connected(dev_priv, port);
4535 }
4536
4537 static struct edid *
4538 intel_dp_get_edid(struct intel_dp *intel_dp)
4539 {
4540         struct intel_connector *intel_connector = intel_dp->attached_connector;
4541
4542         /* use cached edid if we have one */
4543         if (intel_connector->edid) {
4544                 /* invalid edid */
4545                 if (IS_ERR(intel_connector->edid))
4546                         return NULL;
4547
4548                 return drm_edid_duplicate(intel_connector->edid);
4549         } else
4550                 return drm_get_edid(&intel_connector->base,
4551                                     &intel_dp->aux.ddc);
4552 }
4553
4554 static void
4555 intel_dp_set_edid(struct intel_dp *intel_dp)
4556 {
4557         struct intel_connector *intel_connector = intel_dp->attached_connector;
4558         struct edid *edid;
4559
4560         intel_dp_unset_edid(intel_dp);
4561         edid = intel_dp_get_edid(intel_dp);
4562         intel_connector->detect_edid = edid;
4563
4564         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4565                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4566         else
4567                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4568 }
4569
/*
 * Drop the cached detect-time EDID and the audio capability that was
 * derived from it. Safe to call when no EDID is cached (kfree(NULL)
 * is a no-op).
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}
4580
/*
 * Full (long-pulse) detection for a DP connector.
 *
 * Checks physical presence, re-reads the DPCD, refreshes sink caps and
 * MST state, re-checks link status on an already-connected display,
 * fetches the EDID and services any pending sink IRQs — all under an
 * AUX power-domain reference. Sets intel_dp->detect_done when the EDID
 * path was reached so intel_dp_detect() can skip a redundant pass.
 *
 * Returns the resulting connector status.
 */
static enum drm_connector_status
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	u8 sink_irq_vector = 0;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget any stale compliance-test state on unplug. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DP;

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Set the max lane count for sink */
	intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);

	/* Set the max link BW for sink */
	intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);

	intel_dp_print_rates(intel_dp);

	intel_dp_read_desc(intel_dp);

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	} else if (connector->status == connector_status_connected) {
		/*
		 * If display was connected already and is still connected
		 * check links status, there has been known issues of
		 * link loss triggering long pulse!!!!
		 */
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		intel_dp_check_link_status(intel_dp);
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (is_edp(intel_dp) || intel_connector->detect_edid)
		status = connector_status_connected;
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4694
/*
 * drm_connector .detect() hook for DP.
 *
 * Runs a full long-pulse detection unless one was already done (the
 * detect_done flag set by intel_dp_long_pulse()), in which case the
 * previously-established connector status is returned. The flag is
 * always cleared so the next call re-detects.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	enum drm_connector_status status = connector->status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	/* If full detect is not performed yet, do a full detect */
	if (!intel_dp->detect_done)
		status = intel_dp_long_pulse(intel_dp->attached_connector);

	intel_dp->detect_done = false;

	return status;
}
4712
/*
 * Forced-state hook (drm_connector_funcs.force): refresh the cached EDID
 * for a connector whose status was forced by the user instead of probed.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Drop any stale cached EDID before (possibly) re-reading it. */
	intel_dp_unset_edid(intel_dp);

	/* Nothing to read back if the state was forced to disconnected. */
	if (connector->status != connector_status_connected)
		return;

	/* EDID reads go over AUX, which needs its power domain held. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DP;
}
4738
4739 static int intel_dp_get_modes(struct drm_connector *connector)
4740 {
4741         struct intel_connector *intel_connector = to_intel_connector(connector);
4742         struct edid *edid;
4743
4744         edid = intel_connector->detect_edid;
4745         if (edid) {
4746                 int ret = intel_connector_update_modes(connector, edid);
4747                 if (ret)
4748                         return ret;
4749         }
4750
4751         /* if eDP has no EDID, fall back to fixed mode */
4752         if (is_edp(intel_attached_dp(connector)) &&
4753             intel_connector->panel.fixed_mode) {
4754                 struct drm_display_mode *mode;
4755
4756                 mode = drm_mode_duplicate(connector->dev,
4757                                           intel_connector->panel.fixed_mode);
4758                 if (mode) {
4759                         drm_mode_probed_add(connector, mode);
4760                         return 1;
4761                 }
4762         }
4763
4764         return 0;
4765 }
4766
4767 static bool
4768 intel_dp_detect_audio(struct drm_connector *connector)
4769 {
4770         bool has_audio = false;
4771         struct edid *edid;
4772
4773         edid = to_intel_connector(connector)->detect_edid;
4774         if (edid)
4775                 has_audio = drm_detect_monitor_audio(edid);
4776
4777         return has_audio;
4778 }
4779
/*
 * Legacy connector property setter for DP/eDP connectors.
 *
 * Handles the force_audio property, the Broadcast RGB property, and (on
 * eDP only) the panel scaling mode. Returns 0 on success or a negative
 * errno. If a change affects the output configuration and the encoder is
 * bound to a CRTC, the current mode is restored to apply it.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the raw value on the DRM object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio support from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No modeset needed if the effective state is unchanged. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}
		/* GMCH display platforms don't support centered scaling. */
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    val == DRM_MODE_SCALE_CENTER) {
			DRM_DEBUG_KMS("centering not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new setting by redoing the modeset on the bound CRTC. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4872
4873 static int
4874 intel_dp_connector_register(struct drm_connector *connector)
4875 {
4876         struct intel_dp *intel_dp = intel_attached_dp(connector);
4877         int ret;
4878
4879         ret = intel_connector_register(connector);
4880         if (ret)
4881                 return ret;
4882
4883         i915_debugfs_connector_add(connector);
4884
4885         DRM_DEBUG_KMS("registering %s bus for %s\n",
4886                       intel_dp->aux.name, connector->kdev->kobj.name);
4887
4888         intel_dp->aux.dev = connector->kdev;
4889         return drm_dp_aux_register(&intel_dp->aux);
4890 }
4891
4892 static void
4893 intel_dp_connector_unregister(struct drm_connector *connector)
4894 {
4895         drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4896         intel_connector_unregister(connector);
4897 }
4898
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* EDID cached by the most recent detect; kfree(NULL) is a no-op. */
	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR from a failed read, not just NULL. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4917
/*
 * Encoder destroy hook: tear down MST state, make sure eDP VDD is really
 * off, drop the reboot notifier and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		/* Stop the delayed VDD-off work before syncing below. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);

	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4945
/* Suspend hook: for eDP, force any pending delayed VDD off synchronously. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4962
/*
 * Reconcile our power-domain tracking with VDD state left on by the BIOS.
 * Must be called with pps_mutex held.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to sanitize if VDD is already off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4987
4988 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
4989 {
4990         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
4991
4992         if ((intel_dp->DP & DP_PORT_EN) == 0)
4993                 return INVALID_PIPE;
4994
4995         if (IS_CHERRYVIEW(dev_priv))
4996                 return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
4997         else
4998                 return PORT_TO_PIPE(intel_dp->DP);
4999 }
5000
/*
 * Encoder reset hook: re-sync software state (port register cache, LSPCON,
 * VLV/CHV active pipe, eDP power sequencer) with whatever the hardware/BIOS
 * left behind, e.g. across resume.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

	/* DDI platforms don't use the cached DP port register value. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	if (is_edp(intel_dp)) {
		/* Reinit the power sequencer, in case BIOS did something with it. */
		intel_dp_pps_init(encoder->dev, intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	pps_unlock(intel_dp);
}
5026
/* Connector ops shared by all DP/eDP connectors (atomic helpers + ours). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5040
/* Probe helpers: mode enumeration and validation for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
};
5045
/* Encoder ops for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5050
/*
 * Hotplug pulse handler for DP ports, called from the hotplug irq path.
 *
 * Long pulses defer the actual (expensive) detection to the connector's
 * ->detect() by clearing detect_done; short pulses are serviced here
 * directly (MST messages, link status, sink irqs) under the AUX power
 * domain. Returns IRQ_HANDLED when the pulse was fully dealt with,
 * IRQ_NONE when a full detect still needs to run.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Reset a stale encoder type, but never clobber eDP or HDMI. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DP;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		/* Force a full detect on the next ->detect() call. */
		intel_dp->detect_done = false;
		return IRQ_NONE;
	}

	/* Short-pulse handling talks to the sink over AUX. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		/* Short-pulse handling failed: fall back to a full detect. */
		if (!intel_dp_short_pulse(intel_dp)) {
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5119
5120 /* check the VBT to see whether the eDP is on another port */
5121 bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
5122 {
5123         /*
5124          * eDP not supported on g4x. so bail out early just
5125          * for a bit extra safety in case the VBT is bonkers.
5126          */
5127         if (INTEL_GEN(dev_priv) < 5)
5128                 return false;
5129
5130         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5131                 return true;
5132
5133         return intel_bios_is_port_edp(dev_priv, port);
5134 }
5135
5136 void
5137 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5138 {
5139         struct intel_connector *intel_connector = to_intel_connector(connector);
5140
5141         intel_attach_force_audio_property(connector);
5142         intel_attach_broadcast_rgb_property(connector);
5143         intel_dp->color_range_auto = true;
5144
5145         if (is_edp(intel_dp)) {
5146                 drm_mode_create_scaling_mode_property(connector->dev);
5147                 drm_object_attach_property(
5148                         &connector->base,
5149                         connector->dev->mode_config.scaling_mode_property,
5150                         DRM_MODE_SCALE_ASPECT);
5151                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5152         }
5153 }
5154
5155 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5156 {
5157         intel_dp->panel_power_off_time = ktime_get_boottime();
5158         intel_dp->last_power_on = jiffies;
5159         intel_dp->last_backlight_off = jiffies;
5160 }
5161
/*
 * Read the panel power sequencer delays back from the hardware registers
 * into @seq (all values in the hardware's 100us units, except where noted).
 */
static void
intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
			   struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	struct pps_registers regs;

	intel_pps_get_registers(dev_priv, intel_dp, &regs);

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);
	if (!IS_GEN9_LP(dev_priv)) {
		/* Gen9 LP has no PP_DIV register; the power cycle delay
		 * lives in PP_CONTROL instead (handled below). */
		I915_WRITE(regs.pp_ctrl, pp_ctl);
		pp_div = I915_READ(regs.pp_div);
	}

	/* Pull timing values out of registers */
	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		     PANEL_POWER_UP_DELAY_SHIFT;

	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		  PANEL_LIGHT_ON_DELAY_SHIFT;

	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		  PANEL_LIGHT_OFF_DELAY_SHIFT;

	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		   PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_GEN9_LP(dev_priv)) {
		/* BXT stores t11_t12 zero-based in units of 100ms; convert
		 * to the common 100us representation. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			seq->t11_t12 = (tmp - 1) * 1000;
		else
			seq->t11_t12 = 0;
	} else {
		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}
}
5207
/* Dump one set of panel power sequencer delays for debugging. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
5215
5216 static void
5217 intel_pps_verify_state(struct drm_i915_private *dev_priv,
5218                        struct intel_dp *intel_dp)
5219 {
5220         struct edp_power_seq hw;
5221         struct edp_power_seq *sw = &intel_dp->pps_delays;
5222
5223         intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
5224
5225         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
5226             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
5227                 DRM_ERROR("PPS state mismatch\n");
5228                 intel_pps_dump_state("sw", sw);
5229                 intel_pps_dump_state("hw", &hw);
5230         }
5231 }
5232
/*
 * Compute the panel power sequencer delays by merging the current register
 * values, the VBT values and the eDP spec limits, caching the result in
 * intel_dp->pps_delays. Must be called with pps_mutex held; a no-op once
 * the delays have been initialized (t11_t12 != 0 acts as the flag).
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert the merged 100us-unit delays into milliseconds for the
	 * software wait helpers. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;
}
5304
/*
 * Program the computed panel power sequencer delays and port selection
 * into the hardware registers. Must be called with pps_mutex held.
 * @force_disable_vdd clears a BIOS-left VDD force bit first (see below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(dev_priv, intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power seqeuencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power seqeuencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_GEN9_LP(dev_priv)) {
		/* BXT keeps the power cycle delay in PP_CONTROL, stored
		 * zero-based in units of 100ms. */
		pp_div = I915_READ(regs.pp_ctrl);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);
	if (IS_GEN9_LP(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_div);
	else
		I915_WRITE(regs.pp_div, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      IS_GEN9_LP(dev_priv) ?
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(regs.pp_div));
}
5390
5391 static void intel_dp_pps_init(struct drm_device *dev,
5392                               struct intel_dp *intel_dp)
5393 {
5394         struct drm_i915_private *dev_priv = to_i915(dev);
5395
5396         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5397                 vlv_initial_power_sequencer_setup(intel_dp);
5398         } else {
5399                 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5400                 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5401         }
5402 }
5403
5404 /**
5405  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5406  * @dev_priv: i915 device
5407  * @crtc_state: a pointer to the active intel_crtc_state
5408  * @refresh_rate: RR to be programmed
5409  *
5410  * This function gets called when refresh rate (RR) has to be changed from
5411  * one frequency to another. Switches can be between high and low RR
5412  * supported by the panel or to any other RR based on media playback (in
5413  * this case, RR value needs to be passed from user space).
5414  *
5415  * The caller of this function needs to take a lock on dev_priv->drrs.
5416  */
5417 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5418                                     struct intel_crtc_state *crtc_state,
5419                                     int refresh_rate)
5420 {
5421         struct intel_encoder *encoder;
5422         struct intel_digital_port *dig_port = NULL;
5423         struct intel_dp *intel_dp = dev_priv->drrs.dp;
5424         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
5425         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5426
5427         if (refresh_rate <= 0) {
5428                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5429                 return;
5430         }
5431
5432         if (intel_dp == NULL) {
5433                 DRM_DEBUG_KMS("DRRS not supported.\n");
5434                 return;
5435         }
5436
5437         /*
5438          * FIXME: This needs proper synchronization with psr state for some
5439          * platforms that cannot have PSR and DRRS enabled at the same time.
5440          */
5441
5442         dig_port = dp_to_dig_port(intel_dp);
5443         encoder = &dig_port->base;
5444         intel_crtc = to_intel_crtc(encoder->base.crtc);
5445
5446         if (!intel_crtc) {
5447                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5448                 return;
5449         }
5450
5451         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5452                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5453                 return;
5454         }
5455
5456         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5457                         refresh_rate)
5458                 index = DRRS_LOW_RR;
5459
5460         if (index == dev_priv->drrs.refresh_rate_type) {
5461                 DRM_DEBUG_KMS(
5462                         "DRRS requested for previously set RR...ignoring\n");
5463                 return;
5464         }
5465
5466         if (!crtc_state->base.active) {
5467                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5468                 return;
5469         }
5470
5471         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
5472                 switch (index) {
5473                 case DRRS_HIGH_RR:
5474                         intel_dp_set_m_n(intel_crtc, M1_N1);
5475                         break;
5476                 case DRRS_LOW_RR:
5477                         intel_dp_set_m_n(intel_crtc, M2_N2);
5478                         break;
5479                 case DRRS_MAX_RR:
5480                 default:
5481                         DRM_ERROR("Unsupported refreshrate type\n");
5482                 }
5483         } else if (INTEL_GEN(dev_priv) > 6) {
5484                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
5485                 u32 val;
5486
5487                 val = I915_READ(reg);
5488                 if (index > DRRS_HIGH_RR) {
5489                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5490                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5491                         else
5492                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5493                 } else {
5494                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5495                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5496                         else
5497                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5498                 }
5499                 I915_WRITE(reg, val);
5500         }
5501
5502         dev_priv->drrs.refresh_rate_type = index;
5503
5504         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5505 }
5506
5507 /**
5508  * intel_edp_drrs_enable - init drrs struct if supported
5509  * @intel_dp: DP struct
5510  * @crtc_state: A pointer to the active crtc state.
5511  *
5512  * Initializes frontbuffer_bits and drrs.dp
5513  */
5514 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5515                            struct intel_crtc_state *crtc_state)
5516 {
5517         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5518         struct drm_i915_private *dev_priv = to_i915(dev);
5519
5520         if (!crtc_state->has_drrs) {
5521                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5522                 return;
5523         }
5524
5525         mutex_lock(&dev_priv->drrs.mutex);
5526         if (WARN_ON(dev_priv->drrs.dp)) {
5527                 DRM_ERROR("DRRS already enabled\n");
5528                 goto unlock;
5529         }
5530
5531         dev_priv->drrs.busy_frontbuffer_bits = 0;
5532
5533         dev_priv->drrs.dp = intel_dp;
5534
5535 unlock:
5536         mutex_unlock(&dev_priv->drrs.mutex);
5537 }
5538
5539 /**
5540  * intel_edp_drrs_disable - Disable DRRS
5541  * @intel_dp: DP struct
5542  * @old_crtc_state: Pointer to old crtc_state.
5543  *
5544  */
5545 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5546                             struct intel_crtc_state *old_crtc_state)
5547 {
5548         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5549         struct drm_i915_private *dev_priv = to_i915(dev);
5550
5551         if (!old_crtc_state->has_drrs)
5552                 return;
5553
5554         mutex_lock(&dev_priv->drrs.mutex);
5555         if (!dev_priv->drrs.dp) {
5556                 mutex_unlock(&dev_priv->drrs.mutex);
5557                 return;
5558         }
5559
5560         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5561                 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
5562                         intel_dp->attached_connector->panel.fixed_mode->vrefresh);
5563
5564         dev_priv->drrs.dp = NULL;
5565         mutex_unlock(&dev_priv->drrs.mutex);
5566
5567         cancel_delayed_work_sync(&dev_priv->drrs.work);
5568 }
5569
5570 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5571 {
5572         struct drm_i915_private *dev_priv =
5573                 container_of(work, typeof(*dev_priv), drrs.work.work);
5574         struct intel_dp *intel_dp;
5575
5576         mutex_lock(&dev_priv->drrs.mutex);
5577
5578         intel_dp = dev_priv->drrs.dp;
5579
5580         if (!intel_dp)
5581                 goto unlock;
5582
5583         /*
5584          * The delayed work can race with an invalidate hence we need to
5585          * recheck.
5586          */
5587
5588         if (dev_priv->drrs.busy_frontbuffer_bits)
5589                 goto unlock;
5590
5591         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
5592                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5593
5594                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5595                         intel_dp->attached_connector->panel.downclock_mode->vrefresh);
5596         }
5597
5598 unlock:
5599         mutex_unlock(&dev_priv->drrs.mutex);
5600 }
5601
5602 /**
5603  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5604  * @dev_priv: i915 device
5605  * @frontbuffer_bits: frontbuffer plane tracking bits
5606  *
5607  * This function gets called everytime rendering on the given planes start.
5608  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5609  *
5610  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5611  */
5612 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
5613                                unsigned int frontbuffer_bits)
5614 {
5615         struct drm_crtc *crtc;
5616         enum pipe pipe;
5617
5618         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5619                 return;
5620
5621         cancel_delayed_work(&dev_priv->drrs.work);
5622
5623         mutex_lock(&dev_priv->drrs.mutex);
5624         if (!dev_priv->drrs.dp) {
5625                 mutex_unlock(&dev_priv->drrs.mutex);
5626                 return;
5627         }
5628
5629         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5630         pipe = to_intel_crtc(crtc)->pipe;
5631
5632         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5633         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5634
5635         /* invalidate means busy screen hence upclock */
5636         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5637                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5638                         dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5639
5640         mutex_unlock(&dev_priv->drrs.mutex);
5641 }
5642
5643 /**
5644  * intel_edp_drrs_flush - Restart Idleness DRRS
5645  * @dev_priv: i915 device
5646  * @frontbuffer_bits: frontbuffer plane tracking bits
5647  *
5648  * This function gets called every time rendering on the given planes has
5649  * completed or flip on a crtc is completed. So DRRS should be upclocked
5650  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5651  * if no other planes are dirty.
5652  *
5653  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5654  */
5655 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
5656                           unsigned int frontbuffer_bits)
5657 {
5658         struct drm_crtc *crtc;
5659         enum pipe pipe;
5660
5661         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5662                 return;
5663
5664         cancel_delayed_work(&dev_priv->drrs.work);
5665
5666         mutex_lock(&dev_priv->drrs.mutex);
5667         if (!dev_priv->drrs.dp) {
5668                 mutex_unlock(&dev_priv->drrs.mutex);
5669                 return;
5670         }
5671
5672         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5673         pipe = to_intel_crtc(crtc)->pipe;
5674
5675         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5676         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5677
5678         /* flush means busy screen hence upclock */
5679         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5680                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5681                                 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5682
5683         /*
5684          * flush also means no more activity hence schedule downclock, if all
5685          * other fbs are quiescent too
5686          */
5687         if (!dev_priv->drrs.busy_frontbuffer_bits)
5688                 schedule_delayed_work(&dev_priv->drrs.work,
5689                                 msecs_to_jiffies(1000));
5690         mutex_unlock(&dev_priv->drrs.mutex);
5691 }
5692
5693 /**
5694  * DOC: Display Refresh Rate Switching (DRRS)
5695  *
5696  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5698  * dynamically, based on the usage scenario. This feature is applicable
5699  * for internal panels.
5700  *
5701  * Indication that the panel supports DRRS is given by the panel EDID, which
5702  * would list multiple refresh rates for one resolution.
5703  *
5704  * DRRS is of 2 types - static and seamless.
5705  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5706  * (may appear as a blink on screen) and is used in dock-undock scenario.
5707  * Seamless DRRS involves changing RR without any visual effect to the user
5708  * and can be used during normal system usage. This is done by programming
5709  * certain registers.
5710  *
5711  * Support for static/seamless DRRS may be indicated in the VBT based on
5712  * inputs from the panel spec.
5713  *
5714  * DRRS saves power by switching to low RR based on usage scenarios.
5715  *
5716  * The implementation is based on frontbuffer tracking implementation.  When
5717  * there is a disturbance on the screen triggered by user activity or a periodic
5718  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5719  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5720  * made.
5721  *
5722  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5723  * and intel_edp_drrs_flush() are called.
5724  *
5725  * DRRS can be further extended to support other internal panels and also
5726  * the scenario of video playback wherein RR is set based on the rate
5727  * requested by userspace.
5728  */
5729
5730 /**
5731  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5732  * @intel_connector: eDP connector
5733  * @fixed_mode: preferred mode of panel
5734  *
5735  * This function is  called only once at driver load to initialize basic
5736  * DRRS stuff.
5737  *
5738  * Returns:
5739  * Downclock mode if panel supports it, else return NULL.
5740  * DRRS support is determined by the presence of downclock mode (apart
5741  * from VBT setting).
5742  */
5743 static struct drm_display_mode *
5744 intel_dp_drrs_init(struct intel_connector *intel_connector,
5745                 struct drm_display_mode *fixed_mode)
5746 {
5747         struct drm_connector *connector = &intel_connector->base;
5748         struct drm_device *dev = connector->dev;
5749         struct drm_i915_private *dev_priv = to_i915(dev);
5750         struct drm_display_mode *downclock_mode = NULL;
5751
5752         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5753         mutex_init(&dev_priv->drrs.mutex);
5754
5755         if (INTEL_GEN(dev_priv) <= 6) {
5756                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5757                 return NULL;
5758         }
5759
5760         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5761                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5762                 return NULL;
5763         }
5764
5765         downclock_mode = intel_find_panel_downclock
5766                                         (dev_priv, fixed_mode, connector);
5767
5768         if (!downclock_mode) {
5769                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5770                 return NULL;
5771         }
5772
5773         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5774
5775         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5776         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5777         return downclock_mode;
5778 }
5779
/*
 * eDP-specific part of connector init: bring up the panel power sequencer,
 * cache DPCD and EDID, pick a fixed mode (EDID preferred, VBT fallback),
 * init DRRS and the backlight.  Returns true on success or when the port
 * is not eDP at all; false means the eDP panel is unusable (caller tears
 * the connector down).  The PPS/VDD ordering below is deliberate - do not
 * reorder without checking the power sequencing consequences.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        /* Nothing to do for external DP ports. */
        if (!is_edp(intel_dp))
                return true;

        /*
         * On IBX/CPT we may get here with LVDS already registered. Since the
         * driver uses the only internal power sequencer available for both
         * eDP and LVDS bail out early in this case to prevent interfering
         * with an already powered-on LVDS power sequencer.
         */
        if (intel_get_lvds_encoder(dev)) {
                WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
                DRM_INFO("LVDS was detected, not registering eDP\n");

                return false;
        }

        /* Set up panel power sequencing state under the pps lock. */
        pps_lock(intel_dp);

        intel_dp_init_panel_power_timestamps(intel_dp);
        intel_dp_pps_init(dev, intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);

        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_edp_init_dpcd(intel_dp);

        if (!has_dpcd) {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                goto out_vdd_off;
        }

        /* EDID access and probed_modes need the mode_config lock. */
        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        /* EDID read but unparseable - keep an error marker. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        downclock_mode = intel_dp_drrs_init(
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
                        connector->display_info.width_mm = fixed_mode->width_mm;
                        connector->display_info.height_mm = fixed_mode->height_mm;
                }
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                pipe = vlv_active_pipe(intel_dp);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;

out_vdd_off:
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);

        return false;
}
5905
/*
 * Finish DP/eDP connector setup for a digital port: select the platform
 * AUX vfuncs, register the DRM connector, wire up the HPD pin, initialize
 * MST where supported, and perform the eDP-specific init.  Returns false
 * (after cleaning up the connector) on failure.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        int type;

        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
                return false;

        intel_dp->pps_pipe = INVALID_PIPE;
        intel_dp->active_pipe = INVALID_PIPE;

        /* intel_dp vfuncs */
        /* Pick the AUX clock divider implementation by platform. */
        if (INTEL_GEN(dev_priv) >= 9)
                intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else if (HAS_PCH_SPLIT(dev_priv))
                intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

        if (INTEL_GEN(dev_priv) >= 9)
                intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
        else
                intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

        if (HAS_DDI(dev_priv))
                intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_edp(dev_priv, port))
                type = DRM_MODE_CONNECTOR_eDP;
        else
                type = DRM_MODE_CONNECTOR_DisplayPort;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
                    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                        type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                        port_name(port));

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        intel_dp_aux_init(intel_dp);

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);

        intel_connector_attach_encoder(intel_connector, intel_encoder);

        if (HAS_DDI(dev_priv))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;

        /* Set up the hotplug pin. */
        switch (port) {
        case PORT_A:
                intel_encoder->hpd_pin = HPD_PORT_A;
                break;
        case PORT_B:
                intel_encoder->hpd_pin = HPD_PORT_B;
                break;
        case PORT_C:
                intel_encoder->hpd_pin = HPD_PORT_C;
                break;
        case PORT_D:
                intel_encoder->hpd_pin = HPD_PORT_D;
                break;
        case PORT_E:
                intel_encoder->hpd_pin = HPD_PORT_E;
                break;
        default:
                /* Callers never pass any other port. */
                BUG();
        }

        /* init MST on ports that can support it */
        if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
            (port == PORT_B || port == PORT_C || port == PORT_D))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);

        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                /* Undo the AUX and MST setup done above before bailing. */
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(intel_dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        return true;

fail:
        drm_connector_cleanup(connector);

        return false;
}
6042
/*
 * Allocate and register a DP encoder/connector pair for @port driven by
 * @output_reg.  Selects the per-platform encoder hooks (CHV/VLV/other),
 * registers the HPD pulse handler and finishes via
 * intel_dp_init_connector().  Returns true on success; on failure all
 * partially-initialized state is unwound via the goto chain below.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
                   i915_reg_t output_reg,
                   enum port port)
{
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;

        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return false;

        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;

        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;

        if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                             &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
                             "DP %c", port_name(port)))
                goto err_encoder_init;

        intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        intel_encoder->suspend = intel_dp_encoder_suspend;
        /* Enable/disable sequencing differs per platform. */
        if (IS_CHERRYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
                intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                if (INTEL_GEN(dev_priv) >= 5)
                        intel_encoder->post_disable = ilk_post_disable_dp;
        }

        intel_dig_port->port = port;
        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;

        intel_encoder->type = INTEL_OUTPUT_DP;
        /* Which pipes can drive this port. */
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
                        intel_encoder->crtc_mask = 1 << 2;
                else
                        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
        } else {
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;

        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
        dev_priv->hotplug.irq_port[port] = intel_dig_port;

        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;

        return true;

        /* Unwind in reverse order of the init steps above. */
err_init_connector:
        drm_encoder_cleanup(encoder);
err_encoder_init:
        kfree(intel_connector);
err_connector_alloc:
        kfree(intel_dig_port);
        return false;
}
6123
6124 void intel_dp_mst_suspend(struct drm_device *dev)
6125 {
6126         struct drm_i915_private *dev_priv = to_i915(dev);
6127         int i;
6128
6129         /* disable MST */
6130         for (i = 0; i < I915_MAX_PORTS; i++) {
6131                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6132
6133                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6134                         continue;
6135
6136                 if (intel_dig_port->dp.is_mst)
6137                         drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6138         }
6139 }
6140
6141 void intel_dp_mst_resume(struct drm_device *dev)
6142 {
6143         struct drm_i915_private *dev_priv = to_i915(dev);
6144         int i;
6145
6146         for (i = 0; i < I915_MAX_PORTS; i++) {
6147                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6148                 int ret;
6149
6150                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6151                         continue;
6152
6153                 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6154                 if (ret)
6155                         intel_dp_check_mst_status(&intel_dig_port->dp);
6156         }
6157 }