2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll[] = {
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 static const struct dp_link_dpll pch_dpll[] = {
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 static const struct dp_link_dpll vlv_dpll[] = {
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 static const int skl_rates[] = { 162000, 216000, 270000,
95 324000, 432000, 540000 };
96 static const int default_rates[] = { 162000, 270000, 540000 };
99 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
100 * @intel_dp: DP struct
102 * If a CPU or PCH DP output is attached to an eDP panel, this function
103 * will return true, and false otherwise.
105 static bool is_edp(struct intel_dp *intel_dp)
107 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
114 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
116 return intel_dig_port->base.base.dev;
119 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
121 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 static void intel_dp_link_down(struct intel_dp *intel_dp);
125 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
126 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
127 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
128 static void vlv_steal_power_sequencer(struct drm_device *dev,
132 intel_dp_max_link_bw(struct intel_dp *intel_dp)
134 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
136 switch (max_link_bw) {
137 case DP_LINK_BW_1_62:
142 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
144 max_link_bw = DP_LINK_BW_1_62;
150 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
152 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
153 struct drm_device *dev = intel_dig_port->base.base.dev;
154 u8 source_max, sink_max;
157 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
158 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
161 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
163 return min(source_max, sink_max);
167 * The units on the numbers in the next two are... bizarre. Examples will
168 * make it clearer; this one parallels an example in the eDP spec.
170 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
172 * 270000 * 1 * 8 / 10 == 216000
174 * The actual data capacity of that configuration is 2.16Gbit/s, so the
175 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
176 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
177 * 119000. At 18bpp that's 2142000 kilobits per second.
179 * Thus the strange-looking division by 10 in intel_dp_link_required, to
180 * get the result in decakilobits instead of kilobits.
184 intel_dp_link_required(int pixel_clock, int bpp)
186 return (pixel_clock * bpp + 9) / 10;
190 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
192 return (max_link_clock * max_lanes * 8) / 10;
195 static enum drm_mode_status
196 intel_dp_mode_valid(struct drm_connector *connector,
197 struct drm_display_mode *mode)
199 struct intel_dp *intel_dp = intel_attached_dp(connector);
200 struct intel_connector *intel_connector = to_intel_connector(connector);
201 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
202 int target_clock = mode->clock;
203 int max_rate, mode_rate, max_lanes, max_link_clock;
205 if (is_edp(intel_dp) && fixed_mode) {
206 if (mode->hdisplay > fixed_mode->hdisplay)
209 if (mode->vdisplay > fixed_mode->vdisplay)
212 target_clock = fixed_mode->clock;
215 max_link_clock = intel_dp_max_link_rate(intel_dp);
216 max_lanes = intel_dp_max_lane_count(intel_dp);
218 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
219 mode_rate = intel_dp_link_required(target_clock, 18);
221 if (mode_rate > max_rate)
222 return MODE_CLOCK_HIGH;
224 if (mode->clock < 10000)
225 return MODE_CLOCK_LOW;
227 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
228 return MODE_H_ILLEGAL;
233 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
240 for (i = 0; i < src_bytes; i++)
241 v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
250 for (i = 0; i < dst_bytes; i++)
251 dst[i] = src >> ((3-i) * 8);
254 /* hrawclock is 1/4 the FSB frequency */
256 intel_hrawclk(struct drm_device *dev)
258 struct drm_i915_private *dev_priv = dev->dev_private;
261 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
262 if (IS_VALLEYVIEW(dev))
265 clkcfg = I915_READ(CLKCFG);
266 switch (clkcfg & CLKCFG_FSB_MASK) {
275 case CLKCFG_FSB_1067:
277 case CLKCFG_FSB_1333:
279 /* these two are just a guess; one of them might be right */
280 case CLKCFG_FSB_1600:
281 case CLKCFG_FSB_1600_ALT:
289 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
290 struct intel_dp *intel_dp);
292 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
293 struct intel_dp *intel_dp);
295 static void pps_lock(struct intel_dp *intel_dp)
297 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
298 struct intel_encoder *encoder = &intel_dig_port->base;
299 struct drm_device *dev = encoder->base.dev;
300 struct drm_i915_private *dev_priv = dev->dev_private;
301 enum intel_display_power_domain power_domain;
304 * See vlv_power_sequencer_reset() why we need
305 * a power domain reference here.
307 power_domain = intel_display_port_power_domain(encoder);
308 intel_display_power_get(dev_priv, power_domain);
310 mutex_lock(&dev_priv->pps_mutex);
313 static void pps_unlock(struct intel_dp *intel_dp)
315 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
316 struct intel_encoder *encoder = &intel_dig_port->base;
317 struct drm_device *dev = encoder->base.dev;
318 struct drm_i915_private *dev_priv = dev->dev_private;
319 enum intel_display_power_domain power_domain;
321 mutex_unlock(&dev_priv->pps_mutex);
323 power_domain = intel_display_port_power_domain(encoder);
324 intel_display_power_put(dev_priv, power_domain);
328 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
330 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
331 struct drm_device *dev = intel_dig_port->base.base.dev;
332 struct drm_i915_private *dev_priv = dev->dev_private;
333 enum pipe pipe = intel_dp->pps_pipe;
337 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
338 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
339 pipe_name(pipe), port_name(intel_dig_port->port)))
342 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
343 pipe_name(pipe), port_name(intel_dig_port->port));
345 /* Preserve the BIOS-computed detected bit. This is
346 * supposed to be read-only.
348 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
349 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
350 DP |= DP_PORT_WIDTH(1);
351 DP |= DP_LINK_TRAIN_PAT_1;
353 if (IS_CHERRYVIEW(dev))
354 DP |= DP_PIPE_SELECT_CHV(pipe);
355 else if (pipe == PIPE_B)
356 DP |= DP_PIPEB_SELECT;
358 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
361 * The DPLL for the pipe must be enabled for this to work.
362 * So enable temporarily it if it's not already enabled.
365 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
366 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
369 * Similar magic as in intel_dp_enable_port().
370 * We _must_ do this port enable + disable trick
371 * to make this power seqeuencer lock onto the port.
372 * Otherwise even VDD force bit won't work.
374 I915_WRITE(intel_dp->output_reg, DP);
375 POSTING_READ(intel_dp->output_reg);
377 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
380 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
381 POSTING_READ(intel_dp->output_reg);
384 vlv_force_pll_off(dev, pipe);
388 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
390 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
391 struct drm_device *dev = intel_dig_port->base.base.dev;
392 struct drm_i915_private *dev_priv = dev->dev_private;
393 struct intel_encoder *encoder;
394 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
397 lockdep_assert_held(&dev_priv->pps_mutex);
399 /* We should never land here with regular DP ports */
400 WARN_ON(!is_edp(intel_dp));
402 if (intel_dp->pps_pipe != INVALID_PIPE)
403 return intel_dp->pps_pipe;
406 * We don't have power sequencer currently.
407 * Pick one that's not used by other ports.
409 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
411 struct intel_dp *tmp;
413 if (encoder->type != INTEL_OUTPUT_EDP)
416 tmp = enc_to_intel_dp(&encoder->base);
418 if (tmp->pps_pipe != INVALID_PIPE)
419 pipes &= ~(1 << tmp->pps_pipe);
423 * Didn't find one. This should not happen since there
424 * are two power sequencers and up to two eDP ports.
426 if (WARN_ON(pipes == 0))
429 pipe = ffs(pipes) - 1;
431 vlv_steal_power_sequencer(dev, pipe);
432 intel_dp->pps_pipe = pipe;
434 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
435 pipe_name(intel_dp->pps_pipe),
436 port_name(intel_dig_port->port));
438 /* init power sequencer on this pipe and port */
439 intel_dp_init_panel_power_sequencer(dev, intel_dp);
440 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
443 * Even vdd force doesn't work until we've made
444 * the power sequencer lock in on the port.
446 vlv_power_sequencer_kick(intel_dp);
448 return intel_dp->pps_pipe;
451 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
454 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
457 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
460 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
463 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
466 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
473 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
475 vlv_pipe_check pipe_check)
479 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
480 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
481 PANEL_PORT_SELECT_MASK;
483 if (port_sel != PANEL_PORT_SELECT_VLV(port))
486 if (!pipe_check(dev_priv, pipe))
496 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
498 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
499 struct drm_device *dev = intel_dig_port->base.base.dev;
500 struct drm_i915_private *dev_priv = dev->dev_private;
501 enum port port = intel_dig_port->port;
503 lockdep_assert_held(&dev_priv->pps_mutex);
505 /* try to find a pipe with this port selected */
506 /* first pick one where the panel is on */
507 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 /* didn't find one? pick one where vdd is on */
510 if (intel_dp->pps_pipe == INVALID_PIPE)
511 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
512 vlv_pipe_has_vdd_on);
513 /* didn't find one? pick one with just the correct port */
514 if (intel_dp->pps_pipe == INVALID_PIPE)
515 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
518 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
519 if (intel_dp->pps_pipe == INVALID_PIPE) {
520 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
525 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
526 port_name(port), pipe_name(intel_dp->pps_pipe));
528 intel_dp_init_panel_power_sequencer(dev, intel_dp);
529 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
532 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
534 struct drm_device *dev = dev_priv->dev;
535 struct intel_encoder *encoder;
537 if (WARN_ON(!IS_VALLEYVIEW(dev)))
541 * We can't grab pps_mutex here due to deadlock with power_domain
542 * mutex when power_domain functions are called while holding pps_mutex.
543 * That also means that in order to use pps_pipe the code needs to
544 * hold both a power domain reference and pps_mutex, and the power domain
545 * reference get/put must be done while _not_ holding pps_mutex.
546 * pps_{lock,unlock}() do these steps in the correct order, so one
547 * should use them always.
550 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
551 struct intel_dp *intel_dp;
553 if (encoder->type != INTEL_OUTPUT_EDP)
556 intel_dp = enc_to_intel_dp(&encoder->base);
557 intel_dp->pps_pipe = INVALID_PIPE;
561 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
563 struct drm_device *dev = intel_dp_to_dev(intel_dp);
565 if (HAS_PCH_SPLIT(dev))
566 return PCH_PP_CONTROL;
568 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
571 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
573 struct drm_device *dev = intel_dp_to_dev(intel_dp);
575 if (HAS_PCH_SPLIT(dev))
576 return PCH_PP_STATUS;
578 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
581 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
582 This function only applicable when panel PM state is not to be tracked */
583 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
586 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
588 struct drm_device *dev = intel_dp_to_dev(intel_dp);
589 struct drm_i915_private *dev_priv = dev->dev_private;
591 u32 pp_ctrl_reg, pp_div_reg;
593 if (!is_edp(intel_dp) || code != SYS_RESTART)
598 if (IS_VALLEYVIEW(dev)) {
599 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
601 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
602 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
603 pp_div = I915_READ(pp_div_reg);
604 pp_div &= PP_REFERENCE_DIVIDER_MASK;
606 /* 0x1F write to PP_DIV_REG sets max cycle delay */
607 I915_WRITE(pp_div_reg, pp_div | 0x1F);
608 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
609 msleep(intel_dp->panel_power_cycle_delay);
612 pps_unlock(intel_dp);
617 static bool edp_have_panel_power(struct intel_dp *intel_dp)
619 struct drm_device *dev = intel_dp_to_dev(intel_dp);
620 struct drm_i915_private *dev_priv = dev->dev_private;
622 lockdep_assert_held(&dev_priv->pps_mutex);
624 if (IS_VALLEYVIEW(dev) &&
625 intel_dp->pps_pipe == INVALID_PIPE)
628 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
631 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
634 struct drm_i915_private *dev_priv = dev->dev_private;
636 lockdep_assert_held(&dev_priv->pps_mutex);
638 if (IS_VALLEYVIEW(dev) &&
639 intel_dp->pps_pipe == INVALID_PIPE)
642 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
646 intel_dp_check_edp(struct intel_dp *intel_dp)
648 struct drm_device *dev = intel_dp_to_dev(intel_dp);
649 struct drm_i915_private *dev_priv = dev->dev_private;
651 if (!is_edp(intel_dp))
654 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
655 WARN(1, "eDP powered off while attempting aux channel communication.\n");
656 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
657 I915_READ(_pp_stat_reg(intel_dp)),
658 I915_READ(_pp_ctrl_reg(intel_dp)));
663 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
665 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
666 struct drm_device *dev = intel_dig_port->base.base.dev;
667 struct drm_i915_private *dev_priv = dev->dev_private;
668 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
672 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
674 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
675 msecs_to_jiffies_timeout(10));
677 done = wait_for_atomic(C, 10) == 0;
679 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
686 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
692 * The clock divider is based off the hrawclk, and would like to run at
693 * 2MHz. So, take the hrawclk value and divide by 2 and use that
695 return index ? 0 : intel_hrawclk(dev) / 2;
698 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
700 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
701 struct drm_device *dev = intel_dig_port->base.base.dev;
702 struct drm_i915_private *dev_priv = dev->dev_private;
707 if (intel_dig_port->port == PORT_A) {
708 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
710 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
714 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
716 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
717 struct drm_device *dev = intel_dig_port->base.base.dev;
718 struct drm_i915_private *dev_priv = dev->dev_private;
720 if (intel_dig_port->port == PORT_A) {
723 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
724 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
725 /* Workaround for non-ULT HSW */
732 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
736 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
738 return index ? 0 : 100;
741 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
744 * SKL doesn't need us to program the AUX clock divider (Hardware will
745 * derive the clock from CDCLK automatically). We still implement the
746 * get_aux_clock_divider vfunc to plug-in into the existing code.
748 return index ? 0 : 1;
751 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
754 uint32_t aux_clock_divider)
756 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
757 struct drm_device *dev = intel_dig_port->base.base.dev;
758 uint32_t precharge, timeout;
765 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
766 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
768 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
770 return DP_AUX_CH_CTL_SEND_BUSY |
772 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
773 DP_AUX_CH_CTL_TIME_OUT_ERROR |
775 DP_AUX_CH_CTL_RECEIVE_ERROR |
776 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
777 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
778 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
781 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
786 return DP_AUX_CH_CTL_SEND_BUSY |
788 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
789 DP_AUX_CH_CTL_TIME_OUT_ERROR |
790 DP_AUX_CH_CTL_TIME_OUT_1600us |
791 DP_AUX_CH_CTL_RECEIVE_ERROR |
792 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
793 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
797 intel_dp_aux_ch(struct intel_dp *intel_dp,
798 const uint8_t *send, int send_bytes,
799 uint8_t *recv, int recv_size)
801 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
802 struct drm_device *dev = intel_dig_port->base.base.dev;
803 struct drm_i915_private *dev_priv = dev->dev_private;
804 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
805 uint32_t ch_data = ch_ctl + 4;
806 uint32_t aux_clock_divider;
807 int i, ret, recv_bytes;
810 bool has_aux_irq = HAS_AUX_IRQ(dev);
816 * We will be called with VDD already enabled for dpcd/edid/oui reads.
817 * In such cases we want to leave VDD enabled and it's up to upper layers
818 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
821 vdd = edp_panel_vdd_on(intel_dp);
823 /* dp aux is extremely sensitive to irq latency, hence request the
824 * lowest possible wakeup latency and so prevent the cpu from going into
827 pm_qos_update_request(&dev_priv->pm_qos, 0);
829 intel_dp_check_edp(intel_dp);
831 intel_aux_display_runtime_get(dev_priv);
833 /* Try to wait for any previous AUX channel activity */
834 for (try = 0; try < 3; try++) {
835 status = I915_READ_NOTRACE(ch_ctl);
836 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
842 WARN(1, "dp_aux_ch not started status 0x%08x\n",
848 /* Only 5 data registers! */
849 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
854 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
855 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
860 /* Must try at least 3 times according to DP spec */
861 for (try = 0; try < 5; try++) {
862 /* Load the send data into the aux channel data registers */
863 for (i = 0; i < send_bytes; i += 4)
864 I915_WRITE(ch_data + i,
865 intel_dp_pack_aux(send + i,
868 /* Send the command and wait for it to complete */
869 I915_WRITE(ch_ctl, send_ctl);
871 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
873 /* Clear done status and any errors */
877 DP_AUX_CH_CTL_TIME_OUT_ERROR |
878 DP_AUX_CH_CTL_RECEIVE_ERROR);
880 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
883 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
884 * 400us delay required for errors and timeouts
885 * Timeout errors from the HW already meet this
886 * requirement so skip to next iteration
888 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
889 usleep_range(400, 500);
892 if (status & DP_AUX_CH_CTL_DONE)
897 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
898 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
904 /* Check for timeout or receive error.
905 * Timeouts occur when the sink is not connected
907 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
908 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
913 /* Timeouts occur when the device isn't connected, so they're
914 * "normal" -- don't fill the kernel log with these */
915 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
916 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
921 /* Unload any bytes sent back from the other side */
922 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
923 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
924 if (recv_bytes > recv_size)
925 recv_bytes = recv_size;
927 for (i = 0; i < recv_bytes; i += 4)
928 intel_dp_unpack_aux(I915_READ(ch_data + i),
929 recv + i, recv_bytes - i);
933 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
934 intel_aux_display_runtime_put(dev_priv);
937 edp_panel_vdd_off(intel_dp, false);
939 pps_unlock(intel_dp);
944 #define BARE_ADDRESS_SIZE 3
945 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
947 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
949 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
950 uint8_t txbuf[20], rxbuf[20];
951 size_t txsize, rxsize;
954 txbuf[0] = (msg->request << 4) |
955 ((msg->address >> 16) & 0xf);
956 txbuf[1] = (msg->address >> 8) & 0xff;
957 txbuf[2] = msg->address & 0xff;
958 txbuf[3] = msg->size - 1;
960 switch (msg->request & ~DP_AUX_I2C_MOT) {
961 case DP_AUX_NATIVE_WRITE:
962 case DP_AUX_I2C_WRITE:
963 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
964 rxsize = 2; /* 0 or 1 data bytes */
966 if (WARN_ON(txsize > 20))
969 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
971 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
973 msg->reply = rxbuf[0] >> 4;
976 /* Number of bytes written in a short write. */
977 ret = clamp_t(int, rxbuf[1], 0, msg->size);
979 /* Return payload size. */
985 case DP_AUX_NATIVE_READ:
986 case DP_AUX_I2C_READ:
987 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
988 rxsize = msg->size + 1;
990 if (WARN_ON(rxsize > 20))
993 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
995 msg->reply = rxbuf[0] >> 4;
997 * Assume happy day, and copy the data. The caller is
998 * expected to check msg->reply before touching it.
1000 * Return payload size.
1003 memcpy(msg->buffer, rxbuf + 1, ret);
1016 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1018 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1019 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1020 enum port port = intel_dig_port->port;
1021 const char *name = NULL;
1026 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1030 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1034 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1038 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1046 * The AUX_CTL register is usually DP_CTL + 0x10.
1048 * On Haswell and Broadwell though:
1049 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1050 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1052 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1054 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1055 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1057 intel_dp->aux.name = name;
1058 intel_dp->aux.dev = dev->dev;
1059 intel_dp->aux.transfer = intel_dp_aux_transfer;
1061 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1062 connector->base.kdev->kobj.name);
1064 ret = drm_dp_aux_register(&intel_dp->aux);
1066 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1071 ret = sysfs_create_link(&connector->base.kdev->kobj,
1072 &intel_dp->aux.ddc.dev.kobj,
1073 intel_dp->aux.ddc.dev.kobj.name);
1075 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1076 drm_dp_aux_unregister(&intel_dp->aux);
1081 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1083 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1085 if (!intel_connector->mst_port)
1086 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1087 intel_dp->aux.ddc.dev.kobj.name);
1088 intel_connector_unregister(intel_connector);
1092 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1096 memset(&pipe_config->dpll_hw_state, 0,
1097 sizeof(pipe_config->dpll_hw_state));
1099 pipe_config->ddi_pll_sel = SKL_DPLL0;
1100 pipe_config->dpll_hw_state.cfgcr1 = 0;
1101 pipe_config->dpll_hw_state.cfgcr2 = 0;
1103 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1104 switch (link_clock / 2) {
1106 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1110 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1114 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1118 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1121 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1122 results in CDCLK change. Need to handle the change of CDCLK by
1123 disabling pipes and re-enabling them */
1125 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1129 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1134 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1138 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1140 memset(&pipe_config->dpll_hw_state, 0,
1141 sizeof(pipe_config->dpll_hw_state));
1144 case DP_LINK_BW_1_62:
1145 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1147 case DP_LINK_BW_2_7:
1148 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1150 case DP_LINK_BW_5_4:
1151 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1157 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1159 if (intel_dp->num_sink_rates) {
1160 *sink_rates = intel_dp->sink_rates;
1161 return intel_dp->num_sink_rates;
1164 *sink_rates = default_rates;
1166 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1170 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1172 if (IS_SKYLAKE(dev)) {
1173 *source_rates = skl_rates;
1174 return ARRAY_SIZE(skl_rates);
1177 *source_rates = default_rates;
1179 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1180 /* WaDisableHBR2:skl */
1181 return (DP_LINK_BW_2_7 >> 3) + 1;
1182 else if (INTEL_INFO(dev)->gen >= 8 ||
1183 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1184 return (DP_LINK_BW_5_4 >> 3) + 1;
1186 return (DP_LINK_BW_2_7 >> 3) + 1;
1190 intel_dp_set_clock(struct intel_encoder *encoder,
1191 struct intel_crtc_state *pipe_config, int link_bw)
1193 struct drm_device *dev = encoder->base.dev;
1194 const struct dp_link_dpll *divisor = NULL;
1198 divisor = gen4_dpll;
1199 count = ARRAY_SIZE(gen4_dpll);
1200 } else if (HAS_PCH_SPLIT(dev)) {
1202 count = ARRAY_SIZE(pch_dpll);
1203 } else if (IS_CHERRYVIEW(dev)) {
1205 count = ARRAY_SIZE(chv_dpll);
1206 } else if (IS_VALLEYVIEW(dev)) {
1208 count = ARRAY_SIZE(vlv_dpll);
1211 if (divisor && count) {
1212 for (i = 0; i < count; i++) {
1213 if (link_bw == divisor[i].link_bw) {
1214 pipe_config->dpll = divisor[i].dpll;
1215 pipe_config->clock_set = true;
1222 static int intersect_rates(const int *source_rates, int source_len,
1223 const int *sink_rates, int sink_len,
1226 int i = 0, j = 0, k = 0;
1228 while (i < source_len && j < sink_len) {
1229 if (source_rates[i] == sink_rates[j]) {
1230 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1232 common_rates[k] = source_rates[i];
1236 } else if (source_rates[i] < sink_rates[j]) {
1245 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1248 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1249 const int *source_rates, *sink_rates;
1250 int source_len, sink_len;
1252 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1253 source_len = intel_dp_source_rates(dev, &source_rates);
1255 return intersect_rates(source_rates, source_len,
1256 sink_rates, sink_len,
1260 static void snprintf_int_array(char *str, size_t len,
1261 const int *array, int nelem)
1267 for (i = 0; i < nelem; i++) {
1268 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1276 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1278 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1279 const int *source_rates, *sink_rates;
1280 int source_len, sink_len, common_len;
1281 int common_rates[DP_MAX_SUPPORTED_RATES];
1282 char str[128]; /* FIXME: too big for stack? */
1284 if ((drm_debug & DRM_UT_KMS) == 0)
1287 source_len = intel_dp_source_rates(dev, &source_rates);
1288 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1289 DRM_DEBUG_KMS("source rates: %s\n", str);
1291 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1292 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1293 DRM_DEBUG_KMS("sink rates: %s\n", str);
1295 common_len = intel_dp_common_rates(intel_dp, common_rates);
1296 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1297 DRM_DEBUG_KMS("common rates: %s\n", str);
1300 static int rate_to_index(int find, const int *rates)
1304 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1305 if (find == rates[i])
1312 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1314 int rates[DP_MAX_SUPPORTED_RATES] = {};
1317 len = intel_dp_common_rates(intel_dp, rates);
1318 if (WARN_ON(len <= 0))
1321 return rates[rate_to_index(0, rates) - 1];
1324 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1326 return rate_to_index(rate, intel_dp->sink_rates);
1330 intel_dp_compute_config(struct intel_encoder *encoder,
1331 struct intel_crtc_state *pipe_config)
1333 struct drm_device *dev = encoder->base.dev;
1334 struct drm_i915_private *dev_priv = dev->dev_private;
1335 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1336 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1337 enum port port = dp_to_dig_port(intel_dp)->port;
1338 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1339 struct intel_connector *intel_connector = intel_dp->attached_connector;
1340 int lane_count, clock;
1341 int min_lane_count = 1;
1342 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1343 /* Conveniently, the link BW constants become indices with a shift...*/
1347 int link_avail, link_clock;
1348 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1351 common_len = intel_dp_common_rates(intel_dp, common_rates);
1353 /* No common link rates between source and sink */
1354 WARN_ON(common_len <= 0);
1356 max_clock = common_len - 1;
1358 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1359 pipe_config->has_pch_encoder = true;
1361 pipe_config->has_dp_encoder = true;
1362 pipe_config->has_drrs = false;
1363 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1365 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1366 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1369 if (INTEL_INFO(dev)->gen >= 9) {
1371 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1376 if (!HAS_PCH_SPLIT(dev))
1377 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1378 intel_connector->panel.fitting_mode);
1380 intel_pch_panel_fitting(intel_crtc, pipe_config,
1381 intel_connector->panel.fitting_mode);
1384 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1387 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1388 "max bw %d pixel clock %iKHz\n",
1389 max_lane_count, common_rates[max_clock],
1390 adjusted_mode->crtc_clock);
1392 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1393 * bpc in between. */
1394 bpp = pipe_config->pipe_bpp;
1395 if (is_edp(intel_dp)) {
1396 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1397 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1398 dev_priv->vbt.edp_bpp);
1399 bpp = dev_priv->vbt.edp_bpp;
1403 * Use the maximum clock and number of lanes the eDP panel
1404 * advertizes being capable of. The panels are generally
1405 * designed to support only a single clock and lane
1406 * configuration, and typically these values correspond to the
1407 * native resolution of the panel.
1409 min_lane_count = max_lane_count;
1410 min_clock = max_clock;
1413 for (; bpp >= 6*3; bpp -= 2*3) {
1414 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1417 for (clock = min_clock; clock <= max_clock; clock++) {
1418 for (lane_count = min_lane_count;
1419 lane_count <= max_lane_count;
1422 link_clock = common_rates[clock];
1423 link_avail = intel_dp_max_data_rate(link_clock,
1426 if (mode_rate <= link_avail) {
1436 if (intel_dp->color_range_auto) {
1439 * CEA-861-E - 5.1 Default Encoding Parameters
1440 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1442 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1443 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1445 intel_dp->color_range = 0;
1448 if (intel_dp->color_range)
1449 pipe_config->limited_color_range = true;
1451 intel_dp->lane_count = lane_count;
1453 if (intel_dp->num_sink_rates) {
1454 intel_dp->link_bw = 0;
1455 intel_dp->rate_select =
1456 intel_dp_rate_select(intel_dp, common_rates[clock]);
1459 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1460 intel_dp->rate_select = 0;
1463 pipe_config->pipe_bpp = bpp;
1464 pipe_config->port_clock = common_rates[clock];
1466 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1467 intel_dp->link_bw, intel_dp->lane_count,
1468 pipe_config->port_clock, bpp);
1469 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1470 mode_rate, link_avail);
1472 intel_link_compute_m_n(bpp, lane_count,
1473 adjusted_mode->crtc_clock,
1474 pipe_config->port_clock,
1475 &pipe_config->dp_m_n);
1477 if (intel_connector->panel.downclock_mode != NULL &&
1478 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1479 pipe_config->has_drrs = true;
1480 intel_link_compute_m_n(bpp, lane_count,
1481 intel_connector->panel.downclock_mode->clock,
1482 pipe_config->port_clock,
1483 &pipe_config->dp_m2_n2);
1486 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1487 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1488 else if (IS_BROXTON(dev))
1489 /* handled in ddi */;
1490 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1491 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1493 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1498 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1500 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1501 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1502 struct drm_device *dev = crtc->base.dev;
1503 struct drm_i915_private *dev_priv = dev->dev_private;
1506 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1507 crtc->config->port_clock);
1508 dpa_ctl = I915_READ(DP_A);
1509 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1511 if (crtc->config->port_clock == 162000) {
1512 /* For a long time we've carried around a ILK-DevA w/a for the
1513 * 160MHz clock. If we're really unlucky, it's still required.
1515 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1516 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1517 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1519 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1520 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1523 I915_WRITE(DP_A, dpa_ctl);
1529 static void intel_dp_prepare(struct intel_encoder *encoder)
1531 struct drm_device *dev = encoder->base.dev;
1532 struct drm_i915_private *dev_priv = dev->dev_private;
1533 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1534 enum port port = dp_to_dig_port(intel_dp)->port;
1535 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1536 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1539 * There are four kinds of DP registers:
1546 * IBX PCH and CPU are the same for almost everything,
1547 * except that the CPU DP PLL is configured in this
1550 * CPT PCH is quite different, having many bits moved
1551 * to the TRANS_DP_CTL register instead. That
1552 * configuration happens (oddly) in ironlake_pch_enable
1555 /* Preserve the BIOS-computed detected bit. This is
1556 * supposed to be read-only.
1558 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1560 /* Handle DP bits in common between all three register formats */
1561 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1562 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1564 if (crtc->config->has_audio)
1565 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1567 /* Split out the IBX/CPU vs CPT settings */
1569 if (IS_GEN7(dev) && port == PORT_A) {
1570 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1571 intel_dp->DP |= DP_SYNC_HS_HIGH;
1572 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1573 intel_dp->DP |= DP_SYNC_VS_HIGH;
1574 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1576 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1577 intel_dp->DP |= DP_ENHANCED_FRAMING;
1579 intel_dp->DP |= crtc->pipe << 29;
1580 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1583 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1585 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1586 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1587 trans_dp |= TRANS_DP_ENH_FRAMING;
1589 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1590 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1592 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1593 intel_dp->DP |= intel_dp->color_range;
1595 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1596 intel_dp->DP |= DP_SYNC_HS_HIGH;
1597 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1598 intel_dp->DP |= DP_SYNC_VS_HIGH;
1599 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1601 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1602 intel_dp->DP |= DP_ENHANCED_FRAMING;
1604 if (IS_CHERRYVIEW(dev))
1605 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1606 else if (crtc->pipe == PIPE_B)
1607 intel_dp->DP |= DP_PIPEB_SELECT;
1611 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1612 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1614 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1615 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1617 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1618 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1620 static void wait_panel_status(struct intel_dp *intel_dp,
1624 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1625 struct drm_i915_private *dev_priv = dev->dev_private;
1626 u32 pp_stat_reg, pp_ctrl_reg;
1628 lockdep_assert_held(&dev_priv->pps_mutex);
1630 pp_stat_reg = _pp_stat_reg(intel_dp);
1631 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1633 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1635 I915_READ(pp_stat_reg),
1636 I915_READ(pp_ctrl_reg));
1638 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1639 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1640 I915_READ(pp_stat_reg),
1641 I915_READ(pp_ctrl_reg));
1644 DRM_DEBUG_KMS("Wait complete\n");
1647 static void wait_panel_on(struct intel_dp *intel_dp)
1649 DRM_DEBUG_KMS("Wait for panel power on\n");
1650 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1653 static void wait_panel_off(struct intel_dp *intel_dp)
1655 DRM_DEBUG_KMS("Wait for panel power off time\n");
1656 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1659 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1661 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1663 /* When we disable the VDD override bit last we have to do the manual
1665 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1666 intel_dp->panel_power_cycle_delay);
1668 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1671 static void wait_backlight_on(struct intel_dp *intel_dp)
1673 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1674 intel_dp->backlight_on_delay);
1677 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1679 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1680 intel_dp->backlight_off_delay);
1683 /* Read the current pp_control value, unlocking the register if it
1687 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1689 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1690 struct drm_i915_private *dev_priv = dev->dev_private;
1693 lockdep_assert_held(&dev_priv->pps_mutex);
1695 control = I915_READ(_pp_ctrl_reg(intel_dp));
1696 control &= ~PANEL_UNLOCK_MASK;
1697 control |= PANEL_UNLOCK_REGS;
1702 * Must be paired with edp_panel_vdd_off().
1703 * Must hold pps_mutex around the whole on/off sequence.
1704 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1706 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1708 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1709 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1710 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1711 struct drm_i915_private *dev_priv = dev->dev_private;
1712 enum intel_display_power_domain power_domain;
1714 u32 pp_stat_reg, pp_ctrl_reg;
1715 bool need_to_disable = !intel_dp->want_panel_vdd;
1717 lockdep_assert_held(&dev_priv->pps_mutex);
1719 if (!is_edp(intel_dp))
1722 cancel_delayed_work(&intel_dp->panel_vdd_work);
1723 intel_dp->want_panel_vdd = true;
1725 if (edp_have_panel_vdd(intel_dp))
1726 return need_to_disable;
1728 power_domain = intel_display_port_power_domain(intel_encoder);
1729 intel_display_power_get(dev_priv, power_domain);
1731 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1732 port_name(intel_dig_port->port));
1734 if (!edp_have_panel_power(intel_dp))
1735 wait_panel_power_cycle(intel_dp);
1737 pp = ironlake_get_pp_control(intel_dp);
1738 pp |= EDP_FORCE_VDD;
1740 pp_stat_reg = _pp_stat_reg(intel_dp);
1741 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1743 I915_WRITE(pp_ctrl_reg, pp);
1744 POSTING_READ(pp_ctrl_reg);
1745 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1746 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1748 * If the panel wasn't on, delay before accessing aux channel
1750 if (!edp_have_panel_power(intel_dp)) {
1751 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1752 port_name(intel_dig_port->port));
1753 msleep(intel_dp->panel_power_up_delay);
1756 return need_to_disable;
1760 * Must be paired with intel_edp_panel_vdd_off() or
1761 * intel_edp_panel_off().
1762 * Nested calls to these functions are not allowed since
1763 * we drop the lock. Caller must use some higher level
1764 * locking to prevent nested calls from other threads.
1766 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1770 if (!is_edp(intel_dp))
1774 vdd = edp_panel_vdd_on(intel_dp);
1775 pps_unlock(intel_dp);
1777 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1778 port_name(dp_to_dig_port(intel_dp)->port));
1781 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1783 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1784 struct drm_i915_private *dev_priv = dev->dev_private;
1785 struct intel_digital_port *intel_dig_port =
1786 dp_to_dig_port(intel_dp);
1787 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1788 enum intel_display_power_domain power_domain;
1790 u32 pp_stat_reg, pp_ctrl_reg;
1792 lockdep_assert_held(&dev_priv->pps_mutex);
1794 WARN_ON(intel_dp->want_panel_vdd);
1796 if (!edp_have_panel_vdd(intel_dp))
1799 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1800 port_name(intel_dig_port->port));
1802 pp = ironlake_get_pp_control(intel_dp);
1803 pp &= ~EDP_FORCE_VDD;
1805 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1806 pp_stat_reg = _pp_stat_reg(intel_dp);
1808 I915_WRITE(pp_ctrl_reg, pp);
1809 POSTING_READ(pp_ctrl_reg);
1811 /* Make sure sequencer is idle before allowing subsequent activity */
1812 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1813 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1815 if ((pp & POWER_TARGET_ON) == 0)
1816 intel_dp->last_power_cycle = jiffies;
1818 power_domain = intel_display_port_power_domain(intel_encoder);
1819 intel_display_power_put(dev_priv, power_domain);
1822 static void edp_panel_vdd_work(struct work_struct *__work)
1824 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1825 struct intel_dp, panel_vdd_work);
1828 if (!intel_dp->want_panel_vdd)
1829 edp_panel_vdd_off_sync(intel_dp);
1830 pps_unlock(intel_dp);
1833 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1835 unsigned long delay;
1838 * Queue the timer to fire a long time from now (relative to the power
1839 * down delay) to keep the panel power up across a sequence of
1842 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1843 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1847 * Must be paired with edp_panel_vdd_on().
1848 * Must hold pps_mutex around the whole on/off sequence.
1849 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1851 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1853 struct drm_i915_private *dev_priv =
1854 intel_dp_to_dev(intel_dp)->dev_private;
1856 lockdep_assert_held(&dev_priv->pps_mutex);
1858 if (!is_edp(intel_dp))
1861 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1862 port_name(dp_to_dig_port(intel_dp)->port));
1864 intel_dp->want_panel_vdd = false;
1867 edp_panel_vdd_off_sync(intel_dp);
1869 edp_panel_vdd_schedule_off(intel_dp);
1872 static void edp_panel_on(struct intel_dp *intel_dp)
1874 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1875 struct drm_i915_private *dev_priv = dev->dev_private;
1879 lockdep_assert_held(&dev_priv->pps_mutex);
1881 if (!is_edp(intel_dp))
1884 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1885 port_name(dp_to_dig_port(intel_dp)->port));
1887 if (WARN(edp_have_panel_power(intel_dp),
1888 "eDP port %c panel power already on\n",
1889 port_name(dp_to_dig_port(intel_dp)->port)))
1892 wait_panel_power_cycle(intel_dp);
1894 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1895 pp = ironlake_get_pp_control(intel_dp);
1897 /* ILK workaround: disable reset around power sequence */
1898 pp &= ~PANEL_POWER_RESET;
1899 I915_WRITE(pp_ctrl_reg, pp);
1900 POSTING_READ(pp_ctrl_reg);
1903 pp |= POWER_TARGET_ON;
1905 pp |= PANEL_POWER_RESET;
1907 I915_WRITE(pp_ctrl_reg, pp);
1908 POSTING_READ(pp_ctrl_reg);
1910 wait_panel_on(intel_dp);
1911 intel_dp->last_power_on = jiffies;
1914 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1915 I915_WRITE(pp_ctrl_reg, pp);
1916 POSTING_READ(pp_ctrl_reg);
1920 void intel_edp_panel_on(struct intel_dp *intel_dp)
1922 if (!is_edp(intel_dp))
1926 edp_panel_on(intel_dp);
1927 pps_unlock(intel_dp);
1931 static void edp_panel_off(struct intel_dp *intel_dp)
1933 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1934 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1935 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1936 struct drm_i915_private *dev_priv = dev->dev_private;
1937 enum intel_display_power_domain power_domain;
1941 lockdep_assert_held(&dev_priv->pps_mutex);
1943 if (!is_edp(intel_dp))
1946 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1947 port_name(dp_to_dig_port(intel_dp)->port));
1949 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1950 port_name(dp_to_dig_port(intel_dp)->port));
1952 pp = ironlake_get_pp_control(intel_dp);
1953 /* We need to switch off panel power _and_ force vdd, for otherwise some
1954 * panels get very unhappy and cease to work. */
1955 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1958 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1960 intel_dp->want_panel_vdd = false;
1962 I915_WRITE(pp_ctrl_reg, pp);
1963 POSTING_READ(pp_ctrl_reg);
1965 intel_dp->last_power_cycle = jiffies;
1966 wait_panel_off(intel_dp);
1968 /* We got a reference when we enabled the VDD. */
1969 power_domain = intel_display_port_power_domain(intel_encoder);
1970 intel_display_power_put(dev_priv, power_domain);
1973 void intel_edp_panel_off(struct intel_dp *intel_dp)
1975 if (!is_edp(intel_dp))
1979 edp_panel_off(intel_dp);
1980 pps_unlock(intel_dp);
1983 /* Enable backlight in the panel power control. */
1984 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1986 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1987 struct drm_device *dev = intel_dig_port->base.base.dev;
1988 struct drm_i915_private *dev_priv = dev->dev_private;
1993 * If we enable the backlight right away following a panel power
1994 * on, we may see slight flicker as the panel syncs with the eDP
1995 * link. So delay a bit to make sure the image is solid before
1996 * allowing it to appear.
1998 wait_backlight_on(intel_dp);
2002 pp = ironlake_get_pp_control(intel_dp);
2003 pp |= EDP_BLC_ENABLE;
2005 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2007 I915_WRITE(pp_ctrl_reg, pp);
2008 POSTING_READ(pp_ctrl_reg);
2010 pps_unlock(intel_dp);
2013 /* Enable backlight PWM and backlight PP control. */
2014 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2016 if (!is_edp(intel_dp))
2019 DRM_DEBUG_KMS("\n");
2021 intel_panel_enable_backlight(intel_dp->attached_connector);
2022 _intel_edp_backlight_on(intel_dp);
2025 /* Disable backlight in the panel power control. */
2026 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2028 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2029 struct drm_i915_private *dev_priv = dev->dev_private;
2033 if (!is_edp(intel_dp))
2038 pp = ironlake_get_pp_control(intel_dp);
2039 pp &= ~EDP_BLC_ENABLE;
2041 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2043 I915_WRITE(pp_ctrl_reg, pp);
2044 POSTING_READ(pp_ctrl_reg);
2046 pps_unlock(intel_dp);
2048 intel_dp->last_backlight_off = jiffies;
2049 edp_wait_backlight_off(intel_dp);
2052 /* Disable backlight PP control and backlight PWM. */
2053 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2055 if (!is_edp(intel_dp))
2058 DRM_DEBUG_KMS("\n");
2060 _intel_edp_backlight_off(intel_dp);
2061 intel_panel_disable_backlight(intel_dp->attached_connector);
2065 * Hook for controlling the panel power control backlight through the bl_power
2066 * sysfs attribute. Take care to handle multiple calls.
2068 static void intel_edp_backlight_power(struct intel_connector *connector,
2071 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2075 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2076 pps_unlock(intel_dp);
2078 if (is_enabled == enable)
2081 DRM_DEBUG_KMS("panel power control backlight %s\n",
2082 enable ? "enable" : "disable");
2085 _intel_edp_backlight_on(intel_dp);
2087 _intel_edp_backlight_off(intel_dp);
2090 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2092 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2093 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2094 struct drm_device *dev = crtc->dev;
2095 struct drm_i915_private *dev_priv = dev->dev_private;
2098 assert_pipe_disabled(dev_priv,
2099 to_intel_crtc(crtc)->pipe);
2101 DRM_DEBUG_KMS("\n");
2102 dpa_ctl = I915_READ(DP_A);
2103 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2104 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2106 /* We don't adjust intel_dp->DP while tearing down the link, to
2107 * facilitate link retraining (e.g. after hotplug). Hence clear all
2108 * enable bits here to ensure that we don't enable too much. */
2109 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2110 intel_dp->DP |= DP_PLL_ENABLE;
2111 I915_WRITE(DP_A, intel_dp->DP);
2116 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2118 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2119 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2120 struct drm_device *dev = crtc->dev;
2121 struct drm_i915_private *dev_priv = dev->dev_private;
2124 assert_pipe_disabled(dev_priv,
2125 to_intel_crtc(crtc)->pipe);
2127 dpa_ctl = I915_READ(DP_A);
2128 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2129 "dp pll off, should be on\n");
2130 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2132 /* We can't rely on the value tracked for the DP register in
2133 * intel_dp->DP because link_down must not change that (otherwise link
2134 * re-training will fail. */
2135 dpa_ctl &= ~DP_PLL_ENABLE;
2136 I915_WRITE(DP_A, dpa_ctl);
2141 /* If the sink supports it, try to set the power state appropriately */
2142 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2146 /* Should have a valid DPCD by this point */
2147 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2150 if (mode != DRM_MODE_DPMS_ON) {
2151 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2155 * When turning on, we need to retry for 1ms to give the sink
2158 for (i = 0; i < 3; i++) {
2159 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2168 DRM_DEBUG_KMS("failed to %s sink power state\n",
2169 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2172 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2175 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2176 enum port port = dp_to_dig_port(intel_dp)->port;
2177 struct drm_device *dev = encoder->base.dev;
2178 struct drm_i915_private *dev_priv = dev->dev_private;
2179 enum intel_display_power_domain power_domain;
2182 power_domain = intel_display_port_power_domain(encoder);
2183 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2186 tmp = I915_READ(intel_dp->output_reg);
2188 if (!(tmp & DP_PORT_EN))
2191 if (IS_GEN7(dev) && port == PORT_A) {
2192 *pipe = PORT_TO_PIPE_CPT(tmp);
2193 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2196 for_each_pipe(dev_priv, p) {
2197 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2198 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2204 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2205 intel_dp->output_reg);
2206 } else if (IS_CHERRYVIEW(dev)) {
2207 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2209 *pipe = PORT_TO_PIPE(tmp);
2215 static void intel_dp_get_config(struct intel_encoder *encoder,
2216 struct intel_crtc_state *pipe_config)
2218 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2220 struct drm_device *dev = encoder->base.dev;
2221 struct drm_i915_private *dev_priv = dev->dev_private;
2222 enum port port = dp_to_dig_port(intel_dp)->port;
2223 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2226 tmp = I915_READ(intel_dp->output_reg);
2228 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2230 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2231 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2232 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2233 flags |= DRM_MODE_FLAG_PHSYNC;
2235 flags |= DRM_MODE_FLAG_NHSYNC;
2237 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2238 flags |= DRM_MODE_FLAG_PVSYNC;
2240 flags |= DRM_MODE_FLAG_NVSYNC;
2242 if (tmp & DP_SYNC_HS_HIGH)
2243 flags |= DRM_MODE_FLAG_PHSYNC;
2245 flags |= DRM_MODE_FLAG_NHSYNC;
2247 if (tmp & DP_SYNC_VS_HIGH)
2248 flags |= DRM_MODE_FLAG_PVSYNC;
2250 flags |= DRM_MODE_FLAG_NVSYNC;
2253 pipe_config->base.adjusted_mode.flags |= flags;
2255 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2256 tmp & DP_COLOR_RANGE_16_235)
2257 pipe_config->limited_color_range = true;
2259 pipe_config->has_dp_encoder = true;
2261 intel_dp_get_m_n(crtc, pipe_config);
2263 if (port == PORT_A) {
2264 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2265 pipe_config->port_clock = 162000;
2267 pipe_config->port_clock = 270000;
2270 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2271 &pipe_config->dp_m_n);
2273 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2274 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2276 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2278 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2279 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2281 * This is a big fat ugly hack.
2283 * Some machines in UEFI boot mode provide us a VBT that has 18
2284 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2285 * unknown we fail to light up. Yet the same BIOS boots up with
2286 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2287 * max, not what it tells us to use.
2289 * Note: This will still be broken if the eDP panel is not lit
2290 * up by the BIOS, and thus we can't get the mode at module
2293 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2294 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2295 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2299 static void intel_disable_dp(struct intel_encoder *encoder)
2301 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2302 struct drm_device *dev = encoder->base.dev;
2303 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2305 if (crtc->config->has_audio)
2306 intel_audio_codec_disable(encoder);
2308 if (HAS_PSR(dev) && !HAS_DDI(dev))
2309 intel_psr_disable(intel_dp);
2311 /* Make sure the panel is off before trying to change the mode. But also
2312 * ensure that we have vdd while we switch off the panel. */
2313 intel_edp_panel_vdd_on(intel_dp);
2314 intel_edp_backlight_off(intel_dp);
2315 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2316 intel_edp_panel_off(intel_dp);
2318 /* disable the port before the pipe on g4x */
2319 if (INTEL_INFO(dev)->gen < 5)
2320 intel_dp_link_down(intel_dp);
2323 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2325 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2326 enum port port = dp_to_dig_port(intel_dp)->port;
2328 intel_dp_link_down(intel_dp);
2330 ironlake_edp_pll_off(intel_dp);
2333 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2335 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2337 intel_dp_link_down(intel_dp);
2340 static void chv_post_disable_dp(struct intel_encoder *encoder)
2342 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2343 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2344 struct drm_device *dev = encoder->base.dev;
2345 struct drm_i915_private *dev_priv = dev->dev_private;
2346 struct intel_crtc *intel_crtc =
2347 to_intel_crtc(encoder->base.crtc);
2348 enum dpio_channel ch = vlv_dport_to_channel(dport);
2349 enum pipe pipe = intel_crtc->pipe;
2352 intel_dp_link_down(intel_dp);
2354 mutex_lock(&dev_priv->sb_lock);
2356 /* Propagate soft reset to data lane reset */
2357 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2358 val |= CHV_PCS_REQ_SOFTRESET_EN;
2359 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2361 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2362 val |= CHV_PCS_REQ_SOFTRESET_EN;
2363 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2365 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2366 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2367 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2369 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2370 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2371 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2373 mutex_unlock(&dev_priv->sb_lock);
2377 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2379 uint8_t dp_train_pat)
2381 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2382 struct drm_device *dev = intel_dig_port->base.base.dev;
2383 struct drm_i915_private *dev_priv = dev->dev_private;
2384 enum port port = intel_dig_port->port;
2387 uint32_t temp = I915_READ(DP_TP_CTL(port));
2389 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2390 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2392 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2394 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2395 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2396 case DP_TRAINING_PATTERN_DISABLE:
2397 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2400 case DP_TRAINING_PATTERN_1:
2401 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2403 case DP_TRAINING_PATTERN_2:
2404 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2406 case DP_TRAINING_PATTERN_3:
2407 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2410 I915_WRITE(DP_TP_CTL(port), temp);
2412 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2413 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2414 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2416 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2417 case DP_TRAINING_PATTERN_DISABLE:
2418 *DP |= DP_LINK_TRAIN_OFF_CPT;
2420 case DP_TRAINING_PATTERN_1:
2421 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2423 case DP_TRAINING_PATTERN_2:
2424 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2426 case DP_TRAINING_PATTERN_3:
2427 DRM_ERROR("DP training pattern 3 not supported\n");
2428 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2433 if (IS_CHERRYVIEW(dev))
2434 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2436 *DP &= ~DP_LINK_TRAIN_MASK;
2438 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2439 case DP_TRAINING_PATTERN_DISABLE:
2440 *DP |= DP_LINK_TRAIN_OFF;
2442 case DP_TRAINING_PATTERN_1:
2443 *DP |= DP_LINK_TRAIN_PAT_1;
2445 case DP_TRAINING_PATTERN_2:
2446 *DP |= DP_LINK_TRAIN_PAT_2;
2448 case DP_TRAINING_PATTERN_3:
2449 if (IS_CHERRYVIEW(dev)) {
2450 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2452 DRM_ERROR("DP training pattern 3 not supported\n");
2453 *DP |= DP_LINK_TRAIN_PAT_2;
2460 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2462 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2463 struct drm_i915_private *dev_priv = dev->dev_private;
2465 /* enable with pattern 1 (as per spec) */
2466 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2467 DP_TRAINING_PATTERN_1);
2469 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2470 POSTING_READ(intel_dp->output_reg);
2473 * Magic for VLV/CHV. We _must_ first set up the register
2474 * without actually enabling the port, and then do another
2475 * write to enable the port. Otherwise link training will
2476 * fail when the power sequencer is freshly used for this port.
2478 intel_dp->DP |= DP_PORT_EN;
2480 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2481 POSTING_READ(intel_dp->output_reg);
2484 static void intel_enable_dp(struct intel_encoder *encoder)
2486 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2487 struct drm_device *dev = encoder->base.dev;
2488 struct drm_i915_private *dev_priv = dev->dev_private;
2489 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2490 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2491 unsigned int lane_mask = 0x0;
2493 if (WARN_ON(dp_reg & DP_PORT_EN))
2498 if (IS_VALLEYVIEW(dev))
2499 vlv_init_panel_power_sequencer(intel_dp);
2501 intel_dp_enable_port(intel_dp);
2503 edp_panel_vdd_on(intel_dp);
2504 edp_panel_on(intel_dp);
2505 edp_panel_vdd_off(intel_dp, true);
2507 pps_unlock(intel_dp);
2509 if (IS_VALLEYVIEW(dev))
2510 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2513 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2514 intel_dp_start_link_train(intel_dp);
2515 intel_dp_complete_link_train(intel_dp);
2516 intel_dp_stop_link_train(intel_dp);
2518 if (crtc->config->has_audio) {
2519 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2520 pipe_name(crtc->pipe));
2521 intel_audio_codec_enable(encoder);
2525 static void g4x_enable_dp(struct intel_encoder *encoder)
2527 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2529 intel_enable_dp(encoder);
2530 intel_edp_backlight_on(intel_dp);
2533 static void vlv_enable_dp(struct intel_encoder *encoder)
2535 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2537 intel_edp_backlight_on(intel_dp);
2538 intel_psr_enable(intel_dp);
2541 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2543 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2544 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2546 intel_dp_prepare(encoder);
2548 /* Only ilk+ has port A */
2549 if (dport->port == PORT_A) {
2550 ironlake_set_pll_cpu_edp(intel_dp);
2551 ironlake_edp_pll_on(intel_dp);
2555 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2558 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2559 enum pipe pipe = intel_dp->pps_pipe;
2560 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2562 edp_panel_vdd_off_sync(intel_dp);
2565 * VLV seems to get confused when multiple power seqeuencers
2566 * have the same port selected (even if only one has power/vdd
2567 * enabled). The failure manifests as vlv_wait_port_ready() failing
2568 * CHV on the other hand doesn't seem to mind having the same port
2569 * selected in multiple power seqeuencers, but let's clear the
2570 * port select always when logically disconnecting a power sequencer
2573 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2574 pipe_name(pipe), port_name(intel_dig_port->port));
2575 I915_WRITE(pp_on_reg, 0);
2576 POSTING_READ(pp_on_reg);
2578 intel_dp->pps_pipe = INVALID_PIPE;
2581 static void vlv_steal_power_sequencer(struct drm_device *dev,
2584 struct drm_i915_private *dev_priv = dev->dev_private;
2585 struct intel_encoder *encoder;
2587 lockdep_assert_held(&dev_priv->pps_mutex);
2589 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2592 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2594 struct intel_dp *intel_dp;
2597 if (encoder->type != INTEL_OUTPUT_EDP)
2600 intel_dp = enc_to_intel_dp(&encoder->base);
2601 port = dp_to_dig_port(intel_dp)->port;
2603 if (intel_dp->pps_pipe != pipe)
2606 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2607 pipe_name(pipe), port_name(port));
2609 WARN(encoder->connectors_active,
2610 "stealing pipe %c power sequencer from active eDP port %c\n",
2611 pipe_name(pipe), port_name(port));
2613 /* make sure vdd is off before we steal it */
2614 vlv_detach_power_sequencer(intel_dp);
2618 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2621 struct intel_encoder *encoder = &intel_dig_port->base;
2622 struct drm_device *dev = encoder->base.dev;
2623 struct drm_i915_private *dev_priv = dev->dev_private;
2624 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2626 lockdep_assert_held(&dev_priv->pps_mutex);
2628 if (!is_edp(intel_dp))
2631 if (intel_dp->pps_pipe == crtc->pipe)
2635 * If another power sequencer was being used on this
2636 * port previously make sure to turn off vdd there while
2637 * we still have control of it.
2639 if (intel_dp->pps_pipe != INVALID_PIPE)
2640 vlv_detach_power_sequencer(intel_dp);
2643 * We may be stealing the power
2644 * sequencer from another port.
2646 vlv_steal_power_sequencer(dev, crtc->pipe);
2648 /* now it's all ours */
2649 intel_dp->pps_pipe = crtc->pipe;
2651 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2652 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2654 /* init power sequencer on this pipe and port */
2655 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2656 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2659 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2661 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2662 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2663 struct drm_device *dev = encoder->base.dev;
2664 struct drm_i915_private *dev_priv = dev->dev_private;
2665 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2666 enum dpio_channel port = vlv_dport_to_channel(dport);
2667 int pipe = intel_crtc->pipe;
2670 mutex_lock(&dev_priv->sb_lock);
2672 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2679 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2680 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2681 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2683 mutex_unlock(&dev_priv->sb_lock);
2685 intel_enable_dp(encoder);
2688 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2690 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2691 struct drm_device *dev = encoder->base.dev;
2692 struct drm_i915_private *dev_priv = dev->dev_private;
2693 struct intel_crtc *intel_crtc =
2694 to_intel_crtc(encoder->base.crtc);
2695 enum dpio_channel port = vlv_dport_to_channel(dport);
2696 int pipe = intel_crtc->pipe;
2698 intel_dp_prepare(encoder);
2700 /* Program Tx lane resets to default */
2701 mutex_lock(&dev_priv->sb_lock);
2702 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2703 DPIO_PCS_TX_LANE2_RESET |
2704 DPIO_PCS_TX_LANE1_RESET);
2705 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2706 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2707 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2708 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2709 DPIO_PCS_CLK_SOFT_RESET);
2711 /* Fix up inter-pair skew failure */
2712 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2713 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2714 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2715 mutex_unlock(&dev_priv->sb_lock);
2718 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2720 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2721 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2722 struct drm_device *dev = encoder->base.dev;
2723 struct drm_i915_private *dev_priv = dev->dev_private;
2724 struct intel_crtc *intel_crtc =
2725 to_intel_crtc(encoder->base.crtc);
2726 enum dpio_channel ch = vlv_dport_to_channel(dport);
2727 int pipe = intel_crtc->pipe;
2728 int data, i, stagger;
2731 mutex_lock(&dev_priv->sb_lock);
2733 /* allow hardware to manage TX FIFO reset source */
2734 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2735 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2736 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2738 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2739 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2740 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2742 /* Deassert soft data lane reset*/
2743 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2744 val |= CHV_PCS_REQ_SOFTRESET_EN;
2745 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2747 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2748 val |= CHV_PCS_REQ_SOFTRESET_EN;
2749 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2751 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2752 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2753 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2755 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2756 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2757 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2759 /* Program Tx lane latency optimal setting*/
2760 for (i = 0; i < 4; i++) {
2761 /* Set the upar bit */
2762 data = (i == 1) ? 0x0 : 0x1;
2763 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2764 data << DPIO_UPAR_SHIFT);
2767 /* Data lane stagger programming */
2768 if (intel_crtc->config->port_clock > 270000)
2770 else if (intel_crtc->config->port_clock > 135000)
2772 else if (intel_crtc->config->port_clock > 67500)
2774 else if (intel_crtc->config->port_clock > 33750)
2779 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2780 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2781 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2783 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2784 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2785 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2787 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2788 DPIO_LANESTAGGER_STRAP(stagger) |
2789 DPIO_LANESTAGGER_STRAP_OVRD |
2790 DPIO_TX1_STAGGER_MASK(0x1f) |
2791 DPIO_TX1_STAGGER_MULT(6) |
2792 DPIO_TX2_STAGGER_MULT(0));
2794 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2795 DPIO_LANESTAGGER_STRAP(stagger) |
2796 DPIO_LANESTAGGER_STRAP_OVRD |
2797 DPIO_TX1_STAGGER_MASK(0x1f) |
2798 DPIO_TX1_STAGGER_MULT(7) |
2799 DPIO_TX2_STAGGER_MULT(5));
2801 mutex_unlock(&dev_priv->sb_lock);
2803 intel_enable_dp(encoder);
2806 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2808 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2809 struct drm_device *dev = encoder->base.dev;
2810 struct drm_i915_private *dev_priv = dev->dev_private;
2811 struct intel_crtc *intel_crtc =
2812 to_intel_crtc(encoder->base.crtc);
2813 enum dpio_channel ch = vlv_dport_to_channel(dport);
2814 enum pipe pipe = intel_crtc->pipe;
2817 intel_dp_prepare(encoder);
2819 mutex_lock(&dev_priv->sb_lock);
2821 /* program left/right clock distribution */
2822 if (pipe != PIPE_B) {
2823 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2824 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2826 val |= CHV_BUFLEFTENA1_FORCE;
2828 val |= CHV_BUFRIGHTENA1_FORCE;
2829 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2831 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2832 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2834 val |= CHV_BUFLEFTENA2_FORCE;
2836 val |= CHV_BUFRIGHTENA2_FORCE;
2837 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2840 /* program clock channel usage */
2841 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2842 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2844 val &= ~CHV_PCS_USEDCLKCHANNEL;
2846 val |= CHV_PCS_USEDCLKCHANNEL;
2847 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2849 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2850 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2852 val &= ~CHV_PCS_USEDCLKCHANNEL;
2854 val |= CHV_PCS_USEDCLKCHANNEL;
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2858 * This a a bit weird since generally CL
2859 * matches the pipe, but here we need to
2860 * pick the CL based on the port.
2862 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2864 val &= ~CHV_CMN_USEDCLKCHANNEL;
2866 val |= CHV_CMN_USEDCLKCHANNEL;
2867 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2869 mutex_unlock(&dev_priv->sb_lock);
2873 * Native read with retry for link status and receiver capability reads for
2874 * cases where the sink may still be asleep.
2876 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2877 * supposed to retry 3 times per the spec.
2880 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2881 void *buffer, size_t size)
2887 * Sometime we just get the same incorrect byte repeated
2888 * over the entire buffer. Doing just one throw away read
2889 * initially seems to "solve" it.
2891 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2893 for (i = 0; i < 3; i++) {
2894 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2904 * Fetch AUX CH registers 0x202 - 0x207 which contain
2905 * link status information
2908 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2910 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2913 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2916 /* These are source-specific values. */
2918 intel_dp_voltage_max(struct intel_dp *intel_dp)
2920 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2921 struct drm_i915_private *dev_priv = dev->dev_private;
2922 enum port port = dp_to_dig_port(intel_dp)->port;
2924 if (IS_BROXTON(dev))
2925 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2926 else if (INTEL_INFO(dev)->gen >= 9) {
2927 if (dev_priv->edp_low_vswing && port == PORT_A)
2928 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2929 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2930 } else if (IS_VALLEYVIEW(dev))
2931 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2932 else if (IS_GEN7(dev) && port == PORT_A)
2933 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2934 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2935 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2937 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2941 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2943 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2944 enum port port = dp_to_dig_port(intel_dp)->port;
2946 if (INTEL_INFO(dev)->gen >= 9) {
2947 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2949 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2951 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2953 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2954 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2955 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2957 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2959 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2960 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2961 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2962 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2964 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2965 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2966 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2967 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2969 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2971 } else if (IS_VALLEYVIEW(dev)) {
2972 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2973 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2974 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2975 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2976 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2978 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2981 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2983 } else if (IS_GEN7(dev) && port == PORT_A) {
2984 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2985 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2986 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2989 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2991 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2994 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2995 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2996 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2997 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2998 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2999 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3000 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3001 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3003 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3008 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3010 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3011 struct drm_i915_private *dev_priv = dev->dev_private;
3012 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3013 struct intel_crtc *intel_crtc =
3014 to_intel_crtc(dport->base.base.crtc);
3015 unsigned long demph_reg_value, preemph_reg_value,
3016 uniqtranscale_reg_value;
3017 uint8_t train_set = intel_dp->train_set[0];
3018 enum dpio_channel port = vlv_dport_to_channel(dport);
3019 int pipe = intel_crtc->pipe;
3021 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3022 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3023 preemph_reg_value = 0x0004000;
3024 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3026 demph_reg_value = 0x2B405555;
3027 uniqtranscale_reg_value = 0x552AB83A;
3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3030 demph_reg_value = 0x2B404040;
3031 uniqtranscale_reg_value = 0x5548B83A;
3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3034 demph_reg_value = 0x2B245555;
3035 uniqtranscale_reg_value = 0x5560B83A;
3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3038 demph_reg_value = 0x2B405555;
3039 uniqtranscale_reg_value = 0x5598DA3A;
3045 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3046 preemph_reg_value = 0x0002000;
3047 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3049 demph_reg_value = 0x2B404040;
3050 uniqtranscale_reg_value = 0x5552B83A;
3052 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3053 demph_reg_value = 0x2B404848;
3054 uniqtranscale_reg_value = 0x5580B83A;
3056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3057 demph_reg_value = 0x2B404040;
3058 uniqtranscale_reg_value = 0x55ADDA3A;
3064 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3065 preemph_reg_value = 0x0000000;
3066 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3067 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3068 demph_reg_value = 0x2B305555;
3069 uniqtranscale_reg_value = 0x5570B83A;
3071 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3072 demph_reg_value = 0x2B2B4040;
3073 uniqtranscale_reg_value = 0x55ADDA3A;
3079 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3080 preemph_reg_value = 0x0006000;
3081 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3082 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3083 demph_reg_value = 0x1B405555;
3084 uniqtranscale_reg_value = 0x55ADDA3A;
3094 mutex_lock(&dev_priv->sb_lock);
3095 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3096 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3097 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3098 uniqtranscale_reg_value);
3099 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3100 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3101 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3102 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3103 mutex_unlock(&dev_priv->sb_lock);
3108 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3110 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3111 struct drm_i915_private *dev_priv = dev->dev_private;
3112 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3113 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3114 u32 deemph_reg_value, margin_reg_value, val;
3115 uint8_t train_set = intel_dp->train_set[0];
3116 enum dpio_channel ch = vlv_dport_to_channel(dport);
3117 enum pipe pipe = intel_crtc->pipe;
3120 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3121 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3122 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3123 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3124 deemph_reg_value = 128;
3125 margin_reg_value = 52;
3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3128 deemph_reg_value = 128;
3129 margin_reg_value = 77;
3131 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3132 deemph_reg_value = 128;
3133 margin_reg_value = 102;
3135 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3136 deemph_reg_value = 128;
3137 margin_reg_value = 154;
3138 /* FIXME extra to set for 1200 */
3144 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3145 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3146 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3147 deemph_reg_value = 85;
3148 margin_reg_value = 78;
3150 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3151 deemph_reg_value = 85;
3152 margin_reg_value = 116;
3154 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3155 deemph_reg_value = 85;
3156 margin_reg_value = 154;
3162 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3163 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3164 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3165 deemph_reg_value = 64;
3166 margin_reg_value = 104;
3168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3169 deemph_reg_value = 64;
3170 margin_reg_value = 154;
3176 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3177 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3179 deemph_reg_value = 43;
3180 margin_reg_value = 154;
3190 mutex_lock(&dev_priv->sb_lock);
3192 /* Clear calc init */
3193 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3194 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3195 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3196 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3197 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3199 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3200 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3201 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3202 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3203 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3205 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3206 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3207 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3208 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3210 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3211 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3212 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3213 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3215 /* Program swing deemph */
3216 for (i = 0; i < 4; i++) {
3217 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3218 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3219 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3220 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3223 /* Program swing margin */
3224 for (i = 0; i < 4; i++) {
3225 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3226 val &= ~DPIO_SWING_MARGIN000_MASK;
3227 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3228 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3231 /* Disable unique transition scale */
3232 for (i = 0; i < 4; i++) {
3233 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3234 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3235 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3238 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3239 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3240 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3241 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3244 * The document said it needs to set bit 27 for ch0 and bit 26
3245 * for ch1. Might be a typo in the doc.
3246 * For now, for this unique transition scale selection, set bit
3247 * 27 for ch0 and ch1.
3249 for (i = 0; i < 4; i++) {
3250 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3251 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3252 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3255 for (i = 0; i < 4; i++) {
3256 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3257 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3258 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3259 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3263 /* Start swing calculation */
3264 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3265 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3266 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3268 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3269 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3270 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3273 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3274 val |= DPIO_LRC_BYPASS;
3275 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3277 mutex_unlock(&dev_priv->sb_lock);
3283 intel_get_adjust_train(struct intel_dp *intel_dp,
3284 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3289 uint8_t voltage_max;
3290 uint8_t preemph_max;
3292 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3293 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3294 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3302 voltage_max = intel_dp_voltage_max(intel_dp);
3303 if (v >= voltage_max)
3304 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3306 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3307 if (p >= preemph_max)
3308 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3310 for (lane = 0; lane < 4; lane++)
3311 intel_dp->train_set[lane] = v | p;
3315 gen4_signal_levels(uint8_t train_set)
3317 uint32_t signal_levels = 0;
3319 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3322 signal_levels |= DP_VOLTAGE_0_4;
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3325 signal_levels |= DP_VOLTAGE_0_6;
3327 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3328 signal_levels |= DP_VOLTAGE_0_8;
3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3331 signal_levels |= DP_VOLTAGE_1_2;
3334 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3335 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3337 signal_levels |= DP_PRE_EMPHASIS_0;
3339 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3340 signal_levels |= DP_PRE_EMPHASIS_3_5;
3342 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3343 signal_levels |= DP_PRE_EMPHASIS_6;
3345 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3346 signal_levels |= DP_PRE_EMPHASIS_9_5;
3349 return signal_levels;
3352 /* Gen6's DP voltage swing and pre-emphasis control */
3354 gen6_edp_signal_levels(uint8_t train_set)
3356 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3357 DP_TRAIN_PRE_EMPHASIS_MASK);
3358 switch (signal_levels) {
3359 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3360 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3361 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3363 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3364 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3366 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3367 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3369 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3372 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3374 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3375 "0x%x\n", signal_levels);
3376 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3380 /* Gen7's DP voltage swing and pre-emphasis control */
3382 gen7_edp_signal_levels(uint8_t train_set)
3384 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3385 DP_TRAIN_PRE_EMPHASIS_MASK);
3386 switch (signal_levels) {
3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3388 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3390 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3391 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3392 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3395 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3397 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3399 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3400 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3401 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3402 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3405 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3406 "0x%x\n", signal_levels);
3407 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3411 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3413 hsw_signal_levels(uint8_t train_set)
3415 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3416 DP_TRAIN_PRE_EMPHASIS_MASK);
3417 switch (signal_levels) {
3418 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3419 return DDI_BUF_TRANS_SELECT(0);
3420 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3421 return DDI_BUF_TRANS_SELECT(1);
3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3423 return DDI_BUF_TRANS_SELECT(2);
3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3425 return DDI_BUF_TRANS_SELECT(3);
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3428 return DDI_BUF_TRANS_SELECT(4);
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3430 return DDI_BUF_TRANS_SELECT(5);
3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3432 return DDI_BUF_TRANS_SELECT(6);
3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3435 return DDI_BUF_TRANS_SELECT(7);
3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3437 return DDI_BUF_TRANS_SELECT(8);
3439 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3440 return DDI_BUF_TRANS_SELECT(9);
3442 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3443 "0x%x\n", signal_levels);
3444 return DDI_BUF_TRANS_SELECT(0);
3448 static void bxt_signal_levels(struct intel_dp *intel_dp)
3450 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3451 enum port port = dport->port;
3452 struct drm_device *dev = dport->base.base.dev;
3453 struct intel_encoder *encoder = &dport->base;
3454 uint8_t train_set = intel_dp->train_set[0];
3457 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3458 DP_TRAIN_PRE_EMPHASIS_MASK);
3459 switch (signal_levels) {
3461 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3494 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3497 /* Properly updates "DP" with the correct signal levels. */
3499 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3501 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3502 enum port port = intel_dig_port->port;
3503 struct drm_device *dev = intel_dig_port->base.base.dev;
3504 uint32_t signal_levels, mask;
3505 uint8_t train_set = intel_dp->train_set[0];
3507 if (IS_BROXTON(dev)) {
3509 bxt_signal_levels(intel_dp);
3511 } else if (HAS_DDI(dev)) {
3512 signal_levels = hsw_signal_levels(train_set);
3513 mask = DDI_BUF_EMP_MASK;
3514 } else if (IS_CHERRYVIEW(dev)) {
3515 signal_levels = chv_signal_levels(intel_dp);
3517 } else if (IS_VALLEYVIEW(dev)) {
3518 signal_levels = vlv_signal_levels(intel_dp);
3520 } else if (IS_GEN7(dev) && port == PORT_A) {
3521 signal_levels = gen7_edp_signal_levels(train_set);
3522 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3523 } else if (IS_GEN6(dev) && port == PORT_A) {
3524 signal_levels = gen6_edp_signal_levels(train_set);
3525 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3527 signal_levels = gen4_signal_levels(train_set);
3528 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3532 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3534 DRM_DEBUG_KMS("Using vswing level %d\n",
3535 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3536 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3537 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3538 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3540 *DP = (*DP & ~mask) | signal_levels;
3544 intel_dp_set_link_train(struct intel_dp *intel_dp,
3546 uint8_t dp_train_pat)
3548 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3549 struct drm_device *dev = intel_dig_port->base.base.dev;
3550 struct drm_i915_private *dev_priv = dev->dev_private;
3551 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3554 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3556 I915_WRITE(intel_dp->output_reg, *DP);
3557 POSTING_READ(intel_dp->output_reg);
3559 buf[0] = dp_train_pat;
3560 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3561 DP_TRAINING_PATTERN_DISABLE) {
3562 /* don't write DP_TRAINING_LANEx_SET on disable */
3565 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3566 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3567 len = intel_dp->lane_count + 1;
3570 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3577 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3578 uint8_t dp_train_pat)
3580 if (!intel_dp->train_set_valid)
3581 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3582 intel_dp_set_signal_levels(intel_dp, DP);
3583 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3587 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3588 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3590 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3591 struct drm_device *dev = intel_dig_port->base.base.dev;
3592 struct drm_i915_private *dev_priv = dev->dev_private;
3595 intel_get_adjust_train(intel_dp, link_status);
3596 intel_dp_set_signal_levels(intel_dp, DP);
3598 I915_WRITE(intel_dp->output_reg, *DP);
3599 POSTING_READ(intel_dp->output_reg);
3601 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3602 intel_dp->train_set, intel_dp->lane_count);
3604 return ret == intel_dp->lane_count;
3607 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3609 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3610 struct drm_device *dev = intel_dig_port->base.base.dev;
3611 struct drm_i915_private *dev_priv = dev->dev_private;
3612 enum port port = intel_dig_port->port;
3618 val = I915_READ(DP_TP_CTL(port));
3619 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3620 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3621 I915_WRITE(DP_TP_CTL(port), val);
3624 * On PORT_A we can have only eDP in SST mode. There the only reason
3625 * we need to set idle transmission mode is to work around a HW issue
3626 * where we enable the pipe while not in idle link-training mode.
3627 * In this case there is requirement to wait for a minimum number of
3628 * idle patterns to be sent.
3633 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3635 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3638 /* Enable corresponding port and start training pattern 1 */
3640 intel_dp_start_link_train(struct intel_dp *intel_dp)
3642 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3643 struct drm_device *dev = encoder->dev;
3646 int voltage_tries, loop_tries;
3647 uint32_t DP = intel_dp->DP;
3648 uint8_t link_config[2];
3651 intel_ddi_prepare_link_retrain(encoder);
3653 /* Write the link configuration data */
3654 link_config[0] = intel_dp->link_bw;
3655 link_config[1] = intel_dp->lane_count;
3656 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3657 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3658 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3659 if (intel_dp->num_sink_rates)
3660 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3661 &intel_dp->rate_select, 1);
3664 link_config[1] = DP_SET_ANSI_8B10B;
3665 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3669 /* clock recovery */
3670 if (!intel_dp_reset_link_train(intel_dp, &DP,
3671 DP_TRAINING_PATTERN_1 |
3672 DP_LINK_SCRAMBLING_DISABLE)) {
3673 DRM_ERROR("failed to enable link training\n");
3681 uint8_t link_status[DP_LINK_STATUS_SIZE];
3683 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3684 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3685 DRM_ERROR("failed to get link status\n");
3689 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3690 DRM_DEBUG_KMS("clock recovery OK\n");
3695 * if we used previously trained voltage and pre-emphasis values
3696 * and we don't get clock recovery, reset link training values
3698 if (intel_dp->train_set_valid) {
3699 DRM_DEBUG_KMS("clock recovery not ok, reset");
3700 /* clear the flag as we are not reusing train set */
3701 intel_dp->train_set_valid = false;
3702 if (!intel_dp_reset_link_train(intel_dp, &DP,
3703 DP_TRAINING_PATTERN_1 |
3704 DP_LINK_SCRAMBLING_DISABLE)) {
3705 DRM_ERROR("failed to enable link training\n");
3711 /* Check to see if we've tried the max voltage */
3712 for (i = 0; i < intel_dp->lane_count; i++)
3713 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3715 if (i == intel_dp->lane_count) {
3717 if (loop_tries == 5) {
3718 DRM_ERROR("too many full retries, give up\n");
3721 intel_dp_reset_link_train(intel_dp, &DP,
3722 DP_TRAINING_PATTERN_1 |
3723 DP_LINK_SCRAMBLING_DISABLE);
3728 /* Check to see if we've tried the same voltage 5 times */
3729 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3731 if (voltage_tries == 5) {
3732 DRM_ERROR("too many voltage retries, give up\n");
3737 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3739 /* Update training set as requested by target */
3740 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3741 DRM_ERROR("failed to update link training\n");
3750 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3752 bool channel_eq = false;
3753 int tries, cr_tries;
3754 uint32_t DP = intel_dp->DP;
3755 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3757 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3758 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3759 training_pattern = DP_TRAINING_PATTERN_3;
3761 /* channel equalization */
3762 if (!intel_dp_set_link_train(intel_dp, &DP,
3764 DP_LINK_SCRAMBLING_DISABLE)) {
3765 DRM_ERROR("failed to start channel equalization\n");
3773 uint8_t link_status[DP_LINK_STATUS_SIZE];
3776 DRM_ERROR("failed to train DP, aborting\n");
3780 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3781 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3782 DRM_ERROR("failed to get link status\n");
3786 /* Make sure clock is still ok */
3787 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3788 intel_dp->train_set_valid = false;
3789 intel_dp_start_link_train(intel_dp);
3790 intel_dp_set_link_train(intel_dp, &DP,
3792 DP_LINK_SCRAMBLING_DISABLE);
3797 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3802 /* Try 5 times, then try clock recovery if that fails */
3804 intel_dp->train_set_valid = false;
3805 intel_dp_start_link_train(intel_dp);
3806 intel_dp_set_link_train(intel_dp, &DP,
3808 DP_LINK_SCRAMBLING_DISABLE);
3814 /* Update training set as requested by target */
3815 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3816 DRM_ERROR("failed to update link training\n");
3822 intel_dp_set_idle_link_train(intel_dp);
3827 intel_dp->train_set_valid = true;
3828 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3832 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3834 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3835 DP_TRAINING_PATTERN_DISABLE);
3839 intel_dp_link_down(struct intel_dp *intel_dp)
3841 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3842 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3843 enum port port = intel_dig_port->port;
3844 struct drm_device *dev = intel_dig_port->base.base.dev;
3845 struct drm_i915_private *dev_priv = dev->dev_private;
3846 uint32_t DP = intel_dp->DP;
3848 if (WARN_ON(HAS_DDI(dev)))
3851 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3854 DRM_DEBUG_KMS("\n");
3856 if ((IS_GEN7(dev) && port == PORT_A) ||
3857 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3858 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3859 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3861 if (IS_CHERRYVIEW(dev))
3862 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3864 DP &= ~DP_LINK_TRAIN_MASK;
3865 DP |= DP_LINK_TRAIN_PAT_IDLE;
3867 I915_WRITE(intel_dp->output_reg, DP);
3868 POSTING_READ(intel_dp->output_reg);
3870 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3871 I915_WRITE(intel_dp->output_reg, DP);
3872 POSTING_READ(intel_dp->output_reg);
3875 * HW workaround for IBX, we need to move the port
3876 * to transcoder A after disabling it to allow the
3877 * matching HDMI port to be enabled on transcoder A.
3879 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3880 /* always enable with pattern 1 (as per spec) */
3881 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3882 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3883 I915_WRITE(intel_dp->output_reg, DP);
3884 POSTING_READ(intel_dp->output_reg);
3887 I915_WRITE(intel_dp->output_reg, DP);
3888 POSTING_READ(intel_dp->output_reg);
3891 msleep(intel_dp->panel_power_down_delay);
3895 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3897 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3898 struct drm_device *dev = dig_port->base.base.dev;
3899 struct drm_i915_private *dev_priv = dev->dev_private;
3902 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3903 sizeof(intel_dp->dpcd)) < 0)
3904 return false; /* aux transfer failed */
3906 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3908 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3909 return false; /* DPCD not present */
3911 /* Check if the panel supports PSR */
3912 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3913 if (is_edp(intel_dp)) {
3914 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3916 sizeof(intel_dp->psr_dpcd));
3917 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3918 dev_priv->psr.sink_support = true;
3919 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3922 if (INTEL_INFO(dev)->gen >= 9 &&
3923 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3924 uint8_t frame_sync_cap;
3926 dev_priv->psr.sink_support = true;
3927 intel_dp_dpcd_read_wake(&intel_dp->aux,
3928 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3929 &frame_sync_cap, 1);
3930 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3931 /* PSR2 needs frame sync as well */
3932 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3933 DRM_DEBUG_KMS("PSR2 %s on sink",
3934 dev_priv->psr.psr2_support ? "supported" : "not supported");
3938 /* Training Pattern 3 support, both source and sink */
3939 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3940 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3941 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3942 intel_dp->use_tps3 = true;
3943 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3945 intel_dp->use_tps3 = false;
3947 /* Intermediate frequency support */
3948 if (is_edp(intel_dp) &&
3949 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3950 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3951 (rev >= 0x03)) { /* eDp v1.4 or higher */
3952 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3955 intel_dp_dpcd_read_wake(&intel_dp->aux,
3956 DP_SUPPORTED_LINK_RATES,
3958 sizeof(sink_rates));
3960 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3961 int val = le16_to_cpu(sink_rates[i]);
3966 /* Value read is in kHz while drm clock is saved in deca-kHz */
3967 intel_dp->sink_rates[i] = (val * 200) / 10;
3969 intel_dp->num_sink_rates = i;
3972 intel_dp_print_rates(intel_dp);
3974 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3975 DP_DWN_STRM_PORT_PRESENT))
3976 return true; /* native DP sink */
3978 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3979 return true; /* no per-port downstream info */
3981 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3982 intel_dp->downstream_ports,
3983 DP_MAX_DOWNSTREAM_PORTS) < 0)
3984 return false; /* downstream port status fetch failed */
3990 intel_dp_probe_oui(struct intel_dp *intel_dp)
3994 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3997 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3998 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3999 buf[0], buf[1], buf[2]);
4001 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4002 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4003 buf[0], buf[1], buf[2]);
4007 intel_dp_probe_mst(struct intel_dp *intel_dp)
4011 if (!intel_dp->can_mst)
4014 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4017 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4018 if (buf[0] & DP_MST_CAP) {
4019 DRM_DEBUG_KMS("Sink is MST capable\n");
4020 intel_dp->is_mst = true;
4022 DRM_DEBUG_KMS("Sink is not MST capable\n");
4023 intel_dp->is_mst = false;
4027 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4028 return intel_dp->is_mst;
4031 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4033 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4034 struct drm_device *dev = intel_dig_port->base.base.dev;
4035 struct intel_crtc *intel_crtc =
4036 to_intel_crtc(intel_dig_port->base.base.crtc);
4042 hsw_disable_ips(intel_crtc);
4044 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4049 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4054 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4059 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4060 buf | DP_TEST_SINK_START) < 0) {
4065 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4070 test_crc_count = buf & DP_TEST_COUNT_MASK;
4073 if (drm_dp_dpcd_readb(&intel_dp->aux,
4074 DP_TEST_SINK_MISC, &buf) < 0) {
4078 intel_wait_for_vblank(dev, intel_crtc->pipe);
4079 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4081 if (attempts == 0) {
4082 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4087 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4092 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4096 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4097 buf & ~DP_TEST_SINK_START) < 0) {
4102 hsw_enable_ips(intel_crtc);
4107 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4109 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4110 DP_DEVICE_SERVICE_IRQ_VECTOR,
4111 sink_irq_vector, 1) == 1;
4115 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4119 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4121 sink_irq_vector, 14);
4128 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4130 uint8_t test_result = DP_TEST_ACK;
4134 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4136 uint8_t test_result = DP_TEST_NAK;
4140 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4142 uint8_t test_result = DP_TEST_NAK;
4143 struct intel_connector *intel_connector = intel_dp->attached_connector;
4144 struct drm_connector *connector = &intel_connector->base;
4146 if (intel_connector->detect_edid == NULL ||
4147 connector->edid_corrupt ||
4148 intel_dp->aux.i2c_defer_count > 6) {
4149 /* Check EDID read for NACKs, DEFERs and corruption
4150 * (DP CTS 1.2 Core r1.1)
4151 * 4.2.2.4 : Failed EDID read, I2C_NAK
4152 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4153 * 4.2.2.6 : EDID corruption detected
4154 * Use failsafe mode for all cases
4156 if (intel_dp->aux.i2c_nack_count > 0 ||
4157 intel_dp->aux.i2c_defer_count > 0)
4158 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4159 intel_dp->aux.i2c_nack_count,
4160 intel_dp->aux.i2c_defer_count);
4161 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4163 if (!drm_dp_dpcd_write(&intel_dp->aux,
4164 DP_TEST_EDID_CHECKSUM,
4165 &intel_connector->detect_edid->checksum,
4167 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4169 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4170 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4173 /* Set test active flag here so userspace doesn't interrupt things */
4174 intel_dp->compliance_test_active = 1;
4179 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4181 uint8_t test_result = DP_TEST_NAK;
4185 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4187 uint8_t response = DP_TEST_NAK;
4191 intel_dp->compliance_test_active = 0;
4192 intel_dp->compliance_test_type = 0;
4193 intel_dp->compliance_test_data = 0;
4195 intel_dp->aux.i2c_nack_count = 0;
4196 intel_dp->aux.i2c_defer_count = 0;
4198 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4200 DRM_DEBUG_KMS("Could not read test request from sink\n");
4205 case DP_TEST_LINK_TRAINING:
4206 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4207 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4208 response = intel_dp_autotest_link_training(intel_dp);
4210 case DP_TEST_LINK_VIDEO_PATTERN:
4211 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4212 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4213 response = intel_dp_autotest_video_pattern(intel_dp);
4215 case DP_TEST_LINK_EDID_READ:
4216 DRM_DEBUG_KMS("EDID test requested\n");
4217 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4218 response = intel_dp_autotest_edid(intel_dp);
4220 case DP_TEST_LINK_PHY_TEST_PATTERN:
4221 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4222 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4223 response = intel_dp_autotest_phy_pattern(intel_dp);
4226 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4231 status = drm_dp_dpcd_write(&intel_dp->aux,
4235 DRM_DEBUG_KMS("Could not write test response to sink\n");
4239 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4243 if (intel_dp->is_mst) {
4248 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4252 /* check link status - esi[10] = 0x200c */
4253 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4254 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4255 intel_dp_start_link_train(intel_dp);
4256 intel_dp_complete_link_train(intel_dp);
4257 intel_dp_stop_link_train(intel_dp);
4260 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4261 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4264 for (retry = 0; retry < 3; retry++) {
4266 wret = drm_dp_dpcd_write(&intel_dp->aux,
4267 DP_SINK_COUNT_ESI+1,
4274 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4276 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4285 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4286 intel_dp->is_mst = false;
4287 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4288 /* send a hotplug event */
4289 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4296 * According to DP spec
4299 * 2. Configure link according to Receiver Capabilities
4300 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4301 * 4. Check link status on receipt of hot-plug interrupt
4304 intel_dp_check_link_status(struct intel_dp *intel_dp)
4306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4307 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4309 u8 link_status[DP_LINK_STATUS_SIZE];
4311 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4313 if (!intel_encoder->connectors_active)
4316 if (WARN_ON(!intel_encoder->base.crtc))
4319 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4322 /* Try to read receiver status if the link appears to be up */
4323 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4327 /* Now read the DPCD to see if it's actually running */
4328 if (!intel_dp_get_dpcd(intel_dp)) {
4332 /* Try to read the source of the interrupt */
4333 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4334 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4335 /* Clear interrupt source */
4336 drm_dp_dpcd_writeb(&intel_dp->aux,
4337 DP_DEVICE_SERVICE_IRQ_VECTOR,
4340 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4341 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4342 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4343 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4346 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4347 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4348 intel_encoder->base.name);
4349 intel_dp_start_link_train(intel_dp);
4350 intel_dp_complete_link_train(intel_dp);
4351 intel_dp_stop_link_train(intel_dp);
4355 /* XXX this is probably wrong for multiple downstream ports */
4356 static enum drm_connector_status
4357 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4359 uint8_t *dpcd = intel_dp->dpcd;
4362 if (!intel_dp_get_dpcd(intel_dp))
4363 return connector_status_disconnected;
4365 /* if there's no downstream port, we're done */
4366 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4367 return connector_status_connected;
4369 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4370 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4371 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4374 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4376 return connector_status_unknown;
4378 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4379 : connector_status_disconnected;
4382 /* If no HPD, poke DDC gently */
4383 if (drm_probe_ddc(&intel_dp->aux.ddc))
4384 return connector_status_connected;
4386 /* Well we tried, say unknown for unreliable port types */
4387 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4388 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4389 if (type == DP_DS_PORT_TYPE_VGA ||
4390 type == DP_DS_PORT_TYPE_NON_EDID)
4391 return connector_status_unknown;
4393 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4394 DP_DWN_STRM_PORT_TYPE_MASK;
4395 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4396 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4397 return connector_status_unknown;
4400 /* Anything else is out of spec, warn and ignore */
4401 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4402 return connector_status_disconnected;
4405 static enum drm_connector_status
4406 edp_detect(struct intel_dp *intel_dp)
4408 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4409 enum drm_connector_status status;
4411 status = intel_panel_detect(dev);
4412 if (status == connector_status_unknown)
4413 status = connector_status_connected;
4418 static enum drm_connector_status
4419 ironlake_dp_detect(struct intel_dp *intel_dp)
4421 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4422 struct drm_i915_private *dev_priv = dev->dev_private;
4423 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4425 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4426 return connector_status_disconnected;
4428 return intel_dp_detect_dpcd(intel_dp);
4431 static int g4x_digital_port_connected(struct drm_device *dev,
4432 struct intel_digital_port *intel_dig_port)
4434 struct drm_i915_private *dev_priv = dev->dev_private;
4437 if (IS_VALLEYVIEW(dev)) {
4438 switch (intel_dig_port->port) {
4440 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4443 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4446 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4452 switch (intel_dig_port->port) {
4454 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4457 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4460 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4467 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4472 static enum drm_connector_status
4473 g4x_dp_detect(struct intel_dp *intel_dp)
4475 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4476 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4479 /* Can't disconnect eDP, but you can close the lid... */
4480 if (is_edp(intel_dp)) {
4481 enum drm_connector_status status;
4483 status = intel_panel_detect(dev);
4484 if (status == connector_status_unknown)
4485 status = connector_status_connected;
4489 ret = g4x_digital_port_connected(dev, intel_dig_port);
4491 return connector_status_unknown;
4493 return connector_status_disconnected;
4495 return intel_dp_detect_dpcd(intel_dp);
4498 static struct edid *
4499 intel_dp_get_edid(struct intel_dp *intel_dp)
4501 struct intel_connector *intel_connector = intel_dp->attached_connector;
4503 /* use cached edid if we have one */
4504 if (intel_connector->edid) {
4506 if (IS_ERR(intel_connector->edid))
4509 return drm_edid_duplicate(intel_connector->edid);
4511 return drm_get_edid(&intel_connector->base,
4512 &intel_dp->aux.ddc);
4516 intel_dp_set_edid(struct intel_dp *intel_dp)
4518 struct intel_connector *intel_connector = intel_dp->attached_connector;
4521 edid = intel_dp_get_edid(intel_dp);
4522 intel_connector->detect_edid = edid;
4524 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4525 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4527 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4531 intel_dp_unset_edid(struct intel_dp *intel_dp)
4533 struct intel_connector *intel_connector = intel_dp->attached_connector;
4535 kfree(intel_connector->detect_edid);
4536 intel_connector->detect_edid = NULL;
4538 intel_dp->has_audio = false;
4541 static enum intel_display_power_domain
4542 intel_dp_power_get(struct intel_dp *dp)
4544 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4545 enum intel_display_power_domain power_domain;
4547 power_domain = intel_display_port_power_domain(encoder);
4548 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4550 return power_domain;
4554 intel_dp_power_put(struct intel_dp *dp,
4555 enum intel_display_power_domain power_domain)
4557 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4558 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4561 static enum drm_connector_status
4562 intel_dp_detect(struct drm_connector *connector, bool force)
4564 struct intel_dp *intel_dp = intel_attached_dp(connector);
4565 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4566 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4567 struct drm_device *dev = connector->dev;
4568 enum drm_connector_status status;
4569 enum intel_display_power_domain power_domain;
4573 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4574 connector->base.id, connector->name);
4575 intel_dp_unset_edid(intel_dp);
4577 if (intel_dp->is_mst) {
4578 /* MST devices are disconnected from a monitor POV */
4579 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4580 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4581 return connector_status_disconnected;
4584 power_domain = intel_dp_power_get(intel_dp);
4586 /* Can't disconnect eDP, but you can close the lid... */
4587 if (is_edp(intel_dp))
4588 status = edp_detect(intel_dp);
4589 else if (HAS_PCH_SPLIT(dev))
4590 status = ironlake_dp_detect(intel_dp);
4592 status = g4x_dp_detect(intel_dp);
4593 if (status != connector_status_connected)
4596 intel_dp_probe_oui(intel_dp);
4598 ret = intel_dp_probe_mst(intel_dp);
4600 /* if we are in MST mode then this connector
4601 won't appear connected or have anything with EDID on it */
4602 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4603 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4604 status = connector_status_disconnected;
4608 intel_dp_set_edid(intel_dp);
4610 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4611 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4612 status = connector_status_connected;
4614 /* Try to read the source of the interrupt */
4615 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4616 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4617 /* Clear interrupt source */
4618 drm_dp_dpcd_writeb(&intel_dp->aux,
4619 DP_DEVICE_SERVICE_IRQ_VECTOR,
4622 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4623 intel_dp_handle_test_request(intel_dp);
4624 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4625 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4629 intel_dp_power_put(intel_dp, power_domain);
4634 intel_dp_force(struct drm_connector *connector)
4636 struct intel_dp *intel_dp = intel_attached_dp(connector);
4637 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4638 enum intel_display_power_domain power_domain;
4640 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4641 connector->base.id, connector->name);
4642 intel_dp_unset_edid(intel_dp);
4644 if (connector->status != connector_status_connected)
4647 power_domain = intel_dp_power_get(intel_dp);
4649 intel_dp_set_edid(intel_dp);
4651 intel_dp_power_put(intel_dp, power_domain);
4653 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4654 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4657 static int intel_dp_get_modes(struct drm_connector *connector)
4659 struct intel_connector *intel_connector = to_intel_connector(connector);
4662 edid = intel_connector->detect_edid;
4664 int ret = intel_connector_update_modes(connector, edid);
4669 /* if eDP has no EDID, fall back to fixed mode */
4670 if (is_edp(intel_attached_dp(connector)) &&
4671 intel_connector->panel.fixed_mode) {
4672 struct drm_display_mode *mode;
4674 mode = drm_mode_duplicate(connector->dev,
4675 intel_connector->panel.fixed_mode);
4677 drm_mode_probed_add(connector, mode);
4686 intel_dp_detect_audio(struct drm_connector *connector)
4688 bool has_audio = false;
4691 edid = to_intel_connector(connector)->detect_edid;
4693 has_audio = drm_detect_monitor_audio(edid);
4699 intel_dp_set_property(struct drm_connector *connector,
4700 struct drm_property *property,
4703 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4704 struct intel_connector *intel_connector = to_intel_connector(connector);
4705 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4706 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4709 ret = drm_object_property_set_value(&connector->base, property, val);
4713 if (property == dev_priv->force_audio_property) {
4717 if (i == intel_dp->force_audio)
4720 intel_dp->force_audio = i;
4722 if (i == HDMI_AUDIO_AUTO)
4723 has_audio = intel_dp_detect_audio(connector);
4725 has_audio = (i == HDMI_AUDIO_ON);
4727 if (has_audio == intel_dp->has_audio)
4730 intel_dp->has_audio = has_audio;
4734 if (property == dev_priv->broadcast_rgb_property) {
4735 bool old_auto = intel_dp->color_range_auto;
4736 uint32_t old_range = intel_dp->color_range;
4739 case INTEL_BROADCAST_RGB_AUTO:
4740 intel_dp->color_range_auto = true;
4742 case INTEL_BROADCAST_RGB_FULL:
4743 intel_dp->color_range_auto = false;
4744 intel_dp->color_range = 0;
4746 case INTEL_BROADCAST_RGB_LIMITED:
4747 intel_dp->color_range_auto = false;
4748 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4754 if (old_auto == intel_dp->color_range_auto &&
4755 old_range == intel_dp->color_range)
4761 if (is_edp(intel_dp) &&
4762 property == connector->dev->mode_config.scaling_mode_property) {
4763 if (val == DRM_MODE_SCALE_NONE) {
4764 DRM_DEBUG_KMS("no scaling not supported\n");
4768 if (intel_connector->panel.fitting_mode == val) {
4769 /* the eDP scaling property is not changed */
4772 intel_connector->panel.fitting_mode = val;
4780 if (intel_encoder->base.crtc)
4781 intel_crtc_restore_mode(intel_encoder->base.crtc);
4787 intel_dp_connector_destroy(struct drm_connector *connector)
4789 struct intel_connector *intel_connector = to_intel_connector(connector);
4791 kfree(intel_connector->detect_edid);
4793 if (!IS_ERR_OR_NULL(intel_connector->edid))
4794 kfree(intel_connector->edid);
4796 /* Can't call is_edp() since the encoder may have been destroyed
4798 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4799 intel_panel_fini(&intel_connector->panel);
4801 drm_connector_cleanup(connector);
4805 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4807 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4808 struct intel_dp *intel_dp = &intel_dig_port->dp;
4810 drm_dp_aux_unregister(&intel_dp->aux);
4811 intel_dp_mst_encoder_cleanup(intel_dig_port);
4812 if (is_edp(intel_dp)) {
4813 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4815 * vdd might still be enabled do to the delayed vdd off.
4816 * Make sure vdd is actually turned off here.
4819 edp_panel_vdd_off_sync(intel_dp);
4820 pps_unlock(intel_dp);
4822 if (intel_dp->edp_notifier.notifier_call) {
4823 unregister_reboot_notifier(&intel_dp->edp_notifier);
4824 intel_dp->edp_notifier.notifier_call = NULL;
4827 drm_encoder_cleanup(encoder);
4828 kfree(intel_dig_port);
4831 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4833 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4835 if (!is_edp(intel_dp))
4839 * vdd might still be enabled do to the delayed vdd off.
4840 * Make sure vdd is actually turned off here.
4842 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4844 edp_panel_vdd_off_sync(intel_dp);
4845 pps_unlock(intel_dp);
4848 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4850 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4851 struct drm_device *dev = intel_dig_port->base.base.dev;
4852 struct drm_i915_private *dev_priv = dev->dev_private;
4853 enum intel_display_power_domain power_domain;
4855 lockdep_assert_held(&dev_priv->pps_mutex);
4857 if (!edp_have_panel_vdd(intel_dp))
4861 * The VDD bit needs a power domain reference, so if the bit is
4862 * already enabled when we boot or resume, grab this reference and
4863 * schedule a vdd off, so we don't hold on to the reference
4866 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4867 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4868 intel_display_power_get(dev_priv, power_domain);
4870 edp_panel_vdd_schedule_off(intel_dp);
4873 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4875 struct intel_dp *intel_dp;
4877 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4880 intel_dp = enc_to_intel_dp(encoder);
4885 * Read out the current power sequencer assignment,
4886 * in case the BIOS did something with it.
4888 if (IS_VALLEYVIEW(encoder->dev))
4889 vlv_initial_power_sequencer_setup(intel_dp);
4891 intel_edp_panel_vdd_sanitize(intel_dp);
4893 pps_unlock(intel_dp);
4896 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4897 .dpms = intel_connector_dpms,
4898 .detect = intel_dp_detect,
4899 .force = intel_dp_force,
4900 .fill_modes = drm_helper_probe_single_connector_modes,
4901 .set_property = intel_dp_set_property,
4902 .atomic_get_property = intel_connector_atomic_get_property,
4903 .destroy = intel_dp_connector_destroy,
4904 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4905 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4908 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4909 .get_modes = intel_dp_get_modes,
4910 .mode_valid = intel_dp_mode_valid,
4911 .best_encoder = intel_best_encoder,
4914 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4915 .reset = intel_dp_encoder_reset,
4916 .destroy = intel_dp_encoder_destroy,
4920 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4926 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4928 struct intel_dp *intel_dp = &intel_dig_port->dp;
4929 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4930 struct drm_device *dev = intel_dig_port->base.base.dev;
4931 struct drm_i915_private *dev_priv = dev->dev_private;
4932 enum intel_display_power_domain power_domain;
4933 enum irqreturn ret = IRQ_NONE;
4935 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4936 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4938 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4940 * vdd off can generate a long pulse on eDP which
4941 * would require vdd on to handle it, and thus we
4942 * would end up in an endless cycle of
4943 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4945 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4946 port_name(intel_dig_port->port));
4950 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4951 port_name(intel_dig_port->port),
4952 long_hpd ? "long" : "short");
4954 power_domain = intel_display_port_power_domain(intel_encoder);
4955 intel_display_power_get(dev_priv, power_domain);
4958 /* indicate that we need to restart link training */
4959 intel_dp->train_set_valid = false;
4961 if (HAS_PCH_SPLIT(dev)) {
4962 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4965 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4969 if (!intel_dp_get_dpcd(intel_dp)) {
4973 intel_dp_probe_oui(intel_dp);
4975 if (!intel_dp_probe_mst(intel_dp))
4979 if (intel_dp->is_mst) {
4980 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4984 if (!intel_dp->is_mst) {
4986 * we'll check the link status via the normal hot plug path later -
4987 * but for short hpds we should check it now
4989 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4990 intel_dp_check_link_status(intel_dp);
4991 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4999 /* if we were in MST mode, and device is not there get out of MST mode */
5000 if (intel_dp->is_mst) {
5001 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5002 intel_dp->is_mst = false;
5003 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5006 intel_display_power_put(dev_priv, power_domain);
5011 /* Return which DP Port should be selected for Transcoder DP control */
5013 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5015 struct drm_device *dev = crtc->dev;
5016 struct intel_encoder *intel_encoder;
5017 struct intel_dp *intel_dp;
5019 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5020 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5022 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5023 intel_encoder->type == INTEL_OUTPUT_EDP)
5024 return intel_dp->output_reg;
5030 /* check the VBT to see whether the eDP is on DP-D port */
5031 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5033 struct drm_i915_private *dev_priv = dev->dev_private;
5034 union child_device_config *p_child;
5036 static const short port_mapping[] = {
5037 [PORT_B] = PORT_IDPB,
5038 [PORT_C] = PORT_IDPC,
5039 [PORT_D] = PORT_IDPD,
5045 if (!dev_priv->vbt.child_dev_num)
5048 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5049 p_child = dev_priv->vbt.child_dev + i;
5051 if (p_child->common.dvo_port == port_mapping[port] &&
5052 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5053 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5060 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5062 struct intel_connector *intel_connector = to_intel_connector(connector);
5064 intel_attach_force_audio_property(connector);
5065 intel_attach_broadcast_rgb_property(connector);
5066 intel_dp->color_range_auto = true;
5068 if (is_edp(intel_dp)) {
5069 drm_mode_create_scaling_mode_property(connector->dev);
5070 drm_object_attach_property(
5072 connector->dev->mode_config.scaling_mode_property,
5073 DRM_MODE_SCALE_ASPECT);
5074 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5078 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5080 intel_dp->last_power_cycle = jiffies;
5081 intel_dp->last_power_on = jiffies;
5082 intel_dp->last_backlight_off = jiffies;
5086 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5087 struct intel_dp *intel_dp)
5089 struct drm_i915_private *dev_priv = dev->dev_private;
5090 struct edp_power_seq cur, vbt, spec,
5091 *final = &intel_dp->pps_delays;
5092 u32 pp_on, pp_off, pp_div, pp;
5093 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5095 lockdep_assert_held(&dev_priv->pps_mutex);
5097 /* already initialized? */
5098 if (final->t11_t12 != 0)
5101 if (HAS_PCH_SPLIT(dev)) {
5102 pp_ctrl_reg = PCH_PP_CONTROL;
5103 pp_on_reg = PCH_PP_ON_DELAYS;
5104 pp_off_reg = PCH_PP_OFF_DELAYS;
5105 pp_div_reg = PCH_PP_DIVISOR;
5107 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5109 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5110 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5111 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5112 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5115 /* Workaround: Need to write PP_CONTROL with the unlock key as
5116 * the very first thing. */
5117 pp = ironlake_get_pp_control(intel_dp);
5118 I915_WRITE(pp_ctrl_reg, pp);
5120 pp_on = I915_READ(pp_on_reg);
5121 pp_off = I915_READ(pp_off_reg);
5122 pp_div = I915_READ(pp_div_reg);
5124 /* Pull timing values out of registers */
5125 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5126 PANEL_POWER_UP_DELAY_SHIFT;
5128 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5129 PANEL_LIGHT_ON_DELAY_SHIFT;
5131 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5132 PANEL_LIGHT_OFF_DELAY_SHIFT;
5134 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5135 PANEL_POWER_DOWN_DELAY_SHIFT;
5137 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5138 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5140 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5141 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5143 vbt = dev_priv->vbt.edp_pps;
5145 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5146 * our hw here, which are all in 100usec. */
5147 spec.t1_t3 = 210 * 10;
5148 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5149 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5150 spec.t10 = 500 * 10;
5151 /* This one is special and actually in units of 100ms, but zero
5152 * based in the hw (so we need to add 100 ms). But the sw vbt
5153 * table multiplies it with 1000 to make it in units of 100usec,
5155 spec.t11_t12 = (510 + 100) * 10;
5157 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5158 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5160 /* Use the max of the register settings and vbt. If both are
5161 * unset, fall back to the spec limits. */
5162 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5164 max(cur.field, vbt.field))
5165 assign_final(t1_t3);
5169 assign_final(t11_t12);
5172 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5173 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5174 intel_dp->backlight_on_delay = get_delay(t8);
5175 intel_dp->backlight_off_delay = get_delay(t9);
5176 intel_dp->panel_power_down_delay = get_delay(t10);
5177 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5180 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5181 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5182 intel_dp->panel_power_cycle_delay);
5184 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5185 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5189 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5190 struct intel_dp *intel_dp)
5192 struct drm_i915_private *dev_priv = dev->dev_private;
5193 u32 pp_on, pp_off, pp_div, port_sel = 0;
5194 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5195 int pp_on_reg, pp_off_reg, pp_div_reg;
5196 enum port port = dp_to_dig_port(intel_dp)->port;
5197 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5199 lockdep_assert_held(&dev_priv->pps_mutex);
5201 if (HAS_PCH_SPLIT(dev)) {
5202 pp_on_reg = PCH_PP_ON_DELAYS;
5203 pp_off_reg = PCH_PP_OFF_DELAYS;
5204 pp_div_reg = PCH_PP_DIVISOR;
5206 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5208 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5209 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5210 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5214 * And finally store the new values in the power sequencer. The
5215 * backlight delays are set to 1 because we do manual waits on them. For
5216 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5217 * we'll end up waiting for the backlight off delay twice: once when we
5218 * do the manual sleep, and once when we disable the panel and wait for
5219 * the PP_STATUS bit to become zero.
5221 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5222 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5223 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5224 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5225 /* Compute the divisor for the pp clock, simply match the Bspec
5227 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5228 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5229 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5231 /* Haswell doesn't have any port selection bits for the panel
5232 * power sequencer any more. */
5233 if (IS_VALLEYVIEW(dev)) {
5234 port_sel = PANEL_PORT_SELECT_VLV(port);
5235 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5237 port_sel = PANEL_PORT_SELECT_DPA;
5239 port_sel = PANEL_PORT_SELECT_DPD;
5244 I915_WRITE(pp_on_reg, pp_on);
5245 I915_WRITE(pp_off_reg, pp_off);
5246 I915_WRITE(pp_div_reg, pp_div);
5248 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5249 I915_READ(pp_on_reg),
5250 I915_READ(pp_off_reg),
5251 I915_READ(pp_div_reg));
5255 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5257 * @refresh_rate: RR to be programmed
5259 * This function gets called when refresh rate (RR) has to be changed from
5260 * one frequency to another. Switches can be between high and low RR
5261 * supported by the panel or to any other RR based on media playback (in
5262 * this case, RR value needs to be passed from user space).
5264 * The caller of this function needs to take a lock on dev_priv->drrs.
5266 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5268 struct drm_i915_private *dev_priv = dev->dev_private;
5269 struct intel_encoder *encoder;
5270 struct intel_digital_port *dig_port = NULL;
5271 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5272 struct intel_crtc_state *config = NULL;
5273 struct intel_crtc *intel_crtc = NULL;
5275 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5277 if (refresh_rate <= 0) {
5278 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5282 if (intel_dp == NULL) {
5283 DRM_DEBUG_KMS("DRRS not supported.\n");
5288 * FIXME: This needs proper synchronization with psr state for some
5289 * platforms that cannot have PSR and DRRS enabled at the same time.
5292 dig_port = dp_to_dig_port(intel_dp);
5293 encoder = &dig_port->base;
5294 intel_crtc = to_intel_crtc(encoder->base.crtc);
5297 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5301 config = intel_crtc->config;
5303 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5304 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5308 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5310 index = DRRS_LOW_RR;
5312 if (index == dev_priv->drrs.refresh_rate_type) {
5314 "DRRS requested for previously set RR...ignoring\n");
5318 if (!intel_crtc->active) {
5319 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5323 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5326 intel_dp_set_m_n(intel_crtc, M1_N1);
5329 intel_dp_set_m_n(intel_crtc, M2_N2);
5333 DRM_ERROR("Unsupported refreshrate type\n");
5335 } else if (INTEL_INFO(dev)->gen > 6) {
5336 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5337 val = I915_READ(reg);
5339 if (index > DRRS_HIGH_RR) {
5340 if (IS_VALLEYVIEW(dev))
5341 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5343 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5345 if (IS_VALLEYVIEW(dev))
5346 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5348 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5350 I915_WRITE(reg, val);
5353 dev_priv->drrs.refresh_rate_type = index;
5355 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5359 * intel_edp_drrs_enable - init drrs struct if supported
5360 * @intel_dp: DP struct
5362 * Initializes frontbuffer_bits and drrs.dp
5364 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5366 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5367 struct drm_i915_private *dev_priv = dev->dev_private;
5368 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5369 struct drm_crtc *crtc = dig_port->base.base.crtc;
5370 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5372 if (!intel_crtc->config->has_drrs) {
5373 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5377 mutex_lock(&dev_priv->drrs.mutex);
5378 if (WARN_ON(dev_priv->drrs.dp)) {
5379 DRM_ERROR("DRRS already enabled\n");
5383 dev_priv->drrs.busy_frontbuffer_bits = 0;
5385 dev_priv->drrs.dp = intel_dp;
5388 mutex_unlock(&dev_priv->drrs.mutex);
5392 * intel_edp_drrs_disable - Disable DRRS
5393 * @intel_dp: DP struct
5396 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5398 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5399 struct drm_i915_private *dev_priv = dev->dev_private;
5400 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5401 struct drm_crtc *crtc = dig_port->base.base.crtc;
5402 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5404 if (!intel_crtc->config->has_drrs)
5407 mutex_lock(&dev_priv->drrs.mutex);
5408 if (!dev_priv->drrs.dp) {
5409 mutex_unlock(&dev_priv->drrs.mutex);
5413 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5414 intel_dp_set_drrs_state(dev_priv->dev,
5415 intel_dp->attached_connector->panel.
5416 fixed_mode->vrefresh);
5418 dev_priv->drrs.dp = NULL;
5419 mutex_unlock(&dev_priv->drrs.mutex);
5421 cancel_delayed_work_sync(&dev_priv->drrs.work);
5424 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5426 struct drm_i915_private *dev_priv =
5427 container_of(work, typeof(*dev_priv), drrs.work.work);
5428 struct intel_dp *intel_dp;
5430 mutex_lock(&dev_priv->drrs.mutex);
5432 intel_dp = dev_priv->drrs.dp;
5438 * The delayed work can race with an invalidate hence we need to
5442 if (dev_priv->drrs.busy_frontbuffer_bits)
5445 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5446 intel_dp_set_drrs_state(dev_priv->dev,
5447 intel_dp->attached_connector->panel.
5448 downclock_mode->vrefresh);
5451 mutex_unlock(&dev_priv->drrs.mutex);
5455 * intel_edp_drrs_invalidate - Invalidate DRRS
5457 * @frontbuffer_bits: frontbuffer plane tracking bits
5459 * When there is a disturbance on screen (due to cursor movement/time
5460 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5463 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5465 void intel_edp_drrs_invalidate(struct drm_device *dev,
5466 unsigned frontbuffer_bits)
5468 struct drm_i915_private *dev_priv = dev->dev_private;
5469 struct drm_crtc *crtc;
5472 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5475 cancel_delayed_work(&dev_priv->drrs.work);
5477 mutex_lock(&dev_priv->drrs.mutex);
5478 if (!dev_priv->drrs.dp) {
5479 mutex_unlock(&dev_priv->drrs.mutex);
5483 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5484 pipe = to_intel_crtc(crtc)->pipe;
5486 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5487 intel_dp_set_drrs_state(dev_priv->dev,
5488 dev_priv->drrs.dp->attached_connector->panel.
5489 fixed_mode->vrefresh);
5492 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5494 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5495 mutex_unlock(&dev_priv->drrs.mutex);
5499 * intel_edp_drrs_flush - Flush DRRS
5501 * @frontbuffer_bits: frontbuffer plane tracking bits
5503 * When there is no movement on screen, DRRS work can be scheduled.
5504 * This DRRS work is responsible for setting relevant registers after a
5505 * timeout of 1 second.
5507 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5509 void intel_edp_drrs_flush(struct drm_device *dev,
5510 unsigned frontbuffer_bits)
5512 struct drm_i915_private *dev_priv = dev->dev_private;
5513 struct drm_crtc *crtc;
5516 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5519 cancel_delayed_work(&dev_priv->drrs.work);
5521 mutex_lock(&dev_priv->drrs.mutex);
5522 if (!dev_priv->drrs.dp) {
5523 mutex_unlock(&dev_priv->drrs.mutex);
5527 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5528 pipe = to_intel_crtc(crtc)->pipe;
5529 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5531 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5532 !dev_priv->drrs.busy_frontbuffer_bits)
5533 schedule_delayed_work(&dev_priv->drrs.work,
5534 msecs_to_jiffies(1000));
5535 mutex_unlock(&dev_priv->drrs.mutex);
5539 * DOC: Display Refresh Rate Switching (DRRS)
5541 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5542 * which enables swtching between low and high refresh rates,
5543 * dynamically, based on the usage scenario. This feature is applicable
5544 * for internal panels.
5546 * Indication that the panel supports DRRS is given by the panel EDID, which
5547 * would list multiple refresh rates for one resolution.
5549 * DRRS is of 2 types - static and seamless.
5550 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5551 * (may appear as a blink on screen) and is used in dock-undock scenario.
5552 * Seamless DRRS involves changing RR without any visual effect to the user
5553 * and can be used during normal system usage. This is done by programming
5554 * certain registers.
5556 * Support for static/seamless DRRS may be indicated in the VBT based on
5557 * inputs from the panel spec.
5559 * DRRS saves power by switching to low RR based on usage scenarios.
5562 * The implementation is based on frontbuffer tracking implementation.
5563 * When there is a disturbance on the screen triggered by user activity or a
5564 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5565 * When there is no movement on screen, after a timeout of 1 second, a switch
5566 * to low RR is made.
5567 * For integration with frontbuffer tracking code,
5568 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5570 * DRRS can be further extended to support other internal panels and also
5571 * the scenario of video playback wherein RR is set based on the rate
5572 * requested by userspace.
5576 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5577 * @intel_connector: eDP connector
5578 * @fixed_mode: preferred mode of panel
5580 * This function is called only once at driver load to initialize basic
5584 * Downclock mode if panel supports it, else return NULL.
5585 * DRRS support is determined by the presence of downclock mode (apart
5586 * from VBT setting).
5588 static struct drm_display_mode *
5589 intel_dp_drrs_init(struct intel_connector *intel_connector,
5590 struct drm_display_mode *fixed_mode)
5592 struct drm_connector *connector = &intel_connector->base;
5593 struct drm_device *dev = connector->dev;
5594 struct drm_i915_private *dev_priv = dev->dev_private;
5595 struct drm_display_mode *downclock_mode = NULL;
5597 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5598 mutex_init(&dev_priv->drrs.mutex);
5600 if (INTEL_INFO(dev)->gen <= 6) {
5601 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5605 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5606 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5610 downclock_mode = intel_find_panel_downclock
5611 (dev, fixed_mode, connector);
5613 if (!downclock_mode) {
5614 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5618 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5620 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5621 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5622 return downclock_mode;
5625 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5626 struct intel_connector *intel_connector)
5628 struct drm_connector *connector = &intel_connector->base;
5629 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5630 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5631 struct drm_device *dev = intel_encoder->base.dev;
5632 struct drm_i915_private *dev_priv = dev->dev_private;
5633 struct drm_display_mode *fixed_mode = NULL;
5634 struct drm_display_mode *downclock_mode = NULL;
5636 struct drm_display_mode *scan;
5638 enum pipe pipe = INVALID_PIPE;
5640 if (!is_edp(intel_dp))
5644 intel_edp_panel_vdd_sanitize(intel_dp);
5645 pps_unlock(intel_dp);
5647 /* Cache DPCD and EDID for edp. */
5648 has_dpcd = intel_dp_get_dpcd(intel_dp);
5651 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5652 dev_priv->no_aux_handshake =
5653 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5654 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5656 /* if this fails, presume the device is a ghost */
5657 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5661 /* We now know it's not a ghost, init power sequence regs. */
5663 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5664 pps_unlock(intel_dp);
5666 mutex_lock(&dev->mode_config.mutex);
5667 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5669 if (drm_add_edid_modes(connector, edid)) {
5670 drm_mode_connector_update_edid_property(connector,
5672 drm_edid_to_eld(connector, edid);
5675 edid = ERR_PTR(-EINVAL);
5678 edid = ERR_PTR(-ENOENT);
5680 intel_connector->edid = edid;
5682 /* prefer fixed mode from EDID if available */
5683 list_for_each_entry(scan, &connector->probed_modes, head) {
5684 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5685 fixed_mode = drm_mode_duplicate(dev, scan);
5686 downclock_mode = intel_dp_drrs_init(
5687 intel_connector, fixed_mode);
5692 /* fallback to VBT if available for eDP */
5693 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5694 fixed_mode = drm_mode_duplicate(dev,
5695 dev_priv->vbt.lfp_lvds_vbt_mode);
5697 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5699 mutex_unlock(&dev->mode_config.mutex);
5701 if (IS_VALLEYVIEW(dev)) {
5702 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5703 register_reboot_notifier(&intel_dp->edp_notifier);
5706 * Figure out the current pipe for the initial backlight setup.
5707 * If the current pipe isn't valid, try the PPS pipe, and if that
5708 * fails just assume pipe A.
5710 if (IS_CHERRYVIEW(dev))
5711 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5713 pipe = PORT_TO_PIPE(intel_dp->DP);
5715 if (pipe != PIPE_A && pipe != PIPE_B)
5716 pipe = intel_dp->pps_pipe;
5718 if (pipe != PIPE_A && pipe != PIPE_B)
5721 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5725 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5726 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5727 intel_panel_setup_backlight(connector, pipe);
5733 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5734 struct intel_connector *intel_connector)
5736 struct drm_connector *connector = &intel_connector->base;
5737 struct intel_dp *intel_dp = &intel_dig_port->dp;
5738 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5739 struct drm_device *dev = intel_encoder->base.dev;
5740 struct drm_i915_private *dev_priv = dev->dev_private;
5741 enum port port = intel_dig_port->port;
5744 intel_dp->pps_pipe = INVALID_PIPE;
5746 /* intel_dp vfuncs */
5747 if (INTEL_INFO(dev)->gen >= 9)
5748 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5749 else if (IS_VALLEYVIEW(dev))
5750 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5751 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5752 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5753 else if (HAS_PCH_SPLIT(dev))
5754 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5756 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5758 if (INTEL_INFO(dev)->gen >= 9)
5759 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5761 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5763 /* Preserve the current hw state. */
5764 intel_dp->DP = I915_READ(intel_dp->output_reg);
5765 intel_dp->attached_connector = intel_connector;
5767 if (intel_dp_is_edp(dev, port))
5768 type = DRM_MODE_CONNECTOR_eDP;
5770 type = DRM_MODE_CONNECTOR_DisplayPort;
5773 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5774 * for DP the encoder type can be set by the caller to
5775 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5777 if (type == DRM_MODE_CONNECTOR_eDP)
5778 intel_encoder->type = INTEL_OUTPUT_EDP;
5780 /* eDP only on port B and/or C on vlv/chv */
5781 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5782 port != PORT_B && port != PORT_C))
5785 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5786 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5789 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5790 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5792 connector->interlace_allowed = true;
5793 connector->doublescan_allowed = 0;
5795 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5796 edp_panel_vdd_work);
5798 intel_connector_attach_encoder(intel_connector, intel_encoder);
5799 drm_connector_register(connector);
5802 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5804 intel_connector->get_hw_state = intel_connector_get_hw_state;
5805 intel_connector->unregister = intel_dp_connector_unregister;
5807 /* Set up the hotplug pin. */
5810 intel_encoder->hpd_pin = HPD_PORT_A;
5813 intel_encoder->hpd_pin = HPD_PORT_B;
5816 intel_encoder->hpd_pin = HPD_PORT_C;
5819 intel_encoder->hpd_pin = HPD_PORT_D;
5825 if (is_edp(intel_dp)) {
5827 intel_dp_init_panel_power_timestamps(intel_dp);
5828 if (IS_VALLEYVIEW(dev))
5829 vlv_initial_power_sequencer_setup(intel_dp);
5831 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5832 pps_unlock(intel_dp);
5835 intel_dp_aux_init(intel_dp, intel_connector);
5837 /* init MST on ports that can support it */
5838 if (HAS_DP_MST(dev) &&
5839 (port == PORT_B || port == PORT_C || port == PORT_D))
5840 intel_dp_mst_encoder_init(intel_dig_port,
5841 intel_connector->base.base.id);
5843 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5844 drm_dp_aux_unregister(&intel_dp->aux);
5845 if (is_edp(intel_dp)) {
5846 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5848 * vdd might still be enabled do to the delayed vdd off.
5849 * Make sure vdd is actually turned off here.
5852 edp_panel_vdd_off_sync(intel_dp);
5853 pps_unlock(intel_dp);
5855 drm_connector_unregister(connector);
5856 drm_connector_cleanup(connector);
5860 intel_dp_add_properties(intel_dp, connector);
5862 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5863 * 0xd. Failure to do so will result in spurious interrupts being
5864 * generated on the port when a cable is not attached.
5866 if (IS_G4X(dev) && !IS_GM45(dev)) {
5867 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5868 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5871 i915_debugfs_connector_add(connector);
5877 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5879 struct drm_i915_private *dev_priv = dev->dev_private;
5880 struct intel_digital_port *intel_dig_port;
5881 struct intel_encoder *intel_encoder;
5882 struct drm_encoder *encoder;
5883 struct intel_connector *intel_connector;
5885 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5886 if (!intel_dig_port)
5889 intel_connector = intel_connector_alloc();
5890 if (!intel_connector) {
5891 kfree(intel_dig_port);
5895 intel_encoder = &intel_dig_port->base;
5896 encoder = &intel_encoder->base;
5898 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5899 DRM_MODE_ENCODER_TMDS);
5901 intel_encoder->compute_config = intel_dp_compute_config;
5902 intel_encoder->disable = intel_disable_dp;
5903 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5904 intel_encoder->get_config = intel_dp_get_config;
5905 intel_encoder->suspend = intel_dp_encoder_suspend;
5906 if (IS_CHERRYVIEW(dev)) {
5907 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5908 intel_encoder->pre_enable = chv_pre_enable_dp;
5909 intel_encoder->enable = vlv_enable_dp;
5910 intel_encoder->post_disable = chv_post_disable_dp;
5911 } else if (IS_VALLEYVIEW(dev)) {
5912 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5913 intel_encoder->pre_enable = vlv_pre_enable_dp;
5914 intel_encoder->enable = vlv_enable_dp;
5915 intel_encoder->post_disable = vlv_post_disable_dp;
5917 intel_encoder->pre_enable = g4x_pre_enable_dp;
5918 intel_encoder->enable = g4x_enable_dp;
5919 if (INTEL_INFO(dev)->gen >= 5)
5920 intel_encoder->post_disable = ilk_post_disable_dp;
5923 intel_dig_port->port = port;
5924 intel_dig_port->dp.output_reg = output_reg;
5926 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5927 if (IS_CHERRYVIEW(dev)) {
5929 intel_encoder->crtc_mask = 1 << 2;
5931 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5933 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5935 intel_encoder->cloneable = 0;
5936 intel_encoder->hot_plug = intel_dp_hot_plug;
5938 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5939 dev_priv->hpd_irq_port[port] = intel_dig_port;
5941 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5942 drm_encoder_cleanup(encoder);
5943 kfree(intel_dig_port);
5944 kfree(intel_connector);
5948 void intel_dp_mst_suspend(struct drm_device *dev)
5950 struct drm_i915_private *dev_priv = dev->dev_private;
5954 for (i = 0; i < I915_MAX_PORTS; i++) {
5955 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5956 if (!intel_dig_port)
5959 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5960 if (!intel_dig_port->dp.can_mst)
5962 if (intel_dig_port->dp.is_mst)
5963 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5968 void intel_dp_mst_resume(struct drm_device *dev)
5970 struct drm_i915_private *dev_priv = dev->dev_private;
5973 for (i = 0; i < I915_MAX_PORTS; i++) {
5974 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5975 if (!intel_dig_port)
5977 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5980 if (!intel_dig_port->dp.can_mst)
5983 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5985 intel_dp_check_mst_status(&intel_dig_port->dp);