2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
49 static const struct dp_link_dpll gen4_dpll[] = {
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
56 static const struct dp_link_dpll pch_dpll[] = {
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
63 static const struct dp_link_dpll vlv_dpll[] = {
65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
74 static const struct dp_link_dpll chv_dpll[] = {
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
87 /* Skylake supports following rates */
88 static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
91 243000, 270000, 324000, 405000,
92 420000, 432000, 540000 };
93 static const int default_rates[] = { 162000, 270000, 540000 };
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
102 static bool is_edp(struct intel_dp *intel_dp)
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113 return intel_dig_port->base.base.dev;
116 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
121 static void intel_dp_link_down(struct intel_dp *intel_dp);
122 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
123 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
124 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
125 static void vlv_steal_power_sequencer(struct drm_device *dev,
129 intel_dp_max_link_bw(struct intel_dp *intel_dp)
131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
141 max_link_bw = DP_LINK_BW_1_62;
147 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
160 return min(source_max, sink_max);
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
169 * 270000 * 1 * 8 / 10 == 216000
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
181 intel_dp_link_required(int pixel_clock, int bpp)
183 return (pixel_clock * bpp + 9) / 10;
187 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
189 return (max_link_clock * max_lanes * 8) / 10;
192 static enum drm_mode_status
193 intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
196 struct intel_dp *intel_dp = intel_attached_dp(connector);
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
206 if (mode->vdisplay > fixed_mode->vdisplay)
209 target_clock = fixed_mode->clock;
212 max_link_clock = intel_dp_max_link_rate(intel_dp);
213 max_lanes = intel_dp_max_lane_count(intel_dp);
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
218 if (mode_rate > max_rate)
219 return MODE_CLOCK_HIGH;
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
230 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
242 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
251 /* hrawclock is 1/4 the FSB frequency */
253 intel_hrawclk(struct drm_device *dev)
255 struct drm_i915_private *dev_priv = dev->dev_private;
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
272 case CLKCFG_FSB_1067:
274 case CLKCFG_FSB_1333:
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
286 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
287 struct intel_dp *intel_dp);
289 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290 struct intel_dp *intel_dp);
292 static void pps_lock(struct intel_dp *intel_dp)
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
301 * See vlv_power_sequencer_reset() why we need
302 * a power domain reference here.
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
307 mutex_lock(&dev_priv->pps_mutex);
310 static void pps_unlock(struct intel_dp *intel_dp)
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
318 mutex_unlock(&dev_priv->pps_mutex);
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
325 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
358 * The DPLL for the pipe must be enabled for this to work.
359 * So enable temporarily it if it's not already enabled.
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368 * to make this power seqeuencer lock onto the port.
369 * Otherwise even VDD force bit won't work.
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
381 vlv_force_pll_off(dev, pipe);
385 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
394 lockdep_assert_held(&dev_priv->pps_mutex);
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
403 * We don't have power sequencer currently.
404 * Pick one that's not used by other ports.
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
408 struct intel_dp *tmp;
410 if (encoder->type != INTEL_OUTPUT_EDP)
413 tmp = enc_to_intel_dp(&encoder->base);
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
423 if (WARN_ON(pipes == 0))
426 pipe = ffs(pipes) - 1;
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
435 /* init power sequencer on this pipe and port */
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
443 vlv_power_sequencer_kick(intel_dp);
445 return intel_dp->pps_pipe;
448 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
451 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
457 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
463 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
470 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
472 vlv_pipe_check pipe_check)
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
483 if (!pipe_check(dev_priv, pipe))
493 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 enum port port = intel_dig_port->port;
500 lockdep_assert_held(&dev_priv->pps_mutex);
502 /* try to find a pipe with this port selected */
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
529 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so one
544 * should use them always.
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
550 if (encoder->type != INTEL_OUTPUT_EDP)
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
578 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
579 This function only applicable when panel PM state is not to be tracked */
580 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
588 u32 pp_ctrl_reg, pp_div_reg;
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
595 if (IS_VALLEYVIEW(dev)) {
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
609 pps_unlock(intel_dp);
614 static bool edp_have_panel_power(struct intel_dp *intel_dp)
616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
617 struct drm_i915_private *dev_priv = dev->dev_private;
619 lockdep_assert_held(&dev_priv->pps_mutex);
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
628 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
631 struct drm_i915_private *dev_priv = dev->dev_private;
633 lockdep_assert_held(&dev_priv->pps_mutex);
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
643 intel_dp_check_edp(struct intel_dp *intel_dp)
645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
646 struct drm_i915_private *dev_priv = dev->dev_private;
648 if (!is_edp(intel_dp))
651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
660 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
669 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
672 msecs_to_jiffies_timeout(10));
674 done = wait_for_atomic(C, 10) == 0;
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
683 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
689 * The clock divider is based off the hrawclk, and would like to run at
690 * 2MHz. So, take the hrawclk value and divide by 2 and use that
692 return index ? 0 : intel_hrawclk(dev) / 2;
695 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
703 if (intel_dig_port->port == PORT_A) {
704 if (IS_GEN6(dev) || IS_GEN7(dev))
705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
707 return 225; /* eDP input clock at 450Mhz */
709 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
713 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 struct drm_device *dev = intel_dig_port->base.base.dev;
717 struct drm_i915_private *dev_priv = dev->dev_private;
719 if (intel_dig_port->port == PORT_A) {
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
723 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
724 /* Workaround for non-ULT HSW */
731 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
735 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
737 return index ? 0 : 100;
740 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
743 * SKL doesn't need us to program the AUX clock divider (Hardware will
744 * derive the clock from CDCLK automatically). We still implement the
745 * get_aux_clock_divider vfunc to plug-in into the existing code.
747 return index ? 0 : 1;
750 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
753 uint32_t aux_clock_divider)
755 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
756 struct drm_device *dev = intel_dig_port->base.base.dev;
757 uint32_t precharge, timeout;
764 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
765 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
767 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
769 return DP_AUX_CH_CTL_SEND_BUSY |
771 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
772 DP_AUX_CH_CTL_TIME_OUT_ERROR |
774 DP_AUX_CH_CTL_RECEIVE_ERROR |
775 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
776 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
780 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785 return DP_AUX_CH_CTL_SEND_BUSY |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
796 intel_dp_aux_ch(struct intel_dp *intel_dp,
797 const uint8_t *send, int send_bytes,
798 uint8_t *recv, int recv_size)
800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
801 struct drm_device *dev = intel_dig_port->base.base.dev;
802 struct drm_i915_private *dev_priv = dev->dev_private;
803 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
804 uint32_t ch_data = ch_ctl + 4;
805 uint32_t aux_clock_divider;
806 int i, ret, recv_bytes;
809 bool has_aux_irq = HAS_AUX_IRQ(dev);
815 * We will be called with VDD already enabled for dpcd/edid/oui reads.
816 * In such cases we want to leave VDD enabled and it's up to upper layers
817 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
820 vdd = edp_panel_vdd_on(intel_dp);
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
826 pm_qos_update_request(&dev_priv->pm_qos, 0);
828 intel_dp_check_edp(intel_dp);
830 intel_aux_display_runtime_get(dev_priv);
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
834 status = I915_READ_NOTRACE(ch_ctl);
835 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
853 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
854 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i = 0; i < send_bytes; i += 4)
863 I915_WRITE(ch_data + i,
864 intel_dp_pack_aux(send + i,
867 /* Send the command and wait for it to complete */
868 I915_WRITE(ch_ctl, send_ctl);
870 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
872 /* Clear done status and any errors */
876 DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR);
879 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR))
882 if (status & DP_AUX_CH_CTL_DONE)
885 if (status & DP_AUX_CH_CTL_DONE)
889 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
890 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
895 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected
898 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
899 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
904 /* Timeouts occur when the device isn't connected, so they're
905 * "normal" -- don't fill the kernel log with these */
906 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
907 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
912 /* Unload any bytes sent back from the other side */
913 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
914 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
915 if (recv_bytes > recv_size)
916 recv_bytes = recv_size;
918 for (i = 0; i < recv_bytes; i += 4)
919 intel_dp_unpack_aux(I915_READ(ch_data + i),
920 recv + i, recv_bytes - i);
924 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
925 intel_aux_display_runtime_put(dev_priv);
928 edp_panel_vdd_off(intel_dp, false);
930 pps_unlock(intel_dp);
935 #define BARE_ADDRESS_SIZE 3
936 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
938 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
940 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
941 uint8_t txbuf[20], rxbuf[20];
942 size_t txsize, rxsize;
945 txbuf[0] = msg->request << 4;
946 txbuf[1] = msg->address >> 8;
947 txbuf[2] = msg->address & 0xff;
948 txbuf[3] = msg->size - 1;
950 switch (msg->request & ~DP_AUX_I2C_MOT) {
951 case DP_AUX_NATIVE_WRITE:
952 case DP_AUX_I2C_WRITE:
953 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
954 rxsize = 2; /* 0 or 1 data bytes */
956 if (WARN_ON(txsize > 20))
959 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
961 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
963 msg->reply = rxbuf[0] >> 4;
966 /* Number of bytes written in a short write. */
967 ret = clamp_t(int, rxbuf[1], 0, msg->size);
969 /* Return payload size. */
975 case DP_AUX_NATIVE_READ:
976 case DP_AUX_I2C_READ:
977 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
978 rxsize = msg->size + 1;
980 if (WARN_ON(rxsize > 20))
983 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
985 msg->reply = rxbuf[0] >> 4;
987 * Assume happy day, and copy the data. The caller is
988 * expected to check msg->reply before touching it.
990 * Return payload size.
993 memcpy(msg->buffer, rxbuf + 1, ret);
1006 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1008 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1009 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1010 enum port port = intel_dig_port->port;
1011 const char *name = NULL;
1016 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1020 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1024 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1028 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1036 * The AUX_CTL register is usually DP_CTL + 0x10.
1038 * On Haswell and Broadwell though:
1039 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1040 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1042 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1044 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1045 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1047 intel_dp->aux.name = name;
1048 intel_dp->aux.dev = dev->dev;
1049 intel_dp->aux.transfer = intel_dp_aux_transfer;
1051 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1052 connector->base.kdev->kobj.name);
1054 ret = drm_dp_aux_register(&intel_dp->aux);
1056 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1061 ret = sysfs_create_link(&connector->base.kdev->kobj,
1062 &intel_dp->aux.ddc.dev.kobj,
1063 intel_dp->aux.ddc.dev.kobj.name);
1065 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1066 drm_dp_aux_unregister(&intel_dp->aux);
1071 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1073 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1075 if (!intel_connector->mst_port)
1076 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1077 intel_dp->aux.ddc.dev.kobj.name);
1078 intel_connector_unregister(intel_connector);
1082 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1086 pipe_config->ddi_pll_sel = SKL_DPLL0;
1087 pipe_config->dpll_hw_state.cfgcr1 = 0;
1088 pipe_config->dpll_hw_state.cfgcr2 = 0;
1090 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1091 switch (link_clock / 2) {
1093 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1097 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1101 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1105 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1108 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1109 results in CDCLK change. Need to handle the change of CDCLK by
1110 disabling pipes and re-enabling them */
1112 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1116 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1121 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1125 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1128 case DP_LINK_BW_1_62:
1129 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1131 case DP_LINK_BW_2_7:
1132 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1134 case DP_LINK_BW_5_4:
1135 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1141 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1143 if (intel_dp->num_sink_rates) {
1144 *sink_rates = intel_dp->sink_rates;
1145 return intel_dp->num_sink_rates;
1148 *sink_rates = default_rates;
1150 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1154 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1156 if (INTEL_INFO(dev)->gen >= 9) {
1157 *source_rates = gen9_rates;
1158 return ARRAY_SIZE(gen9_rates);
1159 } else if (IS_CHERRYVIEW(dev)) {
1160 *source_rates = chv_rates;
1161 return ARRAY_SIZE(chv_rates);
1164 *source_rates = default_rates;
1166 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1167 /* WaDisableHBR2:skl */
1168 return (DP_LINK_BW_2_7 >> 3) + 1;
1169 else if (INTEL_INFO(dev)->gen >= 8 ||
1170 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1171 return (DP_LINK_BW_5_4 >> 3) + 1;
1173 return (DP_LINK_BW_2_7 >> 3) + 1;
1177 intel_dp_set_clock(struct intel_encoder *encoder,
1178 struct intel_crtc_state *pipe_config, int link_bw)
1180 struct drm_device *dev = encoder->base.dev;
1181 const struct dp_link_dpll *divisor = NULL;
1185 divisor = gen4_dpll;
1186 count = ARRAY_SIZE(gen4_dpll);
1187 } else if (HAS_PCH_SPLIT(dev)) {
1189 count = ARRAY_SIZE(pch_dpll);
1190 } else if (IS_CHERRYVIEW(dev)) {
1192 count = ARRAY_SIZE(chv_dpll);
1193 } else if (IS_VALLEYVIEW(dev)) {
1195 count = ARRAY_SIZE(vlv_dpll);
1198 if (divisor && count) {
1199 for (i = 0; i < count; i++) {
1200 if (link_bw == divisor[i].link_bw) {
1201 pipe_config->dpll = divisor[i].dpll;
1202 pipe_config->clock_set = true;
1209 static int intersect_rates(const int *source_rates, int source_len,
1210 const int *sink_rates, int sink_len,
1213 int i = 0, j = 0, k = 0;
1215 while (i < source_len && j < sink_len) {
1216 if (source_rates[i] == sink_rates[j]) {
1217 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1219 common_rates[k] = source_rates[i];
1223 } else if (source_rates[i] < sink_rates[j]) {
1232 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1235 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1236 const int *source_rates, *sink_rates;
1237 int source_len, sink_len;
1239 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1240 source_len = intel_dp_source_rates(dev, &source_rates);
1242 return intersect_rates(source_rates, source_len,
1243 sink_rates, sink_len,
1247 static void snprintf_int_array(char *str, size_t len,
1248 const int *array, int nelem)
1254 for (i = 0; i < nelem; i++) {
1255 int r = snprintf(str, len, "%d,", array[i]);
1263 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1265 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1266 const int *source_rates, *sink_rates;
1267 int source_len, sink_len, common_len;
1268 int common_rates[DP_MAX_SUPPORTED_RATES];
1269 char str[128]; /* FIXME: too big for stack? */
1271 if ((drm_debug & DRM_UT_KMS) == 0)
1274 source_len = intel_dp_source_rates(dev, &source_rates);
1275 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1276 DRM_DEBUG_KMS("source rates: %s\n", str);
1278 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1279 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1280 DRM_DEBUG_KMS("sink rates: %s\n", str);
1282 common_len = intel_dp_common_rates(intel_dp, common_rates);
1283 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1284 DRM_DEBUG_KMS("common rates: %s\n", str);
1287 static int rate_to_index(int find, const int *rates)
1291 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1292 if (find == rates[i])
1299 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1301 int rates[DP_MAX_SUPPORTED_RATES] = {};
1304 len = intel_dp_common_rates(intel_dp, rates);
1305 if (WARN_ON(len <= 0))
1308 return rates[rate_to_index(0, rates) - 1];
1311 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1313 return rate_to_index(rate, intel_dp->sink_rates);
1317 intel_dp_compute_config(struct intel_encoder *encoder,
1318 struct intel_crtc_state *pipe_config)
1320 struct drm_device *dev = encoder->base.dev;
1321 struct drm_i915_private *dev_priv = dev->dev_private;
1322 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1323 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1324 enum port port = dp_to_dig_port(intel_dp)->port;
1325 struct intel_crtc *intel_crtc = encoder->new_crtc;
1326 struct intel_connector *intel_connector = intel_dp->attached_connector;
1327 int lane_count, clock;
1328 int min_lane_count = 1;
1329 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1330 /* Conveniently, the link BW constants become indices with a shift...*/
1334 int link_avail, link_clock;
1335 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1338 common_len = intel_dp_common_rates(intel_dp, common_rates);
1340 /* No common link rates between source and sink */
1341 WARN_ON(common_len <= 0);
1343 max_clock = common_len - 1;
1345 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1346 pipe_config->has_pch_encoder = true;
1348 pipe_config->has_dp_encoder = true;
1349 pipe_config->has_drrs = false;
1350 pipe_config->has_audio = intel_dp->has_audio;
1352 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1353 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1355 if (!HAS_PCH_SPLIT(dev))
1356 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1357 intel_connector->panel.fitting_mode);
1359 intel_pch_panel_fitting(intel_crtc, pipe_config,
1360 intel_connector->panel.fitting_mode);
1363 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1366 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1367 "max bw %d pixel clock %iKHz\n",
1368 max_lane_count, common_rates[max_clock],
1369 adjusted_mode->crtc_clock);
1371 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1372 * bpc in between. */
1373 bpp = pipe_config->pipe_bpp;
1374 if (is_edp(intel_dp)) {
1375 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1376 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1377 dev_priv->vbt.edp_bpp);
1378 bpp = dev_priv->vbt.edp_bpp;
1382 * Use the maximum clock and number of lanes the eDP panel
1383 * advertizes being capable of. The panels are generally
1384 * designed to support only a single clock and lane
1385 * configuration, and typically these values correspond to the
1386 * native resolution of the panel.
1388 min_lane_count = max_lane_count;
1389 min_clock = max_clock;
1392 for (; bpp >= 6*3; bpp -= 2*3) {
1393 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1396 for (clock = min_clock; clock <= max_clock; clock++) {
1397 for (lane_count = min_lane_count;
1398 lane_count <= max_lane_count;
1401 link_clock = common_rates[clock];
1402 link_avail = intel_dp_max_data_rate(link_clock,
1405 if (mode_rate <= link_avail) {
1415 if (intel_dp->color_range_auto) {
1418 * CEA-861-E - 5.1 Default Encoding Parameters
1419 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1421 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1422 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1424 intel_dp->color_range = 0;
1427 if (intel_dp->color_range)
1428 pipe_config->limited_color_range = true;
1430 intel_dp->lane_count = lane_count;
1432 if (intel_dp->num_sink_rates) {
1433 intel_dp->link_bw = 0;
1434 intel_dp->rate_select =
1435 intel_dp_rate_select(intel_dp, common_rates[clock]);
1438 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1439 intel_dp->rate_select = 0;
1442 pipe_config->pipe_bpp = bpp;
1443 pipe_config->port_clock = common_rates[clock];
1445 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1446 intel_dp->link_bw, intel_dp->lane_count,
1447 pipe_config->port_clock, bpp);
1448 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1449 mode_rate, link_avail);
1451 intel_link_compute_m_n(bpp, lane_count,
1452 adjusted_mode->crtc_clock,
1453 pipe_config->port_clock,
1454 &pipe_config->dp_m_n);
1456 if (intel_connector->panel.downclock_mode != NULL &&
1457 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1458 pipe_config->has_drrs = true;
1459 intel_link_compute_m_n(bpp, lane_count,
1460 intel_connector->panel.downclock_mode->clock,
1461 pipe_config->port_clock,
1462 &pipe_config->dp_m2_n2);
1465 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1466 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1467 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1468 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1470 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1475 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1477 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1478 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1479 struct drm_device *dev = crtc->base.dev;
1480 struct drm_i915_private *dev_priv = dev->dev_private;
1483 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1484 crtc->config->port_clock);
1485 dpa_ctl = I915_READ(DP_A);
1486 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1488 if (crtc->config->port_clock == 162000) {
1489 /* For a long time we've carried around a ILK-DevA w/a for the
1490 * 160MHz clock. If we're really unlucky, it's still required.
1492 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1493 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1494 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1496 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1497 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1500 I915_WRITE(DP_A, dpa_ctl);
1506 static void intel_dp_prepare(struct intel_encoder *encoder)
1508 struct drm_device *dev = encoder->base.dev;
1509 struct drm_i915_private *dev_priv = dev->dev_private;
1510 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1511 enum port port = dp_to_dig_port(intel_dp)->port;
1512 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1513 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1516 * There are four kinds of DP registers:
1523 * IBX PCH and CPU are the same for almost everything,
1524 * except that the CPU DP PLL is configured in this
1527 * CPT PCH is quite different, having many bits moved
1528 * to the TRANS_DP_CTL register instead. That
1529 * configuration happens (oddly) in ironlake_pch_enable
1532 /* Preserve the BIOS-computed detected bit. This is
1533 * supposed to be read-only.
1535 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1537 /* Handle DP bits in common between all three register formats */
1538 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1539 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1541 if (crtc->config->has_audio)
1542 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1544 /* Split out the IBX/CPU vs CPT settings */
1546 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1547 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1548 intel_dp->DP |= DP_SYNC_HS_HIGH;
1549 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1550 intel_dp->DP |= DP_SYNC_VS_HIGH;
1551 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1553 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1554 intel_dp->DP |= DP_ENHANCED_FRAMING;
1556 intel_dp->DP |= crtc->pipe << 29;
1557 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1558 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1559 intel_dp->DP |= intel_dp->color_range;
1561 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1562 intel_dp->DP |= DP_SYNC_HS_HIGH;
1563 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1564 intel_dp->DP |= DP_SYNC_VS_HIGH;
1565 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1567 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1568 intel_dp->DP |= DP_ENHANCED_FRAMING;
1570 if (!IS_CHERRYVIEW(dev)) {
1571 if (crtc->pipe == 1)
1572 intel_dp->DP |= DP_PIPEB_SELECT;
1574 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1577 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1581 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1582 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1584 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1585 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1587 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1588 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1590 static void wait_panel_status(struct intel_dp *intel_dp,
1594 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1595 struct drm_i915_private *dev_priv = dev->dev_private;
1596 u32 pp_stat_reg, pp_ctrl_reg;
1598 lockdep_assert_held(&dev_priv->pps_mutex);
1600 pp_stat_reg = _pp_stat_reg(intel_dp);
1601 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1603 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1605 I915_READ(pp_stat_reg),
1606 I915_READ(pp_ctrl_reg));
1608 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1609 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1610 I915_READ(pp_stat_reg),
1611 I915_READ(pp_ctrl_reg));
1614 DRM_DEBUG_KMS("Wait complete\n");
1617 static void wait_panel_on(struct intel_dp *intel_dp)
1619 DRM_DEBUG_KMS("Wait for panel power on\n");
1620 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1623 static void wait_panel_off(struct intel_dp *intel_dp)
1625 DRM_DEBUG_KMS("Wait for panel power off time\n");
1626 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1629 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1631 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1633 /* When we disable the VDD override bit last we have to do the manual
1635 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1636 intel_dp->panel_power_cycle_delay);
1638 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1641 static void wait_backlight_on(struct intel_dp *intel_dp)
1643 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1644 intel_dp->backlight_on_delay);
1647 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1649 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1650 intel_dp->backlight_off_delay);
1653 /* Read the current pp_control value, unlocking the register if it
1657 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1659 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1660 struct drm_i915_private *dev_priv = dev->dev_private;
1663 lockdep_assert_held(&dev_priv->pps_mutex);
1665 control = I915_READ(_pp_ctrl_reg(intel_dp));
1666 control &= ~PANEL_UNLOCK_MASK;
1667 control |= PANEL_UNLOCK_REGS;
1672 * Must be paired with edp_panel_vdd_off().
1673 * Must hold pps_mutex around the whole on/off sequence.
1674 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1678 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1679 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1680 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1681 struct drm_i915_private *dev_priv = dev->dev_private;
1682 enum intel_display_power_domain power_domain;
1684 u32 pp_stat_reg, pp_ctrl_reg;
1685 bool need_to_disable = !intel_dp->want_panel_vdd;
1687 lockdep_assert_held(&dev_priv->pps_mutex);
1689 if (!is_edp(intel_dp))
1692 cancel_delayed_work(&intel_dp->panel_vdd_work);
1693 intel_dp->want_panel_vdd = true;
1695 if (edp_have_panel_vdd(intel_dp))
1696 return need_to_disable;
1698 power_domain = intel_display_port_power_domain(intel_encoder);
1699 intel_display_power_get(dev_priv, power_domain);
1701 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1702 port_name(intel_dig_port->port));
1704 if (!edp_have_panel_power(intel_dp))
1705 wait_panel_power_cycle(intel_dp);
1707 pp = ironlake_get_pp_control(intel_dp);
1708 pp |= EDP_FORCE_VDD;
1710 pp_stat_reg = _pp_stat_reg(intel_dp);
1711 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1713 I915_WRITE(pp_ctrl_reg, pp);
1714 POSTING_READ(pp_ctrl_reg);
1715 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1716 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1718 * If the panel wasn't on, delay before accessing aux channel
1720 if (!edp_have_panel_power(intel_dp)) {
1721 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1722 port_name(intel_dig_port->port));
1723 msleep(intel_dp->panel_power_up_delay);
1726 return need_to_disable;
1730 * Must be paired with intel_edp_panel_vdd_off() or
1731 * intel_edp_panel_off().
1732 * Nested calls to these functions are not allowed since
1733 * we drop the lock. Caller must use some higher level
1734 * locking to prevent nested calls from other threads.
1736 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1740 if (!is_edp(intel_dp))
1744 vdd = edp_panel_vdd_on(intel_dp);
1745 pps_unlock(intel_dp);
1747 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1748 port_name(dp_to_dig_port(intel_dp)->port));
1751 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1753 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1754 struct drm_i915_private *dev_priv = dev->dev_private;
1755 struct intel_digital_port *intel_dig_port =
1756 dp_to_dig_port(intel_dp);
1757 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1758 enum intel_display_power_domain power_domain;
1760 u32 pp_stat_reg, pp_ctrl_reg;
1762 lockdep_assert_held(&dev_priv->pps_mutex);
1764 WARN_ON(intel_dp->want_panel_vdd);
1766 if (!edp_have_panel_vdd(intel_dp))
1769 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1770 port_name(intel_dig_port->port));
1772 pp = ironlake_get_pp_control(intel_dp);
1773 pp &= ~EDP_FORCE_VDD;
1775 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1776 pp_stat_reg = _pp_stat_reg(intel_dp);
1778 I915_WRITE(pp_ctrl_reg, pp);
1779 POSTING_READ(pp_ctrl_reg);
1781 /* Make sure sequencer is idle before allowing subsequent activity */
1782 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1783 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1785 if ((pp & POWER_TARGET_ON) == 0)
1786 intel_dp->last_power_cycle = jiffies;
1788 power_domain = intel_display_port_power_domain(intel_encoder);
1789 intel_display_power_put(dev_priv, power_domain);
1792 static void edp_panel_vdd_work(struct work_struct *__work)
1794 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1795 struct intel_dp, panel_vdd_work);
1798 if (!intel_dp->want_panel_vdd)
1799 edp_panel_vdd_off_sync(intel_dp);
1800 pps_unlock(intel_dp);
1803 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805 unsigned long delay;
1808 * Queue the timer to fire a long time from now (relative to the power
1809 * down delay) to keep the panel power up across a sequence of
1812 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1813 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1817 * Must be paired with edp_panel_vdd_on().
1818 * Must hold pps_mutex around the whole on/off sequence.
1819 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1823 struct drm_i915_private *dev_priv =
1824 intel_dp_to_dev(intel_dp)->dev_private;
1826 lockdep_assert_held(&dev_priv->pps_mutex);
1828 if (!is_edp(intel_dp))
1831 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1832 port_name(dp_to_dig_port(intel_dp)->port));
1834 intel_dp->want_panel_vdd = false;
1837 edp_panel_vdd_off_sync(intel_dp);
1839 edp_panel_vdd_schedule_off(intel_dp);
1842 static void edp_panel_on(struct intel_dp *intel_dp)
1844 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1845 struct drm_i915_private *dev_priv = dev->dev_private;
1849 lockdep_assert_held(&dev_priv->pps_mutex);
1851 if (!is_edp(intel_dp))
1854 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1855 port_name(dp_to_dig_port(intel_dp)->port));
1857 if (WARN(edp_have_panel_power(intel_dp),
1858 "eDP port %c panel power already on\n",
1859 port_name(dp_to_dig_port(intel_dp)->port)))
1862 wait_panel_power_cycle(intel_dp);
1864 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1865 pp = ironlake_get_pp_control(intel_dp);
1867 /* ILK workaround: disable reset around power sequence */
1868 pp &= ~PANEL_POWER_RESET;
1869 I915_WRITE(pp_ctrl_reg, pp);
1870 POSTING_READ(pp_ctrl_reg);
1873 pp |= POWER_TARGET_ON;
1875 pp |= PANEL_POWER_RESET;
1877 I915_WRITE(pp_ctrl_reg, pp);
1878 POSTING_READ(pp_ctrl_reg);
1880 wait_panel_on(intel_dp);
1881 intel_dp->last_power_on = jiffies;
1884 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1885 I915_WRITE(pp_ctrl_reg, pp);
1886 POSTING_READ(pp_ctrl_reg);
1890 void intel_edp_panel_on(struct intel_dp *intel_dp)
1892 if (!is_edp(intel_dp))
1896 edp_panel_on(intel_dp);
1897 pps_unlock(intel_dp);
1901 static void edp_panel_off(struct intel_dp *intel_dp)
1903 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1904 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1905 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1906 struct drm_i915_private *dev_priv = dev->dev_private;
1907 enum intel_display_power_domain power_domain;
1911 lockdep_assert_held(&dev_priv->pps_mutex);
1913 if (!is_edp(intel_dp))
1916 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1917 port_name(dp_to_dig_port(intel_dp)->port));
1919 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1920 port_name(dp_to_dig_port(intel_dp)->port));
1922 pp = ironlake_get_pp_control(intel_dp);
1923 /* We need to switch off panel power _and_ force vdd, for otherwise some
1924 * panels get very unhappy and cease to work. */
1925 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1928 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1930 intel_dp->want_panel_vdd = false;
1932 I915_WRITE(pp_ctrl_reg, pp);
1933 POSTING_READ(pp_ctrl_reg);
1935 intel_dp->last_power_cycle = jiffies;
1936 wait_panel_off(intel_dp);
1938 /* We got a reference when we enabled the VDD. */
1939 power_domain = intel_display_port_power_domain(intel_encoder);
1940 intel_display_power_put(dev_priv, power_domain);
1943 void intel_edp_panel_off(struct intel_dp *intel_dp)
1945 if (!is_edp(intel_dp))
1949 edp_panel_off(intel_dp);
1950 pps_unlock(intel_dp);
1953 /* Enable backlight in the panel power control. */
1954 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1956 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1957 struct drm_device *dev = intel_dig_port->base.base.dev;
1958 struct drm_i915_private *dev_priv = dev->dev_private;
1963 * If we enable the backlight right away following a panel power
1964 * on, we may see slight flicker as the panel syncs with the eDP
1965 * link. So delay a bit to make sure the image is solid before
1966 * allowing it to appear.
1968 wait_backlight_on(intel_dp);
1972 pp = ironlake_get_pp_control(intel_dp);
1973 pp |= EDP_BLC_ENABLE;
1975 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
1980 pps_unlock(intel_dp);
1983 /* Enable backlight PWM and backlight PP control. */
1984 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1986 if (!is_edp(intel_dp))
1989 DRM_DEBUG_KMS("\n");
1991 intel_panel_enable_backlight(intel_dp->attached_connector);
1992 _intel_edp_backlight_on(intel_dp);
1995 /* Disable backlight in the panel power control. */
1996 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1998 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1999 struct drm_i915_private *dev_priv = dev->dev_private;
2003 if (!is_edp(intel_dp))
2008 pp = ironlake_get_pp_control(intel_dp);
2009 pp &= ~EDP_BLC_ENABLE;
2011 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2013 I915_WRITE(pp_ctrl_reg, pp);
2014 POSTING_READ(pp_ctrl_reg);
2016 pps_unlock(intel_dp);
2018 intel_dp->last_backlight_off = jiffies;
2019 edp_wait_backlight_off(intel_dp);
2022 /* Disable backlight PP control and backlight PWM. */
2023 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2025 if (!is_edp(intel_dp))
2028 DRM_DEBUG_KMS("\n");
2030 _intel_edp_backlight_off(intel_dp);
2031 intel_panel_disable_backlight(intel_dp->attached_connector);
2035 * Hook for controlling the panel power control backlight through the bl_power
2036 * sysfs attribute. Take care to handle multiple calls.
2038 static void intel_edp_backlight_power(struct intel_connector *connector,
2041 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2045 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2046 pps_unlock(intel_dp);
2048 if (is_enabled == enable)
2051 DRM_DEBUG_KMS("panel power control backlight %s\n",
2052 enable ? "enable" : "disable");
2055 _intel_edp_backlight_on(intel_dp);
2057 _intel_edp_backlight_off(intel_dp);
2060 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2062 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2063 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2064 struct drm_device *dev = crtc->dev;
2065 struct drm_i915_private *dev_priv = dev->dev_private;
2068 assert_pipe_disabled(dev_priv,
2069 to_intel_crtc(crtc)->pipe);
2071 DRM_DEBUG_KMS("\n");
2072 dpa_ctl = I915_READ(DP_A);
2073 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2074 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2076 /* We don't adjust intel_dp->DP while tearing down the link, to
2077 * facilitate link retraining (e.g. after hotplug). Hence clear all
2078 * enable bits here to ensure that we don't enable too much. */
2079 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2080 intel_dp->DP |= DP_PLL_ENABLE;
2081 I915_WRITE(DP_A, intel_dp->DP);
2086 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2088 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2089 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2090 struct drm_device *dev = crtc->dev;
2091 struct drm_i915_private *dev_priv = dev->dev_private;
2094 assert_pipe_disabled(dev_priv,
2095 to_intel_crtc(crtc)->pipe);
2097 dpa_ctl = I915_READ(DP_A);
2098 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2099 "dp pll off, should be on\n");
2100 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102 /* We can't rely on the value tracked for the DP register in
2103 * intel_dp->DP because link_down must not change that (otherwise link
2104 * re-training will fail. */
2105 dpa_ctl &= ~DP_PLL_ENABLE;
2106 I915_WRITE(DP_A, dpa_ctl);
2111 /* If the sink supports it, try to set the power state appropriately */
2112 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2116 /* Should have a valid DPCD by this point */
2117 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2120 if (mode != DRM_MODE_DPMS_ON) {
2121 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2125 * When turning on, we need to retry for 1ms to give the sink
2128 for (i = 0; i < 3; i++) {
2129 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2138 DRM_DEBUG_KMS("failed to %s sink power state\n",
2139 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2142 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2145 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2146 enum port port = dp_to_dig_port(intel_dp)->port;
2147 struct drm_device *dev = encoder->base.dev;
2148 struct drm_i915_private *dev_priv = dev->dev_private;
2149 enum intel_display_power_domain power_domain;
2152 power_domain = intel_display_port_power_domain(encoder);
2153 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2156 tmp = I915_READ(intel_dp->output_reg);
2158 if (!(tmp & DP_PORT_EN))
2161 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2162 *pipe = PORT_TO_PIPE_CPT(tmp);
2163 } else if (IS_CHERRYVIEW(dev)) {
2164 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2165 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2166 *pipe = PORT_TO_PIPE(tmp);
2172 switch (intel_dp->output_reg) {
2174 trans_sel = TRANS_DP_PORT_SEL_B;
2177 trans_sel = TRANS_DP_PORT_SEL_C;
2180 trans_sel = TRANS_DP_PORT_SEL_D;
2186 for_each_pipe(dev_priv, i) {
2187 trans_dp = I915_READ(TRANS_DP_CTL(i));
2188 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2194 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2195 intel_dp->output_reg);
2201 static void intel_dp_get_config(struct intel_encoder *encoder,
2202 struct intel_crtc_state *pipe_config)
2204 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2206 struct drm_device *dev = encoder->base.dev;
2207 struct drm_i915_private *dev_priv = dev->dev_private;
2208 enum port port = dp_to_dig_port(intel_dp)->port;
2209 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2212 tmp = I915_READ(intel_dp->output_reg);
2213 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2214 pipe_config->has_audio = true;
2216 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2217 if (tmp & DP_SYNC_HS_HIGH)
2218 flags |= DRM_MODE_FLAG_PHSYNC;
2220 flags |= DRM_MODE_FLAG_NHSYNC;
2222 if (tmp & DP_SYNC_VS_HIGH)
2223 flags |= DRM_MODE_FLAG_PVSYNC;
2225 flags |= DRM_MODE_FLAG_NVSYNC;
2227 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2228 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2229 flags |= DRM_MODE_FLAG_PHSYNC;
2231 flags |= DRM_MODE_FLAG_NHSYNC;
2233 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2234 flags |= DRM_MODE_FLAG_PVSYNC;
2236 flags |= DRM_MODE_FLAG_NVSYNC;
2239 pipe_config->base.adjusted_mode.flags |= flags;
2241 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2242 tmp & DP_COLOR_RANGE_16_235)
2243 pipe_config->limited_color_range = true;
2245 pipe_config->has_dp_encoder = true;
2247 intel_dp_get_m_n(crtc, pipe_config);
2249 if (port == PORT_A) {
2250 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2251 pipe_config->port_clock = 162000;
2253 pipe_config->port_clock = 270000;
2256 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2257 &pipe_config->dp_m_n);
2259 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2260 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2262 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2264 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2265 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2267 * This is a big fat ugly hack.
2269 * Some machines in UEFI boot mode provide us a VBT that has 18
2270 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2271 * unknown we fail to light up. Yet the same BIOS boots up with
2272 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2273 * max, not what it tells us to use.
2275 * Note: This will still be broken if the eDP panel is not lit
2276 * up by the BIOS, and thus we can't get the mode at module
2279 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2280 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2281 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2285 static void intel_disable_dp(struct intel_encoder *encoder)
2287 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2288 struct drm_device *dev = encoder->base.dev;
2289 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2291 if (crtc->config->has_audio)
2292 intel_audio_codec_disable(encoder);
2294 if (HAS_PSR(dev) && !HAS_DDI(dev))
2295 intel_psr_disable(intel_dp);
2297 /* Make sure the panel is off before trying to change the mode. But also
2298 * ensure that we have vdd while we switch off the panel. */
2299 intel_edp_panel_vdd_on(intel_dp);
2300 intel_edp_backlight_off(intel_dp);
2301 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2302 intel_edp_panel_off(intel_dp);
2304 /* disable the port before the pipe on g4x */
2305 if (INTEL_INFO(dev)->gen < 5)
2306 intel_dp_link_down(intel_dp);
2309 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2311 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2312 enum port port = dp_to_dig_port(intel_dp)->port;
2314 intel_dp_link_down(intel_dp);
2316 ironlake_edp_pll_off(intel_dp);
2319 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2321 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2323 intel_dp_link_down(intel_dp);
2326 static void chv_post_disable_dp(struct intel_encoder *encoder)
2328 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2329 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2330 struct drm_device *dev = encoder->base.dev;
2331 struct drm_i915_private *dev_priv = dev->dev_private;
2332 struct intel_crtc *intel_crtc =
2333 to_intel_crtc(encoder->base.crtc);
2334 enum dpio_channel ch = vlv_dport_to_channel(dport);
2335 enum pipe pipe = intel_crtc->pipe;
2338 intel_dp_link_down(intel_dp);
2340 mutex_lock(&dev_priv->dpio_lock);
2342 /* Propagate soft reset to data lane reset */
2343 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2344 val |= CHV_PCS_REQ_SOFTRESET_EN;
2345 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2347 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2348 val |= CHV_PCS_REQ_SOFTRESET_EN;
2349 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2351 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2352 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2353 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2355 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2356 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2357 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2359 mutex_unlock(&dev_priv->dpio_lock);
2363 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2365 uint8_t dp_train_pat)
2367 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2368 struct drm_device *dev = intel_dig_port->base.base.dev;
2369 struct drm_i915_private *dev_priv = dev->dev_private;
2370 enum port port = intel_dig_port->port;
2373 uint32_t temp = I915_READ(DP_TP_CTL(port));
2375 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2376 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2378 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2380 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2381 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2382 case DP_TRAINING_PATTERN_DISABLE:
2383 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2386 case DP_TRAINING_PATTERN_1:
2387 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2389 case DP_TRAINING_PATTERN_2:
2390 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2392 case DP_TRAINING_PATTERN_3:
2393 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2396 I915_WRITE(DP_TP_CTL(port), temp);
2398 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2399 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2401 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2402 case DP_TRAINING_PATTERN_DISABLE:
2403 *DP |= DP_LINK_TRAIN_OFF_CPT;
2405 case DP_TRAINING_PATTERN_1:
2406 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2408 case DP_TRAINING_PATTERN_2:
2409 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2411 case DP_TRAINING_PATTERN_3:
2412 DRM_ERROR("DP training pattern 3 not supported\n");
2413 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2418 if (IS_CHERRYVIEW(dev))
2419 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2421 *DP &= ~DP_LINK_TRAIN_MASK;
2423 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2424 case DP_TRAINING_PATTERN_DISABLE:
2425 *DP |= DP_LINK_TRAIN_OFF;
2427 case DP_TRAINING_PATTERN_1:
2428 *DP |= DP_LINK_TRAIN_PAT_1;
2430 case DP_TRAINING_PATTERN_2:
2431 *DP |= DP_LINK_TRAIN_PAT_2;
2433 case DP_TRAINING_PATTERN_3:
2434 if (IS_CHERRYVIEW(dev)) {
2435 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2437 DRM_ERROR("DP training pattern 3 not supported\n");
2438 *DP |= DP_LINK_TRAIN_PAT_2;
2445 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2447 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2448 struct drm_i915_private *dev_priv = dev->dev_private;
2450 /* enable with pattern 1 (as per spec) */
2451 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2452 DP_TRAINING_PATTERN_1);
2454 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2455 POSTING_READ(intel_dp->output_reg);
2458 * Magic for VLV/CHV. We _must_ first set up the register
2459 * without actually enabling the port, and then do another
2460 * write to enable the port. Otherwise link training will
2461 * fail when the power sequencer is freshly used for this port.
2463 intel_dp->DP |= DP_PORT_EN;
2465 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2466 POSTING_READ(intel_dp->output_reg);
2469 static void intel_enable_dp(struct intel_encoder *encoder)
2471 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2472 struct drm_device *dev = encoder->base.dev;
2473 struct drm_i915_private *dev_priv = dev->dev_private;
2474 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2475 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2477 if (WARN_ON(dp_reg & DP_PORT_EN))
2482 if (IS_VALLEYVIEW(dev))
2483 vlv_init_panel_power_sequencer(intel_dp);
2485 intel_dp_enable_port(intel_dp);
2487 edp_panel_vdd_on(intel_dp);
2488 edp_panel_on(intel_dp);
2489 edp_panel_vdd_off(intel_dp, true);
2491 pps_unlock(intel_dp);
2493 if (IS_VALLEYVIEW(dev))
2494 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2496 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2497 intel_dp_start_link_train(intel_dp);
2498 intel_dp_complete_link_train(intel_dp);
2499 intel_dp_stop_link_train(intel_dp);
2501 if (crtc->config->has_audio) {
2502 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2503 pipe_name(crtc->pipe));
2504 intel_audio_codec_enable(encoder);
2508 static void g4x_enable_dp(struct intel_encoder *encoder)
2510 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512 intel_enable_dp(encoder);
2513 intel_edp_backlight_on(intel_dp);
2516 static void vlv_enable_dp(struct intel_encoder *encoder)
2518 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520 intel_edp_backlight_on(intel_dp);
2521 intel_psr_enable(intel_dp);
2524 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2526 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2527 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2529 intel_dp_prepare(encoder);
2531 /* Only ilk+ has port A */
2532 if (dport->port == PORT_A) {
2533 ironlake_set_pll_cpu_edp(intel_dp);
2534 ironlake_edp_pll_on(intel_dp);
2538 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2540 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2541 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2542 enum pipe pipe = intel_dp->pps_pipe;
2543 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2545 edp_panel_vdd_off_sync(intel_dp);
2548 * VLV seems to get confused when multiple power seqeuencers
2549 * have the same port selected (even if only one has power/vdd
2550 * enabled). The failure manifests as vlv_wait_port_ready() failing
2551 * CHV on the other hand doesn't seem to mind having the same port
2552 * selected in multiple power seqeuencers, but let's clear the
2553 * port select always when logically disconnecting a power sequencer
2556 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2557 pipe_name(pipe), port_name(intel_dig_port->port));
2558 I915_WRITE(pp_on_reg, 0);
2559 POSTING_READ(pp_on_reg);
2561 intel_dp->pps_pipe = INVALID_PIPE;
2564 static void vlv_steal_power_sequencer(struct drm_device *dev,
2567 struct drm_i915_private *dev_priv = dev->dev_private;
2568 struct intel_encoder *encoder;
2570 lockdep_assert_held(&dev_priv->pps_mutex);
2572 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2575 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2577 struct intel_dp *intel_dp;
2580 if (encoder->type != INTEL_OUTPUT_EDP)
2583 intel_dp = enc_to_intel_dp(&encoder->base);
2584 port = dp_to_dig_port(intel_dp)->port;
2586 if (intel_dp->pps_pipe != pipe)
2589 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2590 pipe_name(pipe), port_name(port));
2592 WARN(encoder->connectors_active,
2593 "stealing pipe %c power sequencer from active eDP port %c\n",
2594 pipe_name(pipe), port_name(port));
2596 /* make sure vdd is off before we steal it */
2597 vlv_detach_power_sequencer(intel_dp);
2601 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2603 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2604 struct intel_encoder *encoder = &intel_dig_port->base;
2605 struct drm_device *dev = encoder->base.dev;
2606 struct drm_i915_private *dev_priv = dev->dev_private;
2607 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2609 lockdep_assert_held(&dev_priv->pps_mutex);
2611 if (!is_edp(intel_dp))
2614 if (intel_dp->pps_pipe == crtc->pipe)
2618 * If another power sequencer was being used on this
2619 * port previously make sure to turn off vdd there while
2620 * we still have control of it.
2622 if (intel_dp->pps_pipe != INVALID_PIPE)
2623 vlv_detach_power_sequencer(intel_dp);
2626 * We may be stealing the power
2627 * sequencer from another port.
2629 vlv_steal_power_sequencer(dev, crtc->pipe);
2631 /* now it's all ours */
2632 intel_dp->pps_pipe = crtc->pipe;
2634 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2635 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2637 /* init power sequencer on this pipe and port */
2638 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2639 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2642 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2644 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2645 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2646 struct drm_device *dev = encoder->base.dev;
2647 struct drm_i915_private *dev_priv = dev->dev_private;
2648 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2649 enum dpio_channel port = vlv_dport_to_channel(dport);
2650 int pipe = intel_crtc->pipe;
2653 mutex_lock(&dev_priv->dpio_lock);
2655 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2662 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2664 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2666 mutex_unlock(&dev_priv->dpio_lock);
2668 intel_enable_dp(encoder);
2671 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2673 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2674 struct drm_device *dev = encoder->base.dev;
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 struct intel_crtc *intel_crtc =
2677 to_intel_crtc(encoder->base.crtc);
2678 enum dpio_channel port = vlv_dport_to_channel(dport);
2679 int pipe = intel_crtc->pipe;
2681 intel_dp_prepare(encoder);
2683 /* Program Tx lane resets to default */
2684 mutex_lock(&dev_priv->dpio_lock);
2685 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2686 DPIO_PCS_TX_LANE2_RESET |
2687 DPIO_PCS_TX_LANE1_RESET);
2688 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2689 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2690 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2691 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2692 DPIO_PCS_CLK_SOFT_RESET);
2694 /* Fix up inter-pair skew failure */
2695 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2696 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2698 mutex_unlock(&dev_priv->dpio_lock);
2701 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2703 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2704 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2705 struct drm_device *dev = encoder->base.dev;
2706 struct drm_i915_private *dev_priv = dev->dev_private;
2707 struct intel_crtc *intel_crtc =
2708 to_intel_crtc(encoder->base.crtc);
2709 enum dpio_channel ch = vlv_dport_to_channel(dport);
2710 int pipe = intel_crtc->pipe;
2714 mutex_lock(&dev_priv->dpio_lock);
2716 /* allow hardware to manage TX FIFO reset source */
2717 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2718 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2719 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2721 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2722 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2723 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2725 /* Deassert soft data lane reset*/
2726 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2727 val |= CHV_PCS_REQ_SOFTRESET_EN;
2728 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2730 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2731 val |= CHV_PCS_REQ_SOFTRESET_EN;
2732 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2734 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2735 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2736 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2738 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2739 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2740 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2742 /* Program Tx lane latency optimal setting*/
2743 for (i = 0; i < 4; i++) {
2744 /* Set the latency optimal bit */
2745 data = (i == 1) ? 0x0 : 0x6;
2746 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2747 data << DPIO_FRC_LATENCY_SHFIT);
2749 /* Set the upar bit */
2750 data = (i == 1) ? 0x0 : 0x1;
2751 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2752 data << DPIO_UPAR_SHIFT);
2755 /* Data lane stagger programming */
2756 /* FIXME: Fix up value only after power analysis */
2758 mutex_unlock(&dev_priv->dpio_lock);
2760 intel_enable_dp(encoder);
2763 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2765 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2766 struct drm_device *dev = encoder->base.dev;
2767 struct drm_i915_private *dev_priv = dev->dev_private;
2768 struct intel_crtc *intel_crtc =
2769 to_intel_crtc(encoder->base.crtc);
2770 enum dpio_channel ch = vlv_dport_to_channel(dport);
2771 enum pipe pipe = intel_crtc->pipe;
2774 intel_dp_prepare(encoder);
2776 mutex_lock(&dev_priv->dpio_lock);
2778 /* program left/right clock distribution */
2779 if (pipe != PIPE_B) {
2780 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2781 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2783 val |= CHV_BUFLEFTENA1_FORCE;
2785 val |= CHV_BUFRIGHTENA1_FORCE;
2786 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2788 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2789 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2791 val |= CHV_BUFLEFTENA2_FORCE;
2793 val |= CHV_BUFRIGHTENA2_FORCE;
2794 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2797 /* program clock channel usage */
2798 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2799 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2801 val &= ~CHV_PCS_USEDCLKCHANNEL;
2803 val |= CHV_PCS_USEDCLKCHANNEL;
2804 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2806 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2807 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2809 val &= ~CHV_PCS_USEDCLKCHANNEL;
2811 val |= CHV_PCS_USEDCLKCHANNEL;
2812 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2815 * This a a bit weird since generally CL
2816 * matches the pipe, but here we need to
2817 * pick the CL based on the port.
2819 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2821 val &= ~CHV_CMN_USEDCLKCHANNEL;
2823 val |= CHV_CMN_USEDCLKCHANNEL;
2824 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2826 mutex_unlock(&dev_priv->dpio_lock);
2830 * Native read with retry for link status and receiver capability reads for
2831 * cases where the sink may still be asleep.
2833 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2834 * supposed to retry 3 times per the spec.
2837 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2838 void *buffer, size_t size)
2844 * Sometime we just get the same incorrect byte repeated
2845 * over the entire buffer. Doing just one throw away read
2846 * initially seems to "solve" it.
2848 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2850 for (i = 0; i < 3; i++) {
2851 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2861 * Fetch AUX CH registers 0x202 - 0x207 which contain
2862 * link status information
2865 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2867 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2870 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2873 /* These are source-specific values. */
2875 intel_dp_voltage_max(struct intel_dp *intel_dp)
2877 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2878 struct drm_i915_private *dev_priv = dev->dev_private;
2879 enum port port = dp_to_dig_port(intel_dp)->port;
2881 if (INTEL_INFO(dev)->gen >= 9) {
2882 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2883 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2884 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2885 } else if (IS_VALLEYVIEW(dev))
2886 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2887 else if (IS_GEN7(dev) && port == PORT_A)
2888 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2889 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2890 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2892 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2896 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2898 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2899 enum port port = dp_to_dig_port(intel_dp)->port;
2901 if (INTEL_INFO(dev)->gen >= 9) {
2902 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2904 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2905 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2906 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2907 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2908 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2909 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2910 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2912 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2914 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2915 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2916 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2917 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2919 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2920 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2921 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2922 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2924 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2926 } else if (IS_VALLEYVIEW(dev)) {
2927 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2928 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2929 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2930 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2931 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2932 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2933 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2934 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2936 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2938 } else if (IS_GEN7(dev) && port == PORT_A) {
2939 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2940 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2941 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2942 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2943 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2944 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2946 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2949 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2951 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2953 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2954 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2955 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2956 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2958 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2963 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2965 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2966 struct drm_i915_private *dev_priv = dev->dev_private;
2967 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2968 struct intel_crtc *intel_crtc =
2969 to_intel_crtc(dport->base.base.crtc);
2970 unsigned long demph_reg_value, preemph_reg_value,
2971 uniqtranscale_reg_value;
2972 uint8_t train_set = intel_dp->train_set[0];
2973 enum dpio_channel port = vlv_dport_to_channel(dport);
2974 int pipe = intel_crtc->pipe;
2976 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2977 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2978 preemph_reg_value = 0x0004000;
2979 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2981 demph_reg_value = 0x2B405555;
2982 uniqtranscale_reg_value = 0x552AB83A;
2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2985 demph_reg_value = 0x2B404040;
2986 uniqtranscale_reg_value = 0x5548B83A;
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2989 demph_reg_value = 0x2B245555;
2990 uniqtranscale_reg_value = 0x5560B83A;
2992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2993 demph_reg_value = 0x2B405555;
2994 uniqtranscale_reg_value = 0x5598DA3A;
3000 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3001 preemph_reg_value = 0x0002000;
3002 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3004 demph_reg_value = 0x2B404040;
3005 uniqtranscale_reg_value = 0x5552B83A;
3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3008 demph_reg_value = 0x2B404848;
3009 uniqtranscale_reg_value = 0x5580B83A;
3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3012 demph_reg_value = 0x2B404040;
3013 uniqtranscale_reg_value = 0x55ADDA3A;
3019 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3020 preemph_reg_value = 0x0000000;
3021 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3023 demph_reg_value = 0x2B305555;
3024 uniqtranscale_reg_value = 0x5570B83A;
3026 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3027 demph_reg_value = 0x2B2B4040;
3028 uniqtranscale_reg_value = 0x55ADDA3A;
3034 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3035 preemph_reg_value = 0x0006000;
3036 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3038 demph_reg_value = 0x1B405555;
3039 uniqtranscale_reg_value = 0x55ADDA3A;
3049 mutex_lock(&dev_priv->dpio_lock);
3050 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3051 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3052 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3053 uniqtranscale_reg_value);
3054 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3055 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3056 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3057 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3058 mutex_unlock(&dev_priv->dpio_lock);
3063 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3065 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3066 struct drm_i915_private *dev_priv = dev->dev_private;
3067 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3068 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3069 u32 deemph_reg_value, margin_reg_value, val;
3070 uint8_t train_set = intel_dp->train_set[0];
3071 enum dpio_channel ch = vlv_dport_to_channel(dport);
3072 enum pipe pipe = intel_crtc->pipe;
3075 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3076 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3077 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3079 deemph_reg_value = 128;
3080 margin_reg_value = 52;
3082 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3083 deemph_reg_value = 128;
3084 margin_reg_value = 77;
3086 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3087 deemph_reg_value = 128;
3088 margin_reg_value = 102;
3090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3091 deemph_reg_value = 128;
3092 margin_reg_value = 154;
3093 /* FIXME extra to set for 1200 */
3099 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3100 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3102 deemph_reg_value = 85;
3103 margin_reg_value = 78;
3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3106 deemph_reg_value = 85;
3107 margin_reg_value = 116;
3109 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3110 deemph_reg_value = 85;
3111 margin_reg_value = 154;
3117 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3118 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3120 deemph_reg_value = 64;
3121 margin_reg_value = 104;
3123 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3124 deemph_reg_value = 64;
3125 margin_reg_value = 154;
3131 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3132 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3134 deemph_reg_value = 43;
3135 margin_reg_value = 154;
3145 mutex_lock(&dev_priv->dpio_lock);
3147 /* Clear calc init */
3148 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3149 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3150 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3151 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3152 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3154 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3155 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3156 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3157 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3158 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3160 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3161 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3162 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3163 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3165 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3166 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3167 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3168 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3170 /* Program swing deemph */
3171 for (i = 0; i < 4; i++) {
3172 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3173 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3174 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3175 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3178 /* Program swing margin */
3179 for (i = 0; i < 4; i++) {
3180 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3181 val &= ~DPIO_SWING_MARGIN000_MASK;
3182 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3183 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3186 /* Disable unique transition scale */
3187 for (i = 0; i < 4; i++) {
3188 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3189 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3190 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3193 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3194 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3195 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3196 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3199 * The document said it needs to set bit 27 for ch0 and bit 26
3200 * for ch1. Might be a typo in the doc.
3201 * For now, for this unique transition scale selection, set bit
3202 * 27 for ch0 and ch1.
3204 for (i = 0; i < 4; i++) {
3205 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3206 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3207 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3210 for (i = 0; i < 4; i++) {
3211 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3212 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3213 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3214 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3218 /* Start swing calculation */
3219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3220 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3221 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3223 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3224 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3225 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3228 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3229 val |= DPIO_LRC_BYPASS;
3230 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3232 mutex_unlock(&dev_priv->dpio_lock);
3238 intel_get_adjust_train(struct intel_dp *intel_dp,
3239 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3244 uint8_t voltage_max;
3245 uint8_t preemph_max;
3247 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3248 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3249 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3257 voltage_max = intel_dp_voltage_max(intel_dp);
3258 if (v >= voltage_max)
3259 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3261 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3262 if (p >= preemph_max)
3263 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3265 for (lane = 0; lane < 4; lane++)
3266 intel_dp->train_set[lane] = v | p;
3270 intel_gen4_signal_levels(uint8_t train_set)
3272 uint32_t signal_levels = 0;
3274 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3277 signal_levels |= DP_VOLTAGE_0_4;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3280 signal_levels |= DP_VOLTAGE_0_6;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3283 signal_levels |= DP_VOLTAGE_0_8;
3285 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3286 signal_levels |= DP_VOLTAGE_1_2;
3289 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3290 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3292 signal_levels |= DP_PRE_EMPHASIS_0;
3294 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3295 signal_levels |= DP_PRE_EMPHASIS_3_5;
3297 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3298 signal_levels |= DP_PRE_EMPHASIS_6;
3300 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3301 signal_levels |= DP_PRE_EMPHASIS_9_5;
3304 return signal_levels;
3307 /* Gen6's DP voltage swing and pre-emphasis control */
3309 intel_gen6_edp_signal_levels(uint8_t train_set)
3311 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3312 DP_TRAIN_PRE_EMPHASIS_MASK);
3313 switch (signal_levels) {
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3316 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3317 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3318 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3321 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3323 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3324 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3327 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3329 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3330 "0x%x\n", signal_levels);
3331 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3335 /* Gen7's DP voltage swing and pre-emphasis control */
3337 intel_gen7_edp_signal_levels(uint8_t train_set)
3339 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3340 DP_TRAIN_PRE_EMPHASIS_MASK);
3341 switch (signal_levels) {
3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3343 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3344 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3345 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3347 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3350 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3352 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3354 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3355 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3356 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3357 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3360 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3361 "0x%x\n", signal_levels);
3362 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3366 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3368 intel_hsw_signal_levels(uint8_t train_set)
3370 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3371 DP_TRAIN_PRE_EMPHASIS_MASK);
3372 switch (signal_levels) {
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3374 return DDI_BUF_TRANS_SELECT(0);
3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3376 return DDI_BUF_TRANS_SELECT(1);
3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3378 return DDI_BUF_TRANS_SELECT(2);
3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3380 return DDI_BUF_TRANS_SELECT(3);
3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3383 return DDI_BUF_TRANS_SELECT(4);
3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3385 return DDI_BUF_TRANS_SELECT(5);
3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3387 return DDI_BUF_TRANS_SELECT(6);
3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3390 return DDI_BUF_TRANS_SELECT(7);
3391 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3392 return DDI_BUF_TRANS_SELECT(8);
3394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3395 return DDI_BUF_TRANS_SELECT(9);
3397 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3398 "0x%x\n", signal_levels);
3399 return DDI_BUF_TRANS_SELECT(0);
3403 /* Properly updates "DP" with the correct signal levels. */
3405 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3407 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3408 enum port port = intel_dig_port->port;
3409 struct drm_device *dev = intel_dig_port->base.base.dev;
3410 uint32_t signal_levels, mask;
3411 uint8_t train_set = intel_dp->train_set[0];
3413 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3414 signal_levels = intel_hsw_signal_levels(train_set);
3415 mask = DDI_BUF_EMP_MASK;
3416 } else if (IS_CHERRYVIEW(dev)) {
3417 signal_levels = intel_chv_signal_levels(intel_dp);
3419 } else if (IS_VALLEYVIEW(dev)) {
3420 signal_levels = intel_vlv_signal_levels(intel_dp);
3422 } else if (IS_GEN7(dev) && port == PORT_A) {
3423 signal_levels = intel_gen7_edp_signal_levels(train_set);
3424 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3425 } else if (IS_GEN6(dev) && port == PORT_A) {
3426 signal_levels = intel_gen6_edp_signal_levels(train_set);
3427 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3429 signal_levels = intel_gen4_signal_levels(train_set);
3430 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3433 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3435 *DP = (*DP & ~mask) | signal_levels;
3439 intel_dp_set_link_train(struct intel_dp *intel_dp,
3441 uint8_t dp_train_pat)
3443 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3444 struct drm_device *dev = intel_dig_port->base.base.dev;
3445 struct drm_i915_private *dev_priv = dev->dev_private;
3446 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3449 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3451 I915_WRITE(intel_dp->output_reg, *DP);
3452 POSTING_READ(intel_dp->output_reg);
3454 buf[0] = dp_train_pat;
3455 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3456 DP_TRAINING_PATTERN_DISABLE) {
3457 /* don't write DP_TRAINING_LANEx_SET on disable */
3460 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3461 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3462 len = intel_dp->lane_count + 1;
3465 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3472 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3473 uint8_t dp_train_pat)
3475 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3476 intel_dp_set_signal_levels(intel_dp, DP);
3477 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3481 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3482 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3484 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3485 struct drm_device *dev = intel_dig_port->base.base.dev;
3486 struct drm_i915_private *dev_priv = dev->dev_private;
3489 intel_get_adjust_train(intel_dp, link_status);
3490 intel_dp_set_signal_levels(intel_dp, DP);
3492 I915_WRITE(intel_dp->output_reg, *DP);
3493 POSTING_READ(intel_dp->output_reg);
3495 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3496 intel_dp->train_set, intel_dp->lane_count);
3498 return ret == intel_dp->lane_count;
3501 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3503 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3504 struct drm_device *dev = intel_dig_port->base.base.dev;
3505 struct drm_i915_private *dev_priv = dev->dev_private;
3506 enum port port = intel_dig_port->port;
3512 val = I915_READ(DP_TP_CTL(port));
3513 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3514 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3515 I915_WRITE(DP_TP_CTL(port), val);
3518 * On PORT_A we can have only eDP in SST mode. There the only reason
3519 * we need to set idle transmission mode is to work around a HW issue
3520 * where we enable the pipe while not in idle link-training mode.
3521 * In this case there is requirement to wait for a minimum number of
3522 * idle patterns to be sent.
3527 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3529 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3532 /* Enable corresponding port and start training pattern 1 */
3534 intel_dp_start_link_train(struct intel_dp *intel_dp)
3536 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3537 struct drm_device *dev = encoder->dev;
3540 int voltage_tries, loop_tries;
3541 uint32_t DP = intel_dp->DP;
3542 uint8_t link_config[2];
3545 intel_ddi_prepare_link_retrain(encoder);
3547 /* Write the link configuration data */
3548 link_config[0] = intel_dp->link_bw;
3549 link_config[1] = intel_dp->lane_count;
3550 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3551 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3552 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3553 if (intel_dp->num_sink_rates)
3554 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3555 &intel_dp->rate_select, 1);
3558 link_config[1] = DP_SET_ANSI_8B10B;
3559 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3563 /* clock recovery */
3564 if (!intel_dp_reset_link_train(intel_dp, &DP,
3565 DP_TRAINING_PATTERN_1 |
3566 DP_LINK_SCRAMBLING_DISABLE)) {
3567 DRM_ERROR("failed to enable link training\n");
3575 uint8_t link_status[DP_LINK_STATUS_SIZE];
3577 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3578 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3579 DRM_ERROR("failed to get link status\n");
3583 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3584 DRM_DEBUG_KMS("clock recovery OK\n");
3588 /* Check to see if we've tried the max voltage */
3589 for (i = 0; i < intel_dp->lane_count; i++)
3590 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3592 if (i == intel_dp->lane_count) {
3594 if (loop_tries == 5) {
3595 DRM_ERROR("too many full retries, give up\n");
3598 intel_dp_reset_link_train(intel_dp, &DP,
3599 DP_TRAINING_PATTERN_1 |
3600 DP_LINK_SCRAMBLING_DISABLE);
3605 /* Check to see if we've tried the same voltage 5 times */
3606 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3608 if (voltage_tries == 5) {
3609 DRM_ERROR("too many voltage retries, give up\n");
3614 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3616 /* Update training set as requested by target */
3617 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3618 DRM_ERROR("failed to update link training\n");
3627 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3629 bool channel_eq = false;
3630 int tries, cr_tries;
3631 uint32_t DP = intel_dp->DP;
3632 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3634 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3635 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3636 training_pattern = DP_TRAINING_PATTERN_3;
3638 /* channel equalization */
3639 if (!intel_dp_set_link_train(intel_dp, &DP,
3641 DP_LINK_SCRAMBLING_DISABLE)) {
3642 DRM_ERROR("failed to start channel equalization\n");
3650 uint8_t link_status[DP_LINK_STATUS_SIZE];
3653 DRM_ERROR("failed to train DP, aborting\n");
3657 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3658 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3659 DRM_ERROR("failed to get link status\n");
3663 /* Make sure clock is still ok */
3664 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3665 intel_dp_start_link_train(intel_dp);
3666 intel_dp_set_link_train(intel_dp, &DP,
3668 DP_LINK_SCRAMBLING_DISABLE);
3673 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3678 /* Try 5 times, then try clock recovery if that fails */
3680 intel_dp_start_link_train(intel_dp);
3681 intel_dp_set_link_train(intel_dp, &DP,
3683 DP_LINK_SCRAMBLING_DISABLE);
3689 /* Update training set as requested by target */
3690 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3691 DRM_ERROR("failed to update link training\n");
3697 intel_dp_set_idle_link_train(intel_dp);
3702 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3706 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3708 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3709 DP_TRAINING_PATTERN_DISABLE);
3713 intel_dp_link_down(struct intel_dp *intel_dp)
3715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3716 enum port port = intel_dig_port->port;
3717 struct drm_device *dev = intel_dig_port->base.base.dev;
3718 struct drm_i915_private *dev_priv = dev->dev_private;
3719 uint32_t DP = intel_dp->DP;
3721 if (WARN_ON(HAS_DDI(dev)))
3724 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3727 DRM_DEBUG_KMS("\n");
3729 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3730 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3731 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3733 if (IS_CHERRYVIEW(dev))
3734 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3736 DP &= ~DP_LINK_TRAIN_MASK;
3737 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3739 POSTING_READ(intel_dp->output_reg);
3741 if (HAS_PCH_IBX(dev) &&
3742 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3743 /* Hardware workaround: leaving our transcoder select
3744 * set to transcoder B while it's off will prevent the
3745 * corresponding HDMI output on transcoder A.
3747 * Combine this with another hardware workaround:
3748 * transcoder select bit can only be cleared while the
3751 DP &= ~DP_PIPEB_SELECT;
3752 I915_WRITE(intel_dp->output_reg, DP);
3753 POSTING_READ(intel_dp->output_reg);
3756 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3757 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3758 POSTING_READ(intel_dp->output_reg);
3759 msleep(intel_dp->panel_power_down_delay);
3763 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3765 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3766 struct drm_device *dev = dig_port->base.base.dev;
3767 struct drm_i915_private *dev_priv = dev->dev_private;
3770 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3771 sizeof(intel_dp->dpcd)) < 0)
3772 return false; /* aux transfer failed */
3774 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3776 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3777 return false; /* DPCD not present */
3779 /* Check if the panel supports PSR */
3780 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3781 if (is_edp(intel_dp)) {
3782 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3784 sizeof(intel_dp->psr_dpcd));
3785 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3786 dev_priv->psr.sink_support = true;
3787 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3791 /* Training Pattern 3 support, both source and sink */
3792 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3793 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3794 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3795 intel_dp->use_tps3 = true;
3796 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3798 intel_dp->use_tps3 = false;
3800 /* Intermediate frequency support */
3801 if (is_edp(intel_dp) &&
3802 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3803 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3804 (rev >= 0x03)) { /* eDp v1.4 or higher */
3805 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3808 intel_dp_dpcd_read_wake(&intel_dp->aux,
3809 DP_SUPPORTED_LINK_RATES,
3811 sizeof(sink_rates));
3813 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3814 int val = le16_to_cpu(sink_rates[i]);
3819 intel_dp->sink_rates[i] = val * 200;
3821 intel_dp->num_sink_rates = i;
3824 intel_dp_print_rates(intel_dp);
3826 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3827 DP_DWN_STRM_PORT_PRESENT))
3828 return true; /* native DP sink */
3830 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3831 return true; /* no per-port downstream info */
3833 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3834 intel_dp->downstream_ports,
3835 DP_MAX_DOWNSTREAM_PORTS) < 0)
3836 return false; /* downstream port status fetch failed */
3842 intel_dp_probe_oui(struct intel_dp *intel_dp)
3846 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3849 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3850 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3851 buf[0], buf[1], buf[2]);
3853 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3854 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3855 buf[0], buf[1], buf[2]);
3859 intel_dp_probe_mst(struct intel_dp *intel_dp)
3863 if (!intel_dp->can_mst)
3866 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3869 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3870 if (buf[0] & DP_MST_CAP) {
3871 DRM_DEBUG_KMS("Sink is MST capable\n");
3872 intel_dp->is_mst = true;
3874 DRM_DEBUG_KMS("Sink is not MST capable\n");
3875 intel_dp->is_mst = false;
3879 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3880 return intel_dp->is_mst;
3883 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3885 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3886 struct drm_device *dev = intel_dig_port->base.base.dev;
3887 struct intel_crtc *intel_crtc =
3888 to_intel_crtc(intel_dig_port->base.base.crtc);
3893 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3896 if (!(buf & DP_TEST_CRC_SUPPORTED))
3899 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3902 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3903 buf | DP_TEST_SINK_START) < 0)
3906 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3908 test_crc_count = buf & DP_TEST_COUNT_MASK;
3911 if (drm_dp_dpcd_readb(&intel_dp->aux,
3912 DP_TEST_SINK_MISC, &buf) < 0)
3914 intel_wait_for_vblank(dev, intel_crtc->pipe);
3915 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3917 if (attempts == 0) {
3918 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3922 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3925 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3927 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3928 buf & ~DP_TEST_SINK_START) < 0)
3935 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3937 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3938 DP_DEVICE_SERVICE_IRQ_VECTOR,
3939 sink_irq_vector, 1) == 1;
3943 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3947 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3949 sink_irq_vector, 14);
3957 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3959 /* NAK by default */
3960 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3964 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3968 if (intel_dp->is_mst) {
3973 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3977 /* check link status - esi[10] = 0x200c */
3978 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3979 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3980 intel_dp_start_link_train(intel_dp);
3981 intel_dp_complete_link_train(intel_dp);
3982 intel_dp_stop_link_train(intel_dp);
3985 DRM_DEBUG_KMS("got esi %3ph\n", esi);
3986 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3989 for (retry = 0; retry < 3; retry++) {
3991 wret = drm_dp_dpcd_write(&intel_dp->aux,
3992 DP_SINK_COUNT_ESI+1,
3999 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4001 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4009 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4010 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4011 intel_dp->is_mst = false;
4012 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4013 /* send a hotplug event */
4014 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4021 * According to DP spec
4024 * 2. Configure link according to Receiver Capabilities
4025 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4026 * 4. Check link status on receipt of hot-plug interrupt
4029 intel_dp_check_link_status(struct intel_dp *intel_dp)
4031 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4032 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4034 u8 link_status[DP_LINK_STATUS_SIZE];
4036 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4038 if (!intel_encoder->connectors_active)
4041 if (WARN_ON(!intel_encoder->base.crtc))
4044 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4047 /* Try to read receiver status if the link appears to be up */
4048 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4052 /* Now read the DPCD to see if it's actually running */
4053 if (!intel_dp_get_dpcd(intel_dp)) {
4057 /* Try to read the source of the interrupt */
4058 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4059 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4060 /* Clear interrupt source */
4061 drm_dp_dpcd_writeb(&intel_dp->aux,
4062 DP_DEVICE_SERVICE_IRQ_VECTOR,
4065 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4066 intel_dp_handle_test_request(intel_dp);
4067 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4068 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4071 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4072 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4073 intel_encoder->base.name);
4074 intel_dp_start_link_train(intel_dp);
4075 intel_dp_complete_link_train(intel_dp);
4076 intel_dp_stop_link_train(intel_dp);
4080 /* XXX this is probably wrong for multiple downstream ports */
4081 static enum drm_connector_status
4082 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4084 uint8_t *dpcd = intel_dp->dpcd;
4087 if (!intel_dp_get_dpcd(intel_dp))
4088 return connector_status_disconnected;
4090 /* if there's no downstream port, we're done */
4091 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4092 return connector_status_connected;
4094 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4095 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4096 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4099 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4101 return connector_status_unknown;
4103 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4104 : connector_status_disconnected;
4107 /* If no HPD, poke DDC gently */
4108 if (drm_probe_ddc(&intel_dp->aux.ddc))
4109 return connector_status_connected;
4111 /* Well we tried, say unknown for unreliable port types */
4112 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4113 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4114 if (type == DP_DS_PORT_TYPE_VGA ||
4115 type == DP_DS_PORT_TYPE_NON_EDID)
4116 return connector_status_unknown;
4118 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4119 DP_DWN_STRM_PORT_TYPE_MASK;
4120 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4121 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4122 return connector_status_unknown;
4125 /* Anything else is out of spec, warn and ignore */
4126 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4127 return connector_status_disconnected;
4130 static enum drm_connector_status
4131 edp_detect(struct intel_dp *intel_dp)
4133 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4134 enum drm_connector_status status;
4136 status = intel_panel_detect(dev);
4137 if (status == connector_status_unknown)
4138 status = connector_status_connected;
4143 static enum drm_connector_status
4144 ironlake_dp_detect(struct intel_dp *intel_dp)
4146 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4147 struct drm_i915_private *dev_priv = dev->dev_private;
4148 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4150 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4151 return connector_status_disconnected;
4153 return intel_dp_detect_dpcd(intel_dp);
4156 static int g4x_digital_port_connected(struct drm_device *dev,
4157 struct intel_digital_port *intel_dig_port)
4159 struct drm_i915_private *dev_priv = dev->dev_private;
4162 if (IS_VALLEYVIEW(dev)) {
4163 switch (intel_dig_port->port) {
4165 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4168 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4171 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4177 switch (intel_dig_port->port) {
4179 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4182 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4185 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4192 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4197 static enum drm_connector_status
4198 g4x_dp_detect(struct intel_dp *intel_dp)
4200 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4201 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4204 /* Can't disconnect eDP, but you can close the lid... */
4205 if (is_edp(intel_dp)) {
4206 enum drm_connector_status status;
4208 status = intel_panel_detect(dev);
4209 if (status == connector_status_unknown)
4210 status = connector_status_connected;
4214 ret = g4x_digital_port_connected(dev, intel_dig_port);
4216 return connector_status_unknown;
4218 return connector_status_disconnected;
4220 return intel_dp_detect_dpcd(intel_dp);
4223 static struct edid *
4224 intel_dp_get_edid(struct intel_dp *intel_dp)
4226 struct intel_connector *intel_connector = intel_dp->attached_connector;
4228 /* use cached edid if we have one */
4229 if (intel_connector->edid) {
4231 if (IS_ERR(intel_connector->edid))
4234 return drm_edid_duplicate(intel_connector->edid);
4236 return drm_get_edid(&intel_connector->base,
4237 &intel_dp->aux.ddc);
4241 intel_dp_set_edid(struct intel_dp *intel_dp)
4243 struct intel_connector *intel_connector = intel_dp->attached_connector;
4246 edid = intel_dp_get_edid(intel_dp);
4247 intel_connector->detect_edid = edid;
4249 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4250 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4252 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4256 intel_dp_unset_edid(struct intel_dp *intel_dp)
4258 struct intel_connector *intel_connector = intel_dp->attached_connector;
4260 kfree(intel_connector->detect_edid);
4261 intel_connector->detect_edid = NULL;
4263 intel_dp->has_audio = false;
4266 static enum intel_display_power_domain
4267 intel_dp_power_get(struct intel_dp *dp)
4269 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4270 enum intel_display_power_domain power_domain;
4272 power_domain = intel_display_port_power_domain(encoder);
4273 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4275 return power_domain;
4279 intel_dp_power_put(struct intel_dp *dp,
4280 enum intel_display_power_domain power_domain)
4282 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4283 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4286 static enum drm_connector_status
4287 intel_dp_detect(struct drm_connector *connector, bool force)
4289 struct intel_dp *intel_dp = intel_attached_dp(connector);
4290 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4291 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4292 struct drm_device *dev = connector->dev;
4293 enum drm_connector_status status;
4294 enum intel_display_power_domain power_domain;
4297 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4298 connector->base.id, connector->name);
4299 intel_dp_unset_edid(intel_dp);
4301 if (intel_dp->is_mst) {
4302 /* MST devices are disconnected from a monitor POV */
4303 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4304 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4305 return connector_status_disconnected;
4308 power_domain = intel_dp_power_get(intel_dp);
4310 /* Can't disconnect eDP, but you can close the lid... */
4311 if (is_edp(intel_dp))
4312 status = edp_detect(intel_dp);
4313 else if (HAS_PCH_SPLIT(dev))
4314 status = ironlake_dp_detect(intel_dp);
4316 status = g4x_dp_detect(intel_dp);
4317 if (status != connector_status_connected)
4320 intel_dp_probe_oui(intel_dp);
4322 ret = intel_dp_probe_mst(intel_dp);
4324 /* if we are in MST mode then this connector
4325 won't appear connected or have anything with EDID on it */
4326 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4327 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4328 status = connector_status_disconnected;
4332 intel_dp_set_edid(intel_dp);
4334 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4335 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4336 status = connector_status_connected;
4339 intel_dp_power_put(intel_dp, power_domain);
4344 intel_dp_force(struct drm_connector *connector)
4346 struct intel_dp *intel_dp = intel_attached_dp(connector);
4347 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4348 enum intel_display_power_domain power_domain;
4350 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4351 connector->base.id, connector->name);
4352 intel_dp_unset_edid(intel_dp);
4354 if (connector->status != connector_status_connected)
4357 power_domain = intel_dp_power_get(intel_dp);
4359 intel_dp_set_edid(intel_dp);
4361 intel_dp_power_put(intel_dp, power_domain);
4363 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4364 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4367 static int intel_dp_get_modes(struct drm_connector *connector)
4369 struct intel_connector *intel_connector = to_intel_connector(connector);
4372 edid = intel_connector->detect_edid;
4374 int ret = intel_connector_update_modes(connector, edid);
4379 /* if eDP has no EDID, fall back to fixed mode */
4380 if (is_edp(intel_attached_dp(connector)) &&
4381 intel_connector->panel.fixed_mode) {
4382 struct drm_display_mode *mode;
4384 mode = drm_mode_duplicate(connector->dev,
4385 intel_connector->panel.fixed_mode);
4387 drm_mode_probed_add(connector, mode);
4396 intel_dp_detect_audio(struct drm_connector *connector)
4398 bool has_audio = false;
4401 edid = to_intel_connector(connector)->detect_edid;
4403 has_audio = drm_detect_monitor_audio(edid);
4409 intel_dp_set_property(struct drm_connector *connector,
4410 struct drm_property *property,
4413 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4414 struct intel_connector *intel_connector = to_intel_connector(connector);
4415 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4416 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4419 ret = drm_object_property_set_value(&connector->base, property, val);
4423 if (property == dev_priv->force_audio_property) {
4427 if (i == intel_dp->force_audio)
4430 intel_dp->force_audio = i;
4432 if (i == HDMI_AUDIO_AUTO)
4433 has_audio = intel_dp_detect_audio(connector);
4435 has_audio = (i == HDMI_AUDIO_ON);
4437 if (has_audio == intel_dp->has_audio)
4440 intel_dp->has_audio = has_audio;
4444 if (property == dev_priv->broadcast_rgb_property) {
4445 bool old_auto = intel_dp->color_range_auto;
4446 uint32_t old_range = intel_dp->color_range;
4449 case INTEL_BROADCAST_RGB_AUTO:
4450 intel_dp->color_range_auto = true;
4452 case INTEL_BROADCAST_RGB_FULL:
4453 intel_dp->color_range_auto = false;
4454 intel_dp->color_range = 0;
4456 case INTEL_BROADCAST_RGB_LIMITED:
4457 intel_dp->color_range_auto = false;
4458 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4464 if (old_auto == intel_dp->color_range_auto &&
4465 old_range == intel_dp->color_range)
4471 if (is_edp(intel_dp) &&
4472 property == connector->dev->mode_config.scaling_mode_property) {
4473 if (val == DRM_MODE_SCALE_NONE) {
4474 DRM_DEBUG_KMS("no scaling not supported\n");
4478 if (intel_connector->panel.fitting_mode == val) {
4479 /* the eDP scaling property is not changed */
4482 intel_connector->panel.fitting_mode = val;
4490 if (intel_encoder->base.crtc)
4491 intel_crtc_restore_mode(intel_encoder->base.crtc);
4497 intel_dp_connector_destroy(struct drm_connector *connector)
4499 struct intel_connector *intel_connector = to_intel_connector(connector);
4501 kfree(intel_connector->detect_edid);
4503 if (!IS_ERR_OR_NULL(intel_connector->edid))
4504 kfree(intel_connector->edid);
4506 /* Can't call is_edp() since the encoder may have been destroyed
4508 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4509 intel_panel_fini(&intel_connector->panel);
4511 drm_connector_cleanup(connector);
4515 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4517 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4518 struct intel_dp *intel_dp = &intel_dig_port->dp;
4520 drm_dp_aux_unregister(&intel_dp->aux);
4521 intel_dp_mst_encoder_cleanup(intel_dig_port);
4522 if (is_edp(intel_dp)) {
4523 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4525 * vdd might still be enabled do to the delayed vdd off.
4526 * Make sure vdd is actually turned off here.
4529 edp_panel_vdd_off_sync(intel_dp);
4530 pps_unlock(intel_dp);
4532 if (intel_dp->edp_notifier.notifier_call) {
4533 unregister_reboot_notifier(&intel_dp->edp_notifier);
4534 intel_dp->edp_notifier.notifier_call = NULL;
4537 drm_encoder_cleanup(encoder);
4538 kfree(intel_dig_port);
4541 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4543 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4545 if (!is_edp(intel_dp))
4549 * vdd might still be enabled do to the delayed vdd off.
4550 * Make sure vdd is actually turned off here.
4552 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4554 edp_panel_vdd_off_sync(intel_dp);
4555 pps_unlock(intel_dp);
4558 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4561 struct drm_device *dev = intel_dig_port->base.base.dev;
4562 struct drm_i915_private *dev_priv = dev->dev_private;
4563 enum intel_display_power_domain power_domain;
4565 lockdep_assert_held(&dev_priv->pps_mutex);
4567 if (!edp_have_panel_vdd(intel_dp))
4571 * The VDD bit needs a power domain reference, so if the bit is
4572 * already enabled when we boot or resume, grab this reference and
4573 * schedule a vdd off, so we don't hold on to the reference
4576 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4577 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4578 intel_display_power_get(dev_priv, power_domain);
4580 edp_panel_vdd_schedule_off(intel_dp);
4583 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4585 struct intel_dp *intel_dp;
4587 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4590 intel_dp = enc_to_intel_dp(encoder);
4595 * Read out the current power sequencer assignment,
4596 * in case the BIOS did something with it.
4598 if (IS_VALLEYVIEW(encoder->dev))
4599 vlv_initial_power_sequencer_setup(intel_dp);
4601 intel_edp_panel_vdd_sanitize(intel_dp);
4603 pps_unlock(intel_dp);
4606 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4607 .dpms = intel_connector_dpms,
4608 .detect = intel_dp_detect,
4609 .force = intel_dp_force,
4610 .fill_modes = drm_helper_probe_single_connector_modes,
4611 .set_property = intel_dp_set_property,
4612 .atomic_get_property = intel_connector_atomic_get_property,
4613 .destroy = intel_dp_connector_destroy,
4614 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4617 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4618 .get_modes = intel_dp_get_modes,
4619 .mode_valid = intel_dp_mode_valid,
4620 .best_encoder = intel_best_encoder,
4623 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4624 .reset = intel_dp_encoder_reset,
4625 .destroy = intel_dp_encoder_destroy,
4629 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4635 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4637 struct intel_dp *intel_dp = &intel_dig_port->dp;
4638 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4639 struct drm_device *dev = intel_dig_port->base.base.dev;
4640 struct drm_i915_private *dev_priv = dev->dev_private;
4641 enum intel_display_power_domain power_domain;
4642 enum irqreturn ret = IRQ_NONE;
4644 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4645 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4647 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4649 * vdd off can generate a long pulse on eDP which
4650 * would require vdd on to handle it, and thus we
4651 * would end up in an endless cycle of
4652 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4654 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4655 port_name(intel_dig_port->port));
4659 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4660 port_name(intel_dig_port->port),
4661 long_hpd ? "long" : "short");
4663 power_domain = intel_display_port_power_domain(intel_encoder);
4664 intel_display_power_get(dev_priv, power_domain);
4668 if (HAS_PCH_SPLIT(dev)) {
4669 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4672 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4676 if (!intel_dp_get_dpcd(intel_dp)) {
4680 intel_dp_probe_oui(intel_dp);
4682 if (!intel_dp_probe_mst(intel_dp))
4686 if (intel_dp->is_mst) {
4687 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4691 if (!intel_dp->is_mst) {
4693 * we'll check the link status via the normal hot plug path later -
4694 * but for short hpds we should check it now
4696 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4697 intel_dp_check_link_status(intel_dp);
4698 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4706 /* if we were in MST mode, and device is not there get out of MST mode */
4707 if (intel_dp->is_mst) {
4708 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4709 intel_dp->is_mst = false;
4710 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4713 intel_display_power_put(dev_priv, power_domain);
4718 /* Return which DP Port should be selected for Transcoder DP control */
4720 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4722 struct drm_device *dev = crtc->dev;
4723 struct intel_encoder *intel_encoder;
4724 struct intel_dp *intel_dp;
4726 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4727 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4729 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4730 intel_encoder->type == INTEL_OUTPUT_EDP)
4731 return intel_dp->output_reg;
4737 /* check the VBT to see whether the eDP is on DP-D port */
4738 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4740 struct drm_i915_private *dev_priv = dev->dev_private;
4741 union child_device_config *p_child;
4743 static const short port_mapping[] = {
4744 [PORT_B] = PORT_IDPB,
4745 [PORT_C] = PORT_IDPC,
4746 [PORT_D] = PORT_IDPD,
4752 if (!dev_priv->vbt.child_dev_num)
4755 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4756 p_child = dev_priv->vbt.child_dev + i;
4758 if (p_child->common.dvo_port == port_mapping[port] &&
4759 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4760 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4767 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4769 struct intel_connector *intel_connector = to_intel_connector(connector);
4771 intel_attach_force_audio_property(connector);
4772 intel_attach_broadcast_rgb_property(connector);
4773 intel_dp->color_range_auto = true;
4775 if (is_edp(intel_dp)) {
4776 drm_mode_create_scaling_mode_property(connector->dev);
4777 drm_object_attach_property(
4779 connector->dev->mode_config.scaling_mode_property,
4780 DRM_MODE_SCALE_ASPECT);
4781 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4785 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4787 intel_dp->last_power_cycle = jiffies;
4788 intel_dp->last_power_on = jiffies;
4789 intel_dp->last_backlight_off = jiffies;
4793 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4794 struct intel_dp *intel_dp)
4796 struct drm_i915_private *dev_priv = dev->dev_private;
4797 struct edp_power_seq cur, vbt, spec,
4798 *final = &intel_dp->pps_delays;
4799 u32 pp_on, pp_off, pp_div, pp;
4800 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4802 lockdep_assert_held(&dev_priv->pps_mutex);
4804 /* already initialized? */
4805 if (final->t11_t12 != 0)
4808 if (HAS_PCH_SPLIT(dev)) {
4809 pp_ctrl_reg = PCH_PP_CONTROL;
4810 pp_on_reg = PCH_PP_ON_DELAYS;
4811 pp_off_reg = PCH_PP_OFF_DELAYS;
4812 pp_div_reg = PCH_PP_DIVISOR;
4814 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4816 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4817 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4818 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4819 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4822 /* Workaround: Need to write PP_CONTROL with the unlock key as
4823 * the very first thing. */
4824 pp = ironlake_get_pp_control(intel_dp);
4825 I915_WRITE(pp_ctrl_reg, pp);
4827 pp_on = I915_READ(pp_on_reg);
4828 pp_off = I915_READ(pp_off_reg);
4829 pp_div = I915_READ(pp_div_reg);
4831 /* Pull timing values out of registers */
4832 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4833 PANEL_POWER_UP_DELAY_SHIFT;
4835 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4836 PANEL_LIGHT_ON_DELAY_SHIFT;
4838 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4839 PANEL_LIGHT_OFF_DELAY_SHIFT;
4841 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4842 PANEL_POWER_DOWN_DELAY_SHIFT;
4844 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4845 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4847 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4848 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4850 vbt = dev_priv->vbt.edp_pps;
4852 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4853 * our hw here, which are all in 100usec. */
4854 spec.t1_t3 = 210 * 10;
4855 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4856 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4857 spec.t10 = 500 * 10;
4858 /* This one is special and actually in units of 100ms, but zero
4859 * based in the hw (so we need to add 100 ms). But the sw vbt
4860 * table multiplies it with 1000 to make it in units of 100usec,
4862 spec.t11_t12 = (510 + 100) * 10;
4864 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4865 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4867 /* Use the max of the register settings and vbt. If both are
4868 * unset, fall back to the spec limits. */
4869 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4871 max(cur.field, vbt.field))
4872 assign_final(t1_t3);
4876 assign_final(t11_t12);
4879 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4880 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4881 intel_dp->backlight_on_delay = get_delay(t8);
4882 intel_dp->backlight_off_delay = get_delay(t9);
4883 intel_dp->panel_power_down_delay = get_delay(t10);
4884 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4887 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4888 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4889 intel_dp->panel_power_cycle_delay);
4891 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4892 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4896 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4897 struct intel_dp *intel_dp)
4899 struct drm_i915_private *dev_priv = dev->dev_private;
4900 u32 pp_on, pp_off, pp_div, port_sel = 0;
4901 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4902 int pp_on_reg, pp_off_reg, pp_div_reg;
4903 enum port port = dp_to_dig_port(intel_dp)->port;
4904 const struct edp_power_seq *seq = &intel_dp->pps_delays;
4906 lockdep_assert_held(&dev_priv->pps_mutex);
4908 if (HAS_PCH_SPLIT(dev)) {
4909 pp_on_reg = PCH_PP_ON_DELAYS;
4910 pp_off_reg = PCH_PP_OFF_DELAYS;
4911 pp_div_reg = PCH_PP_DIVISOR;
4913 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4915 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4916 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4917 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4921 * And finally store the new values in the power sequencer. The
4922 * backlight delays are set to 1 because we do manual waits on them. For
4923 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4924 * we'll end up waiting for the backlight off delay twice: once when we
4925 * do the manual sleep, and once when we disable the panel and wait for
4926 * the PP_STATUS bit to become zero.
4928 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4929 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4930 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4931 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4932 /* Compute the divisor for the pp clock, simply match the Bspec
4934 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4935 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4936 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4938 /* Haswell doesn't have any port selection bits for the panel
4939 * power sequencer any more. */
4940 if (IS_VALLEYVIEW(dev)) {
4941 port_sel = PANEL_PORT_SELECT_VLV(port);
4942 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4944 port_sel = PANEL_PORT_SELECT_DPA;
4946 port_sel = PANEL_PORT_SELECT_DPD;
4951 I915_WRITE(pp_on_reg, pp_on);
4952 I915_WRITE(pp_off_reg, pp_off);
4953 I915_WRITE(pp_div_reg, pp_div);
4955 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4956 I915_READ(pp_on_reg),
4957 I915_READ(pp_off_reg),
4958 I915_READ(pp_div_reg));
4962 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4964 * @refresh_rate: RR to be programmed
4966 * This function gets called when refresh rate (RR) has to be changed from
4967 * one frequency to another. Switches can be between high and low RR
4968 * supported by the panel or to any other RR based on media playback (in
4969 * this case, RR value needs to be passed from user space).
4971 * The caller of this function needs to take a lock on dev_priv->drrs.
4973 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4975 struct drm_i915_private *dev_priv = dev->dev_private;
4976 struct intel_encoder *encoder;
4977 struct intel_digital_port *dig_port = NULL;
4978 struct intel_dp *intel_dp = dev_priv->drrs.dp;
4979 struct intel_crtc_state *config = NULL;
4980 struct intel_crtc *intel_crtc = NULL;
4982 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4984 if (refresh_rate <= 0) {
4985 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4989 if (intel_dp == NULL) {
4990 DRM_DEBUG_KMS("DRRS not supported.\n");
4995 * FIXME: This needs proper synchronization with psr state for some
4996 * platforms that cannot have PSR and DRRS enabled at the same time.
4999 dig_port = dp_to_dig_port(intel_dp);
5000 encoder = &dig_port->base;
5001 intel_crtc = encoder->new_crtc;
5004 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5008 config = intel_crtc->config;
5010 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5011 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5015 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5017 index = DRRS_LOW_RR;
5019 if (index == dev_priv->drrs.refresh_rate_type) {
5021 "DRRS requested for previously set RR...ignoring\n");
5025 if (!intel_crtc->active) {
5026 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5030 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5033 intel_dp_set_m_n(intel_crtc, M1_N1);
5036 intel_dp_set_m_n(intel_crtc, M2_N2);
5040 DRM_ERROR("Unsupported refreshrate type\n");
5042 } else if (INTEL_INFO(dev)->gen > 6) {
5043 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5044 val = I915_READ(reg);
5046 if (index > DRRS_HIGH_RR) {
5047 if (IS_VALLEYVIEW(dev))
5048 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5050 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5052 if (IS_VALLEYVIEW(dev))
5053 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5055 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5057 I915_WRITE(reg, val);
5060 dev_priv->drrs.refresh_rate_type = index;
5062 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5066 * intel_edp_drrs_enable - init drrs struct if supported
5067 * @intel_dp: DP struct
5069 * Initializes frontbuffer_bits and drrs.dp
5071 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5073 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5074 struct drm_i915_private *dev_priv = dev->dev_private;
5075 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5076 struct drm_crtc *crtc = dig_port->base.base.crtc;
5077 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5079 if (!intel_crtc->config->has_drrs) {
5080 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5084 mutex_lock(&dev_priv->drrs.mutex);
5085 if (WARN_ON(dev_priv->drrs.dp)) {
5086 DRM_ERROR("DRRS already enabled\n");
5090 dev_priv->drrs.busy_frontbuffer_bits = 0;
5092 dev_priv->drrs.dp = intel_dp;
5095 mutex_unlock(&dev_priv->drrs.mutex);
5099 * intel_edp_drrs_disable - Disable DRRS
5100 * @intel_dp: DP struct
5103 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5105 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5106 struct drm_i915_private *dev_priv = dev->dev_private;
5107 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5108 struct drm_crtc *crtc = dig_port->base.base.crtc;
5109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5111 if (!intel_crtc->config->has_drrs)
5114 mutex_lock(&dev_priv->drrs.mutex);
5115 if (!dev_priv->drrs.dp) {
5116 mutex_unlock(&dev_priv->drrs.mutex);
5120 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5121 intel_dp_set_drrs_state(dev_priv->dev,
5122 intel_dp->attached_connector->panel.
5123 fixed_mode->vrefresh);
5125 dev_priv->drrs.dp = NULL;
5126 mutex_unlock(&dev_priv->drrs.mutex);
5128 cancel_delayed_work_sync(&dev_priv->drrs.work);
5131 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5133 struct drm_i915_private *dev_priv =
5134 container_of(work, typeof(*dev_priv), drrs.work.work);
5135 struct intel_dp *intel_dp;
5137 mutex_lock(&dev_priv->drrs.mutex);
5139 intel_dp = dev_priv->drrs.dp;
5145 * The delayed work can race with an invalidate hence we need to
5149 if (dev_priv->drrs.busy_frontbuffer_bits)
5152 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5153 intel_dp_set_drrs_state(dev_priv->dev,
5154 intel_dp->attached_connector->panel.
5155 downclock_mode->vrefresh);
5159 mutex_unlock(&dev_priv->drrs.mutex);
5163 * intel_edp_drrs_invalidate - Invalidate DRRS
5165 * @frontbuffer_bits: frontbuffer plane tracking bits
5167 * When there is a disturbance on screen (due to cursor movement/time
5168 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5171 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5173 void intel_edp_drrs_invalidate(struct drm_device *dev,
5174 unsigned frontbuffer_bits)
5176 struct drm_i915_private *dev_priv = dev->dev_private;
5177 struct drm_crtc *crtc;
5180 if (!dev_priv->drrs.dp)
5183 cancel_delayed_work_sync(&dev_priv->drrs.work);
5185 mutex_lock(&dev_priv->drrs.mutex);
5186 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5187 pipe = to_intel_crtc(crtc)->pipe;
5189 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5190 intel_dp_set_drrs_state(dev_priv->dev,
5191 dev_priv->drrs.dp->attached_connector->panel.
5192 fixed_mode->vrefresh);
5195 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5197 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5198 mutex_unlock(&dev_priv->drrs.mutex);
5202 * intel_edp_drrs_flush - Flush DRRS
5204 * @frontbuffer_bits: frontbuffer plane tracking bits
5206 * When there is no movement on screen, DRRS work can be scheduled.
5207 * This DRRS work is responsible for setting relevant registers after a
5208 * timeout of 1 second.
5210 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5212 void intel_edp_drrs_flush(struct drm_device *dev,
5213 unsigned frontbuffer_bits)
5215 struct drm_i915_private *dev_priv = dev->dev_private;
5216 struct drm_crtc *crtc;
5219 if (!dev_priv->drrs.dp)
5222 cancel_delayed_work_sync(&dev_priv->drrs.work);
5224 mutex_lock(&dev_priv->drrs.mutex);
5225 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5226 pipe = to_intel_crtc(crtc)->pipe;
5227 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5229 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5230 !dev_priv->drrs.busy_frontbuffer_bits)
5231 schedule_delayed_work(&dev_priv->drrs.work,
5232 msecs_to_jiffies(1000));
5233 mutex_unlock(&dev_priv->drrs.mutex);
5237 * DOC: Display Refresh Rate Switching (DRRS)
5239 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5240 * which enables swtching between low and high refresh rates,
5241 * dynamically, based on the usage scenario. This feature is applicable
5242 * for internal panels.
5244 * Indication that the panel supports DRRS is given by the panel EDID, which
5245 * would list multiple refresh rates for one resolution.
5247 * DRRS is of 2 types - static and seamless.
5248 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5249 * (may appear as a blink on screen) and is used in dock-undock scenario.
5250 * Seamless DRRS involves changing RR without any visual effect to the user
5251 * and can be used during normal system usage. This is done by programming
5252 * certain registers.
5254 * Support for static/seamless DRRS may be indicated in the VBT based on
5255 * inputs from the panel spec.
5257 * DRRS saves power by switching to low RR based on usage scenarios.
5260 * The implementation is based on frontbuffer tracking implementation.
5261 * When there is a disturbance on the screen triggered by user activity or a
5262 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5263 * When there is no movement on screen, after a timeout of 1 second, a switch
5264 * to low RR is made.
5265 * For integration with frontbuffer tracking code,
5266 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5268 * DRRS can be further extended to support other internal panels and also
5269 * the scenario of video playback wherein RR is set based on the rate
5270 * requested by userspace.
5274 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5275 * @intel_connector: eDP connector
5276 * @fixed_mode: preferred mode of panel
5278 * This function is called only once at driver load to initialize basic
5282 * Downclock mode if panel supports it, else return NULL.
5283 * DRRS support is determined by the presence of downclock mode (apart
5284 * from VBT setting).
5286 static struct drm_display_mode *
5287 intel_dp_drrs_init(struct intel_connector *intel_connector,
5288 struct drm_display_mode *fixed_mode)
5290 struct drm_connector *connector = &intel_connector->base;
5291 struct drm_device *dev = connector->dev;
5292 struct drm_i915_private *dev_priv = dev->dev_private;
5293 struct drm_display_mode *downclock_mode = NULL;
5295 if (INTEL_INFO(dev)->gen <= 6) {
5296 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5300 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5301 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5305 downclock_mode = intel_find_panel_downclock
5306 (dev, fixed_mode, connector);
5308 if (!downclock_mode) {
5309 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5313 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5315 mutex_init(&dev_priv->drrs.mutex);
5317 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5319 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5320 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5321 return downclock_mode;
5324 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5325 struct intel_connector *intel_connector)
5327 struct drm_connector *connector = &intel_connector->base;
5328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5329 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5330 struct drm_device *dev = intel_encoder->base.dev;
5331 struct drm_i915_private *dev_priv = dev->dev_private;
5332 struct drm_display_mode *fixed_mode = NULL;
5333 struct drm_display_mode *downclock_mode = NULL;
5335 struct drm_display_mode *scan;
5337 enum pipe pipe = INVALID_PIPE;
5339 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
5341 if (!is_edp(intel_dp))
5345 intel_edp_panel_vdd_sanitize(intel_dp);
5346 pps_unlock(intel_dp);
5348 /* Cache DPCD and EDID for edp. */
5349 has_dpcd = intel_dp_get_dpcd(intel_dp);
5352 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5353 dev_priv->no_aux_handshake =
5354 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5355 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5357 /* if this fails, presume the device is a ghost */
5358 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5362 /* We now know it's not a ghost, init power sequence regs. */
5364 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5365 pps_unlock(intel_dp);
5367 mutex_lock(&dev->mode_config.mutex);
5368 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5370 if (drm_add_edid_modes(connector, edid)) {
5371 drm_mode_connector_update_edid_property(connector,
5373 drm_edid_to_eld(connector, edid);
5376 edid = ERR_PTR(-EINVAL);
5379 edid = ERR_PTR(-ENOENT);
5381 intel_connector->edid = edid;
5383 /* prefer fixed mode from EDID if available */
5384 list_for_each_entry(scan, &connector->probed_modes, head) {
5385 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5386 fixed_mode = drm_mode_duplicate(dev, scan);
5387 downclock_mode = intel_dp_drrs_init(
5388 intel_connector, fixed_mode);
5393 /* fallback to VBT if available for eDP */
5394 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5395 fixed_mode = drm_mode_duplicate(dev,
5396 dev_priv->vbt.lfp_lvds_vbt_mode);
5398 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5400 mutex_unlock(&dev->mode_config.mutex);
5402 if (IS_VALLEYVIEW(dev)) {
5403 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5404 register_reboot_notifier(&intel_dp->edp_notifier);
5407 * Figure out the current pipe for the initial backlight setup.
5408 * If the current pipe isn't valid, try the PPS pipe, and if that
5409 * fails just assume pipe A.
5411 if (IS_CHERRYVIEW(dev))
5412 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5414 pipe = PORT_TO_PIPE(intel_dp->DP);
5416 if (pipe != PIPE_A && pipe != PIPE_B)
5417 pipe = intel_dp->pps_pipe;
5419 if (pipe != PIPE_A && pipe != PIPE_B)
5422 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5426 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5427 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5428 intel_panel_setup_backlight(connector, pipe);
5434 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5435 struct intel_connector *intel_connector)
5437 struct drm_connector *connector = &intel_connector->base;
5438 struct intel_dp *intel_dp = &intel_dig_port->dp;
5439 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5440 struct drm_device *dev = intel_encoder->base.dev;
5441 struct drm_i915_private *dev_priv = dev->dev_private;
5442 enum port port = intel_dig_port->port;
5445 intel_dp->pps_pipe = INVALID_PIPE;
5447 /* intel_dp vfuncs */
5448 if (INTEL_INFO(dev)->gen >= 9)
5449 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5450 else if (IS_VALLEYVIEW(dev))
5451 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5452 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5453 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5454 else if (HAS_PCH_SPLIT(dev))
5455 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5457 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5459 if (INTEL_INFO(dev)->gen >= 9)
5460 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5462 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5464 /* Preserve the current hw state. */
5465 intel_dp->DP = I915_READ(intel_dp->output_reg);
5466 intel_dp->attached_connector = intel_connector;
5468 if (intel_dp_is_edp(dev, port))
5469 type = DRM_MODE_CONNECTOR_eDP;
5471 type = DRM_MODE_CONNECTOR_DisplayPort;
5474 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5475 * for DP the encoder type can be set by the caller to
5476 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5478 if (type == DRM_MODE_CONNECTOR_eDP)
5479 intel_encoder->type = INTEL_OUTPUT_EDP;
5481 /* eDP only on port B and/or C on vlv/chv */
5482 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5483 port != PORT_B && port != PORT_C))
5486 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5487 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5490 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5491 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5493 connector->interlace_allowed = true;
5494 connector->doublescan_allowed = 0;
5496 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5497 edp_panel_vdd_work);
5499 intel_connector_attach_encoder(intel_connector, intel_encoder);
5500 drm_connector_register(connector);
5503 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5505 intel_connector->get_hw_state = intel_connector_get_hw_state;
5506 intel_connector->unregister = intel_dp_connector_unregister;
5508 /* Set up the hotplug pin. */
5511 intel_encoder->hpd_pin = HPD_PORT_A;
5514 intel_encoder->hpd_pin = HPD_PORT_B;
5517 intel_encoder->hpd_pin = HPD_PORT_C;
5520 intel_encoder->hpd_pin = HPD_PORT_D;
5526 if (is_edp(intel_dp)) {
5528 intel_dp_init_panel_power_timestamps(intel_dp);
5529 if (IS_VALLEYVIEW(dev))
5530 vlv_initial_power_sequencer_setup(intel_dp);
5532 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5533 pps_unlock(intel_dp);
5536 intel_dp_aux_init(intel_dp, intel_connector);
5538 /* init MST on ports that can support it */
5539 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5540 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5541 intel_dp_mst_encoder_init(intel_dig_port,
5542 intel_connector->base.base.id);
5546 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5547 drm_dp_aux_unregister(&intel_dp->aux);
5548 if (is_edp(intel_dp)) {
5549 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5551 * vdd might still be enabled do to the delayed vdd off.
5552 * Make sure vdd is actually turned off here.
5555 edp_panel_vdd_off_sync(intel_dp);
5556 pps_unlock(intel_dp);
5558 drm_connector_unregister(connector);
5559 drm_connector_cleanup(connector);
5563 intel_dp_add_properties(intel_dp, connector);
5565 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5566 * 0xd. Failure to do so will result in spurious interrupts being
5567 * generated on the port when a cable is not attached.
5569 if (IS_G4X(dev) && !IS_GM45(dev)) {
5570 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5571 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5578 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5580 struct drm_i915_private *dev_priv = dev->dev_private;
5581 struct intel_digital_port *intel_dig_port;
5582 struct intel_encoder *intel_encoder;
5583 struct drm_encoder *encoder;
5584 struct intel_connector *intel_connector;
5586 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5587 if (!intel_dig_port)
5590 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5591 if (!intel_connector) {
5592 kfree(intel_dig_port);
5596 intel_encoder = &intel_dig_port->base;
5597 encoder = &intel_encoder->base;
5599 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5600 DRM_MODE_ENCODER_TMDS);
5602 intel_encoder->compute_config = intel_dp_compute_config;
5603 intel_encoder->disable = intel_disable_dp;
5604 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5605 intel_encoder->get_config = intel_dp_get_config;
5606 intel_encoder->suspend = intel_dp_encoder_suspend;
5607 if (IS_CHERRYVIEW(dev)) {
5608 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5609 intel_encoder->pre_enable = chv_pre_enable_dp;
5610 intel_encoder->enable = vlv_enable_dp;
5611 intel_encoder->post_disable = chv_post_disable_dp;
5612 } else if (IS_VALLEYVIEW(dev)) {
5613 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5614 intel_encoder->pre_enable = vlv_pre_enable_dp;
5615 intel_encoder->enable = vlv_enable_dp;
5616 intel_encoder->post_disable = vlv_post_disable_dp;
5618 intel_encoder->pre_enable = g4x_pre_enable_dp;
5619 intel_encoder->enable = g4x_enable_dp;
5620 if (INTEL_INFO(dev)->gen >= 5)
5621 intel_encoder->post_disable = ilk_post_disable_dp;
5624 intel_dig_port->port = port;
5625 intel_dig_port->dp.output_reg = output_reg;
5627 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5628 if (IS_CHERRYVIEW(dev)) {
5630 intel_encoder->crtc_mask = 1 << 2;
5632 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5634 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5636 intel_encoder->cloneable = 0;
5637 intel_encoder->hot_plug = intel_dp_hot_plug;
5639 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5640 dev_priv->hpd_irq_port[port] = intel_dig_port;
5642 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5643 drm_encoder_cleanup(encoder);
5644 kfree(intel_dig_port);
5645 kfree(intel_connector);
5649 void intel_dp_mst_suspend(struct drm_device *dev)
5651 struct drm_i915_private *dev_priv = dev->dev_private;
5655 for (i = 0; i < I915_MAX_PORTS; i++) {
5656 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5657 if (!intel_dig_port)
5660 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5661 if (!intel_dig_port->dp.can_mst)
5663 if (intel_dig_port->dp.is_mst)
5664 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5669 void intel_dp_mst_resume(struct drm_device *dev)
5671 struct drm_i915_private *dev_priv = dev->dev_private;
5674 for (i = 0; i < I915_MAX_PORTS; i++) {
5675 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5676 if (!intel_dig_port)
5678 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5681 if (!intel_dig_port->dp.can_mst)
5684 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5686 intel_dp_check_mst_status(&intel_dig_port->dp);