/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
19 #include "intel_sideband.h"
21 #include "intel_vga.h"
23 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
24 enum i915_power_well_id power_well_id);
27 intel_display_power_domain_str(enum intel_display_power_domain domain)
30 case POWER_DOMAIN_DISPLAY_CORE:
31 return "DISPLAY_CORE";
32 case POWER_DOMAIN_PIPE_A:
34 case POWER_DOMAIN_PIPE_B:
36 case POWER_DOMAIN_PIPE_C:
38 case POWER_DOMAIN_PIPE_D:
40 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
41 return "PIPE_A_PANEL_FITTER";
42 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
43 return "PIPE_B_PANEL_FITTER";
44 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
45 return "PIPE_C_PANEL_FITTER";
46 case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
47 return "PIPE_D_PANEL_FITTER";
48 case POWER_DOMAIN_TRANSCODER_A:
49 return "TRANSCODER_A";
50 case POWER_DOMAIN_TRANSCODER_B:
51 return "TRANSCODER_B";
52 case POWER_DOMAIN_TRANSCODER_C:
53 return "TRANSCODER_C";
54 case POWER_DOMAIN_TRANSCODER_D:
55 return "TRANSCODER_D";
56 case POWER_DOMAIN_TRANSCODER_EDP:
57 return "TRANSCODER_EDP";
58 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
59 return "TRANSCODER_VDSC_PW2";
60 case POWER_DOMAIN_TRANSCODER_DSI_A:
61 return "TRANSCODER_DSI_A";
62 case POWER_DOMAIN_TRANSCODER_DSI_C:
63 return "TRANSCODER_DSI_C";
64 case POWER_DOMAIN_PORT_DDI_A_LANES:
65 return "PORT_DDI_A_LANES";
66 case POWER_DOMAIN_PORT_DDI_B_LANES:
67 return "PORT_DDI_B_LANES";
68 case POWER_DOMAIN_PORT_DDI_C_LANES:
69 return "PORT_DDI_C_LANES";
70 case POWER_DOMAIN_PORT_DDI_D_LANES:
71 return "PORT_DDI_D_LANES";
72 case POWER_DOMAIN_PORT_DDI_E_LANES:
73 return "PORT_DDI_E_LANES";
74 case POWER_DOMAIN_PORT_DDI_F_LANES:
75 return "PORT_DDI_F_LANES";
76 case POWER_DOMAIN_PORT_DDI_G_LANES:
77 return "PORT_DDI_G_LANES";
78 case POWER_DOMAIN_PORT_DDI_H_LANES:
79 return "PORT_DDI_H_LANES";
80 case POWER_DOMAIN_PORT_DDI_I_LANES:
81 return "PORT_DDI_I_LANES";
82 case POWER_DOMAIN_PORT_DDI_A_IO:
83 return "PORT_DDI_A_IO";
84 case POWER_DOMAIN_PORT_DDI_B_IO:
85 return "PORT_DDI_B_IO";
86 case POWER_DOMAIN_PORT_DDI_C_IO:
87 return "PORT_DDI_C_IO";
88 case POWER_DOMAIN_PORT_DDI_D_IO:
89 return "PORT_DDI_D_IO";
90 case POWER_DOMAIN_PORT_DDI_E_IO:
91 return "PORT_DDI_E_IO";
92 case POWER_DOMAIN_PORT_DDI_F_IO:
93 return "PORT_DDI_F_IO";
94 case POWER_DOMAIN_PORT_DDI_G_IO:
95 return "PORT_DDI_G_IO";
96 case POWER_DOMAIN_PORT_DDI_H_IO:
97 return "PORT_DDI_H_IO";
98 case POWER_DOMAIN_PORT_DDI_I_IO:
99 return "PORT_DDI_I_IO";
100 case POWER_DOMAIN_PORT_DSI:
102 case POWER_DOMAIN_PORT_CRT:
104 case POWER_DOMAIN_PORT_OTHER:
106 case POWER_DOMAIN_VGA:
108 case POWER_DOMAIN_AUDIO:
110 case POWER_DOMAIN_AUX_A:
112 case POWER_DOMAIN_AUX_B:
114 case POWER_DOMAIN_AUX_C:
116 case POWER_DOMAIN_AUX_D:
118 case POWER_DOMAIN_AUX_E:
120 case POWER_DOMAIN_AUX_F:
122 case POWER_DOMAIN_AUX_G:
124 case POWER_DOMAIN_AUX_H:
126 case POWER_DOMAIN_AUX_I:
128 case POWER_DOMAIN_AUX_IO_A:
130 case POWER_DOMAIN_AUX_C_TBT:
132 case POWER_DOMAIN_AUX_D_TBT:
134 case POWER_DOMAIN_AUX_E_TBT:
136 case POWER_DOMAIN_AUX_F_TBT:
138 case POWER_DOMAIN_AUX_G_TBT:
140 case POWER_DOMAIN_AUX_H_TBT:
142 case POWER_DOMAIN_AUX_I_TBT:
144 case POWER_DOMAIN_GMBUS:
146 case POWER_DOMAIN_INIT:
148 case POWER_DOMAIN_MODESET:
150 case POWER_DOMAIN_GT_IRQ:
152 case POWER_DOMAIN_DPLL_DC_OFF:
153 return "DPLL_DC_OFF";
155 MISSING_CASE(domain);
160 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
161 struct i915_power_well *power_well)
163 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
164 power_well->desc->ops->enable(dev_priv, power_well);
165 power_well->hw_enabled = true;
168 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
169 struct i915_power_well *power_well)
171 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
172 power_well->hw_enabled = false;
173 power_well->desc->ops->disable(dev_priv, power_well);
176 static void intel_power_well_get(struct drm_i915_private *dev_priv,
177 struct i915_power_well *power_well)
179 if (!power_well->count++)
180 intel_power_well_enable(dev_priv, power_well);
183 static void intel_power_well_put(struct drm_i915_private *dev_priv,
184 struct i915_power_well *power_well)
186 WARN(!power_well->count, "Use count on power well %s is already zero",
187 power_well->desc->name);
189 if (!--power_well->count)
190 intel_power_well_disable(dev_priv, power_well);
194 * __intel_display_power_is_enabled - unlocked check for a power domain
195 * @dev_priv: i915 device instance
196 * @domain: power domain to check
198 * This is the unlocked version of intel_display_power_is_enabled() and should
199 * only be used from error capture and recovery code where deadlocks are
203 * True when the power domain is enabled, false otherwise.
205 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
206 enum intel_display_power_domain domain)
208 struct i915_power_well *power_well;
211 if (dev_priv->runtime_pm.suspended)
216 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
217 if (power_well->desc->always_on)
220 if (!power_well->hw_enabled) {
230 * intel_display_power_is_enabled - check for a power domain
231 * @dev_priv: i915 device instance
232 * @domain: power domain to check
234 * This function can be used to check the hw power domain state. It is mostly
235 * used in hardware state readout functions. Everywhere else code should rely
236 * upon explicit power domain reference counting to ensure that the hardware
237 * block is powered up before accessing it.
239 * Callers must hold the relevant modesetting locks to ensure that concurrent
240 * threads can't disable the power well while the caller tries to read a few
244 * True when the power domain is enabled, false otherwise.
246 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
247 enum intel_display_power_domain domain)
249 struct i915_power_domains *power_domains;
252 power_domains = &dev_priv->power_domains;
254 mutex_lock(&power_domains->lock);
255 ret = __intel_display_power_is_enabled(dev_priv, domain);
256 mutex_unlock(&power_domains->lock);
262 * Starting with Haswell, we have a "Power Down Well" that can be turned off
263 * when not needed anymore. We have 4 registers that can request the power well
264 * to be enabled, and it will only be disabled if none of the registers is
265 * requesting it to be enabled.
267 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
268 u8 irq_pipe_mask, bool has_vga)
271 intel_vga_reset_io_mem(dev_priv);
274 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
277 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
281 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
284 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
285 struct i915_power_well *power_well)
287 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
288 int pw_idx = power_well->desc->hsw.idx;
290 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
291 if (intel_de_wait_for_set(dev_priv, regs->driver,
292 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
293 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
294 power_well->desc->name);
296 /* An AUX timeout is expected if the TBT DP tunnel is down. */
297 WARN_ON(!power_well->desc->hsw.is_tc_tbt);
301 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
302 const struct i915_power_well_regs *regs,
305 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
308 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
309 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
311 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
312 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
317 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
318 struct i915_power_well *power_well)
320 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
321 int pw_idx = power_well->desc->hsw.idx;
326 * Bspec doesn't require waiting for PWs to get disabled, but still do
327 * this for paranoia. The known cases where a PW will be forced on:
328 * - a KVMR request on any power well via the KVMR request register
329 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
330 * DEBUG request registers
331 * Skip the wait in case any of the request bits are set and print a
332 * diagnostic message.
334 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
335 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
336 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
340 drm_dbg_kms(&dev_priv->drm,
341 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
342 power_well->desc->name,
343 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
346 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
347 enum skl_power_gate pg)
349 /* Timeout 5us for PG#0, for other PGs 1us */
350 WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
351 SKL_FUSE_PG_DIST_STATUS(pg), 1));
354 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
355 struct i915_power_well *power_well)
357 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
358 int pw_idx = power_well->desc->hsw.idx;
359 bool wait_fuses = power_well->desc->hsw.has_fuses;
360 enum skl_power_gate uninitialized_var(pg);
364 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
365 SKL_PW_CTL_IDX_TO_PG(pw_idx);
367 * For PW1 we have to wait both for the PW0/PG0 fuse state
368 * before enabling the power well and PW1/PG1's own fuse
369 * state after the enabling. For all other power wells with
370 * fuses we only have to wait for that PW/PG's fuse state
371 * after the enabling.
374 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
377 val = intel_de_read(dev_priv, regs->driver);
378 intel_de_write(dev_priv, regs->driver,
379 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
380 hsw_wait_for_power_well_enable(dev_priv, power_well);
382 /* Display WA #1178: cnl */
383 if (IS_CANNONLAKE(dev_priv) &&
384 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
385 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
386 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
387 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
388 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
392 gen9_wait_for_power_well_fuses(dev_priv, pg);
394 hsw_power_well_post_enable(dev_priv,
395 power_well->desc->hsw.irq_pipe_mask,
396 power_well->desc->hsw.has_vga);
399 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
400 struct i915_power_well *power_well)
402 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
403 int pw_idx = power_well->desc->hsw.idx;
406 hsw_power_well_pre_disable(dev_priv,
407 power_well->desc->hsw.irq_pipe_mask);
409 val = intel_de_read(dev_priv, regs->driver);
410 intel_de_write(dev_priv, regs->driver,
411 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
412 hsw_wait_for_power_well_disable(dev_priv, power_well);
415 #define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
418 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
419 struct i915_power_well *power_well)
421 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
422 int pw_idx = power_well->desc->hsw.idx;
423 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
426 WARN_ON(!IS_ICELAKE(dev_priv));
428 val = intel_de_read(dev_priv, regs->driver);
429 intel_de_write(dev_priv, regs->driver,
430 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
432 if (INTEL_GEN(dev_priv) < 12) {
433 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
434 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
435 val | ICL_LANE_ENABLE_AUX);
438 hsw_wait_for_power_well_enable(dev_priv, power_well);
440 /* Display WA #1178: icl */
441 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
442 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
443 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
444 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
445 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
450 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
451 struct i915_power_well *power_well)
453 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
454 int pw_idx = power_well->desc->hsw.idx;
455 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
458 WARN_ON(!IS_ICELAKE(dev_priv));
460 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
461 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
462 val & ~ICL_LANE_ENABLE_AUX);
464 val = intel_de_read(dev_priv, regs->driver);
465 intel_de_write(dev_priv, regs->driver,
466 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
468 hsw_wait_for_power_well_disable(dev_priv, power_well);
471 #define ICL_AUX_PW_TO_CH(pw_idx) \
472 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
474 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
475 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
477 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
478 struct i915_power_well *power_well)
480 int pw_idx = power_well->desc->hsw.idx;
482 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
483 ICL_AUX_PW_TO_CH(pw_idx);
486 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
488 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
490 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
491 struct i915_power_well *power_well)
493 int refs = hweight64(power_well->desc->domains &
494 async_put_domains_mask(&dev_priv->power_domains));
496 WARN_ON(refs > power_well->count);
501 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
502 struct i915_power_well *power_well)
504 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
505 struct intel_digital_port *dig_port = NULL;
506 struct intel_encoder *encoder;
508 /* Bypass the check if all references are released asynchronously */
509 if (power_well_async_ref_count(dev_priv, power_well) ==
513 aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
515 for_each_intel_encoder(&dev_priv->drm, encoder) {
516 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
518 if (!intel_phy_is_tc(dev_priv, phy))
521 /* We'll check the MST primary port */
522 if (encoder->type == INTEL_OUTPUT_DP_MST)
525 dig_port = enc_to_dig_port(encoder);
526 if (WARN_ON(!dig_port))
529 if (dig_port->aux_ch != aux_ch) {
537 if (WARN_ON(!dig_port))
540 WARN_ON(!intel_tc_port_ref_held(dig_port));
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
}
552 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
555 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
556 struct i915_power_well *power_well)
558 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
561 icl_tc_port_assert_ref_held(dev_priv, power_well);
563 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
564 val &= ~DP_AUX_CH_CTL_TBT_IO;
565 if (power_well->desc->hsw.is_tc_tbt)
566 val |= DP_AUX_CH_CTL_TBT_IO;
567 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
569 hsw_power_well_enable(dev_priv, power_well);
571 if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
572 enum tc_port tc_port;
574 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
575 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
576 HIP_INDEX_VAL(tc_port, 0x2));
578 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
579 DKL_CMN_UC_DW27_UC_HEALTH, 1))
580 drm_warn(&dev_priv->drm,
581 "Timeout waiting TC uC health\n");
/* Disable a Type-C PHY AUX power well after asserting the port ref. */
static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}
595 * We should only use the power well if we explicitly asked the hardware to
596 * enable it, so check if it's enabled and also check if we've requested it to
599 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
600 struct i915_power_well *power_well)
602 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
603 enum i915_power_well_id id = power_well->desc->id;
604 int pw_idx = power_well->desc->hsw.idx;
605 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
606 HSW_PWR_WELL_CTL_STATE(pw_idx);
609 val = intel_de_read(dev_priv, regs->driver);
612 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
613 * and the MISC_IO PW will be not restored, so check instead for the
614 * BIOS's own request bits, which are forced-on for these power wells
615 * when exiting DC5/6.
617 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
618 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
619 val |= intel_de_read(dev_priv, regs->bios);
621 return (val & mask) == mask;
624 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
626 WARN_ONCE((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
627 "DC9 already programmed to be enabled.\n");
628 WARN_ONCE(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
629 "DC5 still not disabled to enable DC9.\n");
630 WARN_ONCE(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
631 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
632 "Power well 2 on.\n");
633 WARN_ONCE(intel_irqs_enabled(dev_priv),
634 "Interrupts not disabled yet.\n");
637 * TODO: check for the following to verify the conditions to enter DC9
638 * state are satisfied:
639 * 1] Check relevant display engine registers to verify if mode set
640 * disable sequence was followed.
641 * 2] Check if display uninitialize sequence is initialized.
645 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
647 WARN_ONCE(intel_irqs_enabled(dev_priv),
648 "Interrupts not disabled yet.\n");
649 WARN_ONCE(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
650 "DC5 still not disabled.\n");
653 * TODO: check for the following to verify DC9 state was indeed
654 * entered before programming to disable it:
655 * 1] Check relevant display engine registers to verify if mode
656 * set disable sequence was followed.
657 * 2] Check if display uninitialize sequence is initialized.
661 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
668 intel_de_write(dev_priv, DC_STATE_EN, state);
670 /* It has been observed that disabling the dc6 state sometimes
671 * doesn't stick and dmc keeps returning old value. Make sure
672 * the write really sticks enough times and also force rewrite until
673 * we are confident that state is exactly what we want.
676 v = intel_de_read(dev_priv, DC_STATE_EN);
679 intel_de_write(dev_priv, DC_STATE_EN, state);
682 } else if (rereads++ > 5) {
686 } while (rewrites < 100);
689 drm_err(&dev_priv->drm,
690 "Writing dc state to 0x%x failed, now 0x%x\n",
693 /* Most of the times we need one retry, avoid spam */
695 drm_dbg_kms(&dev_priv->drm,
696 "Rewrote dc state to 0x%x %d times\n",
700 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
704 mask = DC_STATE_EN_UPTO_DC5;
706 if (INTEL_GEN(dev_priv) >= 12)
707 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
709 else if (IS_GEN(dev_priv, 11))
710 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
711 else if (IS_GEN9_LP(dev_priv))
712 mask |= DC_STATE_EN_DC9;
714 mask |= DC_STATE_EN_UPTO_DC6;
719 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
723 val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
725 drm_dbg_kms(&dev_priv->drm,
726 "Resetting DC state tracking from %02x to %02x\n",
727 dev_priv->csr.dc_state, val);
728 dev_priv->csr.dc_state = val;
732 * gen9_set_dc_state - set target display C power state
733 * @dev_priv: i915 device instance
734 * @state: target DC power state
736 * - DC_STATE_EN_UPTO_DC5
737 * - DC_STATE_EN_UPTO_DC6
740 * Signal to DMC firmware/HW the target DC power state passed in @state.
741 * DMC/HW can turn off individual display clocks and power rails when entering
742 * a deeper DC power state (higher in number) and turns these back when exiting
743 * that state to a shallower power state (lower in number). The HW will decide
744 * when to actually enter a given state on an on-demand basis, for instance
745 * depending on the active state of display pipes. The state of display
746 * registers backed by affected power rails are saved/restored as needed.
748 * Based on the above enabling a deeper DC power state is asynchronous wrt.
749 * enabling it. Disabling a deeper power state is synchronous: for instance
750 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
751 * back on and register state is restored. This is guaranteed by the MMIO write
752 * to DC_STATE_EN blocking until the state is restored.
754 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
759 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
760 state &= dev_priv->csr.allowed_dc_mask;
762 val = intel_de_read(dev_priv, DC_STATE_EN);
763 mask = gen9_dc_mask(dev_priv);
764 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
767 /* Check if DMC is ignoring our DC state requests */
768 if ((val & mask) != dev_priv->csr.dc_state)
769 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
770 dev_priv->csr.dc_state, val & mask);
775 gen9_write_dc_state(dev_priv, val);
777 dev_priv->csr.dc_state = val & mask;
781 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
785 DC_STATE_EN_UPTO_DC6,
786 DC_STATE_EN_UPTO_DC5,
792 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
793 if (target_dc_state != states[i])
796 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
799 target_dc_state = states[i + 1];
802 return target_dc_state;
805 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
807 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
808 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
811 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
815 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
816 val = intel_de_read(dev_priv, DC_STATE_EN);
817 val &= ~DC_STATE_DC3CO_STATUS;
818 intel_de_write(dev_priv, DC_STATE_EN, val);
819 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
821 * Delay of 200us DC3CO Exit time B.Spec 49196
823 usleep_range(200, 210);
826 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
828 assert_can_enable_dc9(dev_priv);
830 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
832 * Power sequencer reset is not needed on
833 * platforms with South Display Engine on PCH,
834 * because PPS registers are always on.
836 if (!HAS_PCH_SPLIT(dev_priv))
837 intel_power_sequencer_reset(dev_priv);
838 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
841 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
843 assert_can_disable_dc9(dev_priv);
845 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
847 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
849 intel_pps_unlock_regs_wa(dev_priv);
852 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
854 WARN_ONCE(!intel_de_read(dev_priv, CSR_PROGRAM(0)),
855 "CSR program storage start is NULL\n");
856 WARN_ONCE(!intel_de_read(dev_priv, CSR_SSP_BASE),
857 "CSR SSP Base Not fine\n");
858 WARN_ONCE(!intel_de_read(dev_priv, CSR_HTP_SKL), "CSR HTP Not fine\n");
861 static struct i915_power_well *
862 lookup_power_well(struct drm_i915_private *dev_priv,
863 enum i915_power_well_id power_well_id)
865 struct i915_power_well *power_well;
867 for_each_power_well(dev_priv, power_well)
868 if (power_well->desc->id == power_well_id)
872 * It's not feasible to add error checking code to the callers since
873 * this condition really shouldn't happen and it doesn't even make sense
874 * to abort things like display initialization sequences. Just return
875 * the first power well and hope the WARN gets reported so we can fix
878 WARN(1, "Power well %d not defined for this platform\n", power_well_id);
879 return &dev_priv->power_domains.power_wells[0];
883 * intel_display_power_set_target_dc_state - Set target dc state.
884 * @dev_priv: i915 device
885 * @state: state which needs to be set as target_dc_state.
887 * This function set the "DC off" power well target_dc_state,
888 * based upon this target_dc_stste, "DC off" power well will
889 * enable desired DC state.
891 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
894 struct i915_power_well *power_well;
896 struct i915_power_domains *power_domains = &dev_priv->power_domains;
898 mutex_lock(&power_domains->lock);
899 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
901 if (WARN_ON(!power_well))
904 state = sanitize_target_dc_state(dev_priv, state);
906 if (state == dev_priv->csr.target_dc_state)
909 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
912 * If DC off power well is disabled, need to enable and disable the
913 * DC off power well to effect target DC state.
916 power_well->desc->ops->enable(dev_priv, power_well);
918 dev_priv->csr.target_dc_state = state;
921 power_well->desc->ops->disable(dev_priv, power_well);
924 mutex_unlock(&power_domains->lock);
927 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
929 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
932 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
934 WARN_ONCE((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
935 "DC5 already programmed to be enabled.\n");
936 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
938 assert_csr_loaded(dev_priv);
941 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
943 assert_can_enable_dc5(dev_priv);
945 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
947 /* Wa Display #1183: skl,kbl,cfl */
948 if (IS_GEN9_BC(dev_priv))
949 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
950 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
952 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
955 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
957 WARN_ONCE(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
958 "Backlight is not disabled.\n");
959 WARN_ONCE((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
960 "DC6 already programmed to be enabled.\n");
962 assert_csr_loaded(dev_priv);
965 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
967 assert_can_enable_dc6(dev_priv);
969 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
971 /* Wa Display #1183: skl,kbl,cfl */
972 if (IS_GEN9_BC(dev_priv))
973 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
974 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
976 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
979 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
980 struct i915_power_well *power_well)
982 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
983 int pw_idx = power_well->desc->hsw.idx;
984 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
985 u32 bios_req = intel_de_read(dev_priv, regs->bios);
987 /* Take over the request bit if set by BIOS. */
988 if (bios_req & mask) {
989 u32 drv_req = intel_de_read(dev_priv, regs->driver);
991 if (!(drv_req & mask))
992 intel_de_write(dev_priv, regs->driver, drv_req | mask);
993 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
997 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
998 struct i915_power_well *power_well)
1000 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1003 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1004 struct i915_power_well *power_well)
1006 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1009 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1010 struct i915_power_well *power_well)
1012 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1015 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1017 struct i915_power_well *power_well;
1019 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1020 if (power_well->count > 0)
1021 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1023 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1024 if (power_well->count > 0)
1025 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1027 if (IS_GEMINILAKE(dev_priv)) {
1028 power_well = lookup_power_well(dev_priv,
1029 GLK_DISP_PW_DPIO_CMN_C);
1030 if (power_well->count > 0)
1031 bxt_ddi_phy_verify_state(dev_priv,
1032 power_well->desc->bxt.phy);
1036 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1037 struct i915_power_well *power_well)
1039 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1040 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1043 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1045 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1046 u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1048 WARN(hw_enabled_dbuf_slices != enabled_dbuf_slices,
1049 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1050 hw_enabled_dbuf_slices,
1051 enabled_dbuf_slices);
1054 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1056 struct intel_cdclk_config cdclk_config = {};
1058 if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1059 tgl_disable_dc3co(dev_priv);
1063 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1065 dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1066 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1067 WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_config));
1069 gen9_assert_dbuf_enabled(dev_priv);
1071 if (IS_GEN9_LP(dev_priv))
1072 bxt_verify_ddi_phy_power_wells(dev_priv);
1074 if (INTEL_GEN(dev_priv) >= 11)
1076 * DMC retains HW context only for port A, the other combo
1077 * PHY's HW context for port B is lost after DC transitions,
1078 * so we need to restore it manually.
1080 intel_combo_phy_init(dev_priv);
/* Enabling "DC off" means forbidding DC states: disable them all. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}
1089 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1090 struct i915_power_well *power_well)
1092 if (!dev_priv->csr.dmc_payload)
1095 switch (dev_priv->csr.target_dc_state) {
1096 case DC_STATE_EN_DC3CO:
1097 tgl_enable_dc3co(dev_priv);
1099 case DC_STATE_EN_UPTO_DC6:
1100 skl_enable_dc6(dev_priv);
1102 case DC_STATE_EN_UPTO_DC5:
1103 gen9_enable_dc5(dev_priv);
/* No HW state to sync for legacy i9xx wells. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
/* Always-on wells cannot be toggled; enable/disable is a no-op. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1118 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1119 struct i915_power_well *power_well)
1124 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1125 struct i915_power_well *power_well)
1127 if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1128 i830_enable_pipe(dev_priv, PIPE_A);
1129 if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1130 i830_enable_pipe(dev_priv, PIPE_B);
1133 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1134 struct i915_power_well *power_well)
1136 i830_disable_pipe(dev_priv, PIPE_B);
1137 i830_disable_pipe(dev_priv, PIPE_A);
/* i830: the emulated well counts as enabled only if both pipes are enabled. */
1140 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1141 struct i915_power_well *power_well)
1143 return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1144 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/*
 * i830 sync_hw: force the HW state to match the SW refcount — pipes on
 * when the well is held (count > 0), off otherwise.
 */
1147 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1148 struct i915_power_well *power_well)
1150 if (power_well->count > 0)
1151 i830_pipes_power_well_enable(dev_priv, power_well);
1153 i830_pipes_power_well_disable(dev_priv, power_well);
/*
 * VLV: switch a power well on/off via the punit sideband.  Writes the
 * requested power state to PUNIT_REG_PWRGT_CTRL and polls
 * PUNIT_REG_PWRGT_STATUS (via the COND macro) until the punit reports the
 * state change, with a 100ms timeout.
 */
1156 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1157 struct i915_power_well *power_well, bool enable)
1159 int pw_idx = power_well->desc->vlv.idx;
1164 mask = PUNIT_PWRGT_MASK(pw_idx);
1165 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1166 PUNIT_PWRGT_PWR_GATE(pw_idx);
/* Take the punit sideband lock for the read/modify/write + poll below. */
1168 vlv_punit_get(dev_priv);
1171 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1176 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1179 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl)
1181 if (wait_for(COND, 100))
1182 drm_err(&dev_priv->drm,
1183 "timeout setting power well state %08x (%08x)\n",
1185 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1190 vlv_punit_put(dev_priv);
/* Thin enable hook: power the well on through the punit. */
1193 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1194 struct i915_power_well *power_well)
1196 vlv_set_power_well(dev_priv, power_well, true);
/* Thin disable hook: gate the well through the punit. */
1199 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1200 struct i915_power_well *power_well)
1202 vlv_set_power_well(dev_priv, power_well, false);
/*
 * VLV: query a well's state from the punit.  Also sanity-checks that the
 * status is one of the two states we ever program (on / gated) and that
 * the control register agrees with the status (no transient in flight).
 */
1205 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1206 struct i915_power_well *power_well)
1208 int pw_idx = power_well->desc->vlv.idx;
1209 bool enabled = false;
1214 mask = PUNIT_PWRGT_MASK(pw_idx);
1215 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1217 vlv_punit_get(dev_priv);
1219 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1221 * We only ever set the power-on and power-gate states, anything
1222 * else is unexpected.
1224 WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1225 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1230 * A transient state at this point would mean some unexpected party
1231 * is poking at the power controls too.
1233 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1234 WARN_ON(ctrl != state);
1236 vlv_punit_put(dev_priv);
/*
 * VLV display-well bring-up: program clock gating, arbiter and rawclk
 * registers to known-good values while preserving the DSI-related
 * DPOUNIT gate-disable bit (see comment below).
 */
1241 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1246 * On driver load, a pipe may be active and driving a DSI display.
1247 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1248 * (and never recovering) in this case. intel_dsi_post_disable() will
1249 * clear it when we turn off the display.
1251 val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1252 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1253 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1254 intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1257 * Disable trickle feed and enable pnd deadline calculation
1259 intel_de_write(dev_priv, MI_ARB_VLV,
1260 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1261 intel_de_write(dev_priv, CBR1_VLV, 0);
/* rawclk_freq must be known by now; RAWCLK_FREQ_VLV is derived from it. */
1263 WARN_ON(RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1264 intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1265 DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
/*
 * Common VLV/CHV display power well init: bring up the CRI/ref clocks on
 * every pipe's DPLL, re-init clock gating and display IRQs, then (except
 * during driver init/resume) restore hotplug, CRT ADPA, VGA and PPS state
 * that was lost while the well was down.
 */
1269 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1271 struct intel_encoder *encoder;
1275 * Enable the CRI clock source so we can get at the
1276 * display and the reference clock for VGA
1277 * hotplug / manual detection. Supposedly DSI also
1278 * needs the ref clock up and running.
1280 * CHV DPLL B/C have some issues if VGA mode is enabled.
1282 for_each_pipe(dev_priv, pipe) {
1283 u32 val = intel_de_read(dev_priv, DPLL(pipe));
1285 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1287 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1289 intel_de_write(dev_priv, DPLL(pipe), val);
1292 vlv_init_display_clock_gating(dev_priv);
1294 spin_lock_irq(&dev_priv->irq_lock);
1295 valleyview_enable_display_irqs(dev_priv);
1296 spin_unlock_irq(&dev_priv->irq_lock);
1299 * During driver initialization/resume we can avoid restoring the
1300 * part of the HW/SW state that will be inited anyway explicitly.
1302 if (dev_priv->power_domains.initializing)
1305 intel_hpd_init(dev_priv);
1307 /* Re-enable the ADPA, if we have one */
1308 for_each_intel_encoder(&dev_priv->drm, encoder) {
1309 if (encoder->type == INTEL_OUTPUT_ANALOG)
1310 intel_crt_reset(&encoder->base);
1313 intel_vga_redisable_power_on(dev_priv);
1315 intel_pps_unlock_regs_wa(dev_priv);
/*
 * Common VLV/CHV display power well teardown: quiesce display IRQs,
 * reset the panel power sequencer state and switch HPD to polling,
 * unless we are in late suspend.
 */
1318 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1320 spin_lock_irq(&dev_priv->irq_lock);
1321 valleyview_disable_display_irqs(dev_priv);
1322 spin_unlock_irq(&dev_priv->irq_lock);
1324 /* make sure we're done processing display irqs */
1325 intel_synchronize_irq(dev_priv);
1327 intel_power_sequencer_reset(dev_priv);
1329 /* Prevent us from re-enabling polling on accident in late suspend */
1330 if (!dev_priv->drm.dev->power.is_suspended)
1331 intel_hpd_poll_init(dev_priv);
/* Power the display well on, then redo the display-side init it lost. */
1334 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1335 struct i915_power_well *power_well)
1337 vlv_set_power_well(dev_priv, power_well, true);
1339 vlv_display_power_well_init(dev_priv);
/* Tear down display-side state first, then gate the well (reverse of enable). */
1342 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1343 struct i915_power_well *power_well)
1345 vlv_display_power_well_deinit(dev_priv);
1347 vlv_set_power_well(dev_priv, power_well, false);
/*
 * VLV DPIO common lane well enable: power the well on, then de-assert
 * cmn/side reset per the VBIOS notes quoted below.  Must only run on
 * init/resume with both PLLs disabled, or DPIO/PLL state may be lost.
 */
1350 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1351 struct i915_power_well *power_well)
1353 /* since ref/cri clock was enabled */
1354 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1356 vlv_set_power_well(dev_priv, power_well, true);
1359 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1360 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1361 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1362 * b. The other bits such as sfr settings / modesel may all
1365 * This should only be done on init and resume from S3 with
1366 * both PLLs disabled, or we risk losing DPIO and PLL
1369 intel_de_write(dev_priv, DPIO_CTL,
1370 intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
/*
 * VLV DPIO common lane well disable: verify all PLLs are off, assert
 * common reset, then gate the well.
 */
1373 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1374 struct i915_power_well *power_well)
1378 for_each_pipe(dev_priv, pipe)
1379 assert_pll_disabled(dev_priv, pipe);
1381 /* Assert common reset */
1382 intel_de_write(dev_priv, DPIO_CTL,
1383 intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1385 vlv_set_power_well(dev_priv, power_well, false);
/* Mask with one bit set for every power domain. */
1388 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
/* True iff every bit in @bits is set in @val. */
1390 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
/*
 * CHV: cross-check DISPLAY_PHY_STATUS against the expected value derived
 * from the SW-tracked chv_phy_control.  Builds the expected phy_status
 * from which common-lane wells are on and which per-channel lane
 * power-down overrides are active, then waits (PHY calibration can delay
 * the state change) and logs an error on mismatch.
 */
1392 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1394 struct i915_power_well *cmn_bc =
1395 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1396 struct i915_power_well *cmn_d =
1397 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1398 u32 phy_control = dev_priv->chv_phy_control;
1400 u32 phy_status_mask = 0xffffffff;
1403 * The BIOS can leave the PHY is some weird state
1404 * where it doesn't fully power down some parts.
1405 * Disable the asserts until the PHY has been fully
1406 * reset (ie. the power well has been disabled at
1409 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1410 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1411 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1412 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1413 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1414 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1415 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1417 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1418 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1419 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1420 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
/* PHY0 (channels B/C) expectations, only if its common well is on. */
1422 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1423 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1425 /* this assumes override is only used to enable lanes */
1426 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1427 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1429 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1430 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1432 /* CL1 is on whenever anything is on in either channel */
1433 if (BITS_SET(phy_control,
1434 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1435 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1436 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1439 * The DPLLB check accounts for the pipe B + port A usage
1440 * with CL2 powered up but all the lanes in the second channel
1443 if (BITS_SET(phy_control,
1444 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1445 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1446 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
/* Spline LDOs: lanes 0/1 map to override bits 0x3, lanes 2/3 to 0xc. */
1448 if (BITS_SET(phy_control,
1449 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1450 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1451 if (BITS_SET(phy_control,
1452 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1453 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1455 if (BITS_SET(phy_control,
1456 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1457 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1458 if (BITS_SET(phy_control,
1459 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1460 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
/* PHY1 (channel D) expectations; this PHY has a single channel. */
1463 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1464 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1466 /* this assumes override is only used to enable lanes */
1467 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1468 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1470 if (BITS_SET(phy_control,
1471 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1472 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1474 if (BITS_SET(phy_control,
1475 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1476 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1477 if (BITS_SET(phy_control,
1478 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1479 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1482 phy_status &= phy_status_mask;
1485 * The PHY may be busy with some initial calibration and whatnot,
1486 * so the power state can take a while to actually change.
1488 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1489 phy_status_mask, phy_status, 10))
1490 drm_err(&dev_priv->drm,
1491 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1492 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1493 phy_status, dev_priv->chv_phy_control);
/*
 * CHV DPIO common lane well enable: power the well on, wait for the PHY's
 * powergood, enable dynamic power down via sideband, de-assert the common
 * lane reset in DISPLAY_PHY_CONTROL, then verify the PHY status.
 * Valid only for the CMN_BC and CMN_D wells (enforced by the WARN).
 */
1498 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1499 struct i915_power_well *power_well)
1505 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1506 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1508 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1516 /* since ref/cri clock was enabled */
1517 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1518 vlv_set_power_well(dev_priv, power_well, true);
1520 /* Poll for phypwrgood signal */
1521 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1522 PHY_POWERGOOD(phy), 1))
1523 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
/* Sideband access below needs the dpio lock. */
1526 vlv_dpio_get(dev_priv);
1528 /* Enable dynamic power down */
1529 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1530 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1531 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1532 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1534 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1535 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1536 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1537 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1540 * Force the non-existing CL2 off. BXT does this
1541 * too, so maybe it saves some power even though
1542 * CL2 doesn't exist?
1544 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1545 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1546 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1549 vlv_dpio_put(dev_priv);
1551 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1552 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1553 dev_priv->chv_phy_control);
1555 drm_dbg_kms(&dev_priv->drm,
1556 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1557 phy, dev_priv->chv_phy_control);
1559 assert_chv_phy_status(dev_priv);
/*
 * CHV DPIO common lane well disable: check the relevant PLLs are off,
 * assert the common lane reset, gate the well, and — since the PHY is now
 * fully reset — re-arm the PHY state asserts for this PHY.
 */
1562 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1563 struct i915_power_well *power_well)
1567 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1568 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1570 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1572 assert_pll_disabled(dev_priv, PIPE_A);
1573 assert_pll_disabled(dev_priv, PIPE_B);
1576 assert_pll_disabled(dev_priv, PIPE_C);
1579 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1580 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1581 dev_priv->chv_phy_control);
1583 vlv_set_power_well(dev_priv, power_well, false);
1585 drm_dbg_kms(&dev_priv->drm,
1586 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1587 phy, dev_priv->chv_phy_control);
1589 /* PHY is fully reset now, so we can enable the PHY state asserts */
1590 dev_priv->chv_phy_assert[phy] = true;
1592 assert_chv_phy_status(dev_priv);
/*
 * CHV: verify that the per-lane power-down status read back over sideband
 * matches what the override state (@override, @mask) implies.  @mask is
 * the 4-bit lane mask being enabled; with no override or a full mask all
 * lanes must report powered down when the port is off.
 */
1595 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1596 enum dpio_channel ch, bool override, unsigned int mask)
/* PHY0 is accessed via pipe A's sideband port, PHY1 via pipe C's. */
1598 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1599 u32 reg, val, expected, actual;
1602 * The BIOS can leave the PHY is some weird state
1603 * where it doesn't fully power down some parts.
1604 * Disable the asserts until the PHY has been fully
1605 * reset (ie. the power well has been disabled at
1608 if (!dev_priv->chv_phy_assert[phy])
1612 reg = _CHV_CMN_DW0_CH0;
1614 reg = _CHV_CMN_DW6_CH1;
1616 vlv_dpio_get(dev_priv);
1617 val = vlv_dpio_read(dev_priv, pipe, reg);
1618 vlv_dpio_put(dev_priv);
1621 * This assumes !override is only used when the port is disabled.
1622 * All lanes should power down even without the override when
1623 * the port is disabled.
1625 if (!override || mask == 0xf) {
1626 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1628 * If CH1 common lane is not active anymore
1629 * (eg. for pipe B DPLL) the entire channel will
1630 * shut down, which causes the common lane registers
1631 * to read as 0. That means we can't actually check
1632 * the lane power down status bits, but as the entire
1633 * register reads as 0 it's a good indication that the
1634 * channel is indeed entirely powered down.
1636 if (ch == DPIO_CH1 && val == 0)
1638 } else if (mask != 0x0) {
1639 expected = DPIO_ANYDL_POWERDOWN;
1645 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1647 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1648 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1650 WARN(actual != expected,
1651 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1652 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1653 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
/*
 * CHV: set or clear the power-down override enable for one PHY channel.
 * Takes the power_domains lock, updates the cached chv_phy_control,
 * writes it out and re-checks the PHY status.  Returns the previous
 * override state so the caller can restore it.
 */
1657 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1658 enum dpio_channel ch, bool override)
1660 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1663 mutex_lock(&power_domains->lock);
1665 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
/* Nothing to do if the override already matches the request. */
1667 if (override == was_override)
1671 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1673 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1675 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1676 dev_priv->chv_phy_control);
1678 drm_dbg_kms(&dev_priv->drm,
1679 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1680 phy, ch, dev_priv->chv_phy_control);
1682 assert_chv_phy_status(dev_priv);
1685 mutex_unlock(&power_domains->lock);
1687 return was_override;
/*
 * CHV: program the per-lane power-down override mask for the encoder's
 * PHY channel under the power_domains lock, then verify both the PHY
 * status and the resulting lane power-down state.
 */
1690 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1691 bool override, unsigned int mask)
1693 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1694 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1695 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1696 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1698 mutex_lock(&power_domains->lock);
/* Replace the previous lane mask for this channel with @mask. */
1700 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1701 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1704 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1706 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1708 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1709 dev_priv->chv_phy_control);
1711 drm_dbg_kms(&dev_priv->drm,
1712 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1713 phy, ch, mask, dev_priv->chv_phy_control);
1715 assert_chv_phy_status(dev_priv);
1717 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1719 mutex_unlock(&power_domains->lock);
/*
 * CHV: query the pipe-A power well state from the punit DSPSSPM register,
 * sanity-checking that status is one of the two programmed states and
 * that control agrees with status (no transient in flight).
 */
1722 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1723 struct i915_power_well *power_well)
1725 enum pipe pipe = PIPE_A;
1729 vlv_punit_get(dev_priv);
1731 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1733 * We only ever set the power-on and power-gate states, anything
1734 * else is unexpected.
1736 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1737 enabled = state == DP_SSS_PWR_ON(pipe);
1740 * A transient state at this point would mean some unexpected party
1741 * is poking at the power controls too.
/* The SSC (control) field sits 16 bits below the SSS (status) field. */
1743 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1744 WARN_ON(ctrl << 16 != state);
1746 vlv_punit_put(dev_priv);
/*
 * CHV: switch the pipe-A power well on/off via the punit DSPSSPM
 * register and poll (COND) until the status field reflects the request,
 * with a 100ms timeout.
 */
1751 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1752 struct i915_power_well *power_well,
1755 enum pipe pipe = PIPE_A;
1759 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1761 vlv_punit_get(dev_priv);
1764 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1769 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1770 ctrl &= ~DP_SSC_MASK(pipe);
1771 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1772 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1774 if (wait_for(COND, 100))
1775 drm_err(&dev_priv->drm,
1776 "timeout setting power well state %08x (%08x)\n",
1778 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1783 vlv_punit_put(dev_priv);
/* sync_hw: rewrite DISPLAY_PHY_CONTROL from the SW-cached value. */
1786 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1787 struct i915_power_well *power_well)
1789 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1790 dev_priv->chv_phy_control);
/* Power the pipe well on, then redo the display-side init it lost. */
1793 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1794 struct i915_power_well *power_well)
1796 chv_set_pipe_power_well(dev_priv, power_well, true);
1798 vlv_display_power_well_init(dev_priv);
/* Tear down display-side state first, then gate the pipe well. */
1801 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1802 struct i915_power_well *power_well)
1804 vlv_display_power_well_deinit(dev_priv);
1806 chv_set_pipe_power_well(dev_priv, power_well, false);
/* Union of both pending async-put domain masks (no disjointness check). */
1809 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1811 return power_domains->async_put_domains[0] |
1812 power_domains->async_put_domains[1];
/* Debug-only (CONFIG_DRM_I915_DEBUG_RUNTIME_PM) async-put state checking. */
1815 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/* A domain must never be pending in both async-put masks at once. */
1818 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1820 return !WARN_ON(power_domains->async_put_domains[0] &
1821 power_domains->async_put_domains[1]);
/*
 * Check async-put invariants: masks disjoint, a wakeref held iff any
 * domain is pending, and every pending domain has use count exactly 1.
 */
1825 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1827 enum intel_display_power_domain domain;
1830 err |= !assert_async_put_domain_masks_disjoint(power_domains);
1831 err |= WARN_ON(!!power_domains->async_put_wakeref !=
1832 !!__async_put_domains_mask(power_domains));
1834 for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1835 err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
/* Debug dump: log each domain in @mask with its current use count. */
1840 static void print_power_domains(struct i915_power_domains *power_domains,
1841 const char *prefix, u64 mask)
1843 enum intel_display_power_domain domain;
1845 DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1846 for_each_power_domain(domain, mask)
1847 DRM_DEBUG_DRIVER("%s use_count %d\n",
1848 intel_display_power_domain_str(domain),
1849 power_domains->domain_use_count[domain]);
/* Debug dump of the whole async-put state: wakeref plus both masks. */
1853 print_async_put_domains_state(struct i915_power_domains *power_domains)
1855 DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1856 power_domains->async_put_wakeref);
1858 print_power_domains(power_domains, "async_put_domains[0]",
1859 power_domains->async_put_domains[0]);
1860 print_power_domains(power_domains, "async_put_domains[1]",
1861 power_domains->async_put_domains[1]);
/* Verify async-put invariants; dump the state on any violation. */
1865 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1867 if (!__async_put_domains_state_ok(power_domains))
1868 print_async_put_domains_state(power_domains)
/* Non-debug build: the checks above compile to no-op stubs. */
1874 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1879 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1883 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
/* Checked variant: assert the masks are disjoint, then return their union. */
1885 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1887 assert_async_put_domain_masks_disjoint(power_domains);
1889 return __async_put_domains_mask(power_domains);
/* Remove @domain from both pending async-put masks. */
1893 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1894 enum intel_display_power_domain domain)
1896 assert_async_put_domain_masks_disjoint(power_domains);
1898 power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1899 power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
/*
 * If @domain has a pending async put, take over that reference instead of
 * acquiring a new one: clear it from the pending masks and, when no
 * pending domains remain, cancel the queued work and drop its raw
 * wakeref.  Caller holds power_domains->lock.
 */
1903 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1904 enum intel_display_power_domain domain)
1906 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1909 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1912 async_put_domains_clear_domain(power_domains, domain);
1916 if (async_put_domains_mask(power_domains))
1919 cancel_delayed_work(&power_domains->async_put_work);
1920 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1921 fetch_and_zero(&power_domains->async_put_wakeref));
1923 verify_async_put_domains_state(power_domains);
/*
 * Grab a reference on @domain: either recycle a pending async-put ref or
 * enable every power well backing the domain, then bump the use count.
 * Caller holds power_domains->lock.
 */
1929 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1930 enum intel_display_power_domain domain)
1932 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1933 struct i915_power_well *power_well;
1935 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1938 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1939 intel_power_well_get(dev_priv, power_well);
1941 power_domains->domain_use_count[domain]++;
1945 * intel_display_power_get - grab a power domain reference
1946 * @dev_priv: i915 device instance
1947 * @domain: power domain to reference
1949 * This function grabs a power domain reference for @domain and ensures that the
1950 * power domain and all its parents are powered up. Therefore users should only
1951 * grab a reference to the innermost power domain they need.
1953 * Any power domain reference obtained by this function must have a symmetric
1954 * call to intel_display_power_put() to release the reference again.
1956 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1957 enum intel_display_power_domain domain)
1959 struct i915_power_domains *power_domains = &dev_priv->power_domains;
/* Hold a runtime-pm wakeref for as long as the domain ref is held. */
1960 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1962 mutex_lock(&power_domains->lock);
1963 __intel_display_power_get_domain(dev_priv, domain);
1964 mutex_unlock(&power_domains->lock);
1970 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1971 * @dev_priv: i915 device instance
1972 * @domain: power domain to reference
1974 * This function grabs a power domain reference for @domain and ensures that the
1975 * power domain and all its parents are powered up. Therefore users should only
1976 * grab a reference to the innermost power domain they need.
1978 * Any power domain reference obtained by this function must have a symmetric
1979 * call to intel_display_power_put() to release the reference again.
1982 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1983 enum intel_display_power_domain domain)
1985 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1986 intel_wakeref_t wakeref;
/* Only succeed if the device is already runtime-active (no wakeup). */
1989 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
1993 mutex_lock(&power_domains->lock);
/* Only take the reference if the domain is already powered up. */
1995 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1996 __intel_display_power_get_domain(dev_priv, domain);
2002 mutex_unlock(&power_domains->lock);
/* Domain was not enabled: drop the wakeref again and report failure. */
2005 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * Drop a reference on @domain: decrement its use count and release the
 * backing power wells in reverse order.  Warns on zero use count or on a
 * put racing with a pending async put.  Caller holds power_domains->lock.
 */
2013 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2014 enum intel_display_power_domain domain)
2016 struct i915_power_domains *power_domains;
2017 struct i915_power_well *power_well;
2018 const char *name = intel_display_power_domain_str(domain);
2020 power_domains = &dev_priv->power_domains;
2022 WARN(!power_domains->domain_use_count[domain],
2023 "Use count on domain %s is already zero\n",
2025 WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
2026 "Async disabling of domain %s is pending\n",
2029 power_domains->domain_use_count[domain]--;
2031 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2032 intel_power_well_put(dev_priv, power_well);
/* Locked wrapper around __intel_display_power_put_domain(). */
2035 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2036 enum intel_display_power_domain domain)
2038 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2040 mutex_lock(&power_domains->lock);
2041 __intel_display_power_put_domain(dev_priv, domain);
2042 mutex_unlock(&power_domains->lock);
2046 * intel_display_power_put_unchecked - release an unchecked power domain reference
2047 * @dev_priv: i915 device instance
2048 * @domain: power domain to reference
2050 * This function drops the power domain reference obtained by
2051 * intel_display_power_get() and might power down the corresponding hardware
2052 * block right away if this is the last reference.
2054 * This function exists only for historical reasons and should be avoided in
2055 * new code, as the correctness of its use cannot be checked. Always use
2056 * intel_display_power_put() instead.
2058 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2059 enum intel_display_power_domain domain)
/* Drop the domain ref, then the runtime-pm wakeref taken at get time. */
2061 __intel_display_power_put(dev_priv, domain);
2062 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
/*
 * Store @wakeref for the async-put work and queue the work with a 100ms
 * delay.  Warns if a wakeref is already stashed or the work was already
 * queued.
 */
2066 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2067 intel_wakeref_t wakeref)
2069 WARN_ON(power_domains->async_put_wakeref);
2070 power_domains->async_put_wakeref = wakeref;
2071 WARN_ON(!queue_delayed_work(system_unbound_wq,
2072 &power_domains->async_put_work,
2073 msecs_to_jiffies(100)));
/*
 * Release every domain in @mask that was put asynchronously.  The caller
 * holds a raw wakeref; upgrade it to a full wakeref for the duration so
 * the rpm state checker accepts the HW access done while disabling the
 * wells.
 */
2077 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2079 struct drm_i915_private *dev_priv =
2080 container_of(power_domains, struct drm_i915_private,
2082 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2083 enum intel_display_power_domain domain;
2084 intel_wakeref_t wakeref;
2087 * The caller must hold already raw wakeref, upgrade that to a proper
2088 * wakeref to make the state checker happy about the HW access during
2089 * power well disabling.
2091 assert_rpm_raw_wakeref_held(rpm);
2092 wakeref = intel_runtime_pm_get(rpm);
2094 for_each_power_domain(domain, mask) {
2095 /* Clear before put, so put's sanity check is happy. */
2096 async_put_domains_clear_domain(power_domains, domain);
2097 __intel_display_power_put_domain(dev_priv, domain);
2100 intel_runtime_pm_put(rpm, wakeref);
/*
 * Delayed-work handler for async power domain puts.  Releases the
 * domains batched in async_put_domains[0]; if more puts arrived
 * meanwhile (slot [1]), promotes them to slot [0] and requeues itself,
 * handing over the freshly taken raw wakeref.
 */
2104 intel_display_power_put_async_work(struct work_struct *work)
2106 struct drm_i915_private *dev_priv =
2107 container_of(work, struct drm_i915_private,
2108 power_domains.async_put_work.work);
2109 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2110 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2111 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2112 intel_wakeref_t old_work_wakeref = 0;
2114 mutex_lock(&power_domains->lock);
2117 * Bail out if all the domain refs pending to be released were grabbed
2118 * by subsequent gets or a flush_work.
2120 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2121 if (!old_work_wakeref)
2124 release_async_put_domains(power_domains,
2125 power_domains->async_put_domains[0]);
2127 /* Requeue the work if more domains were async put meanwhile. */
2128 if (power_domains->async_put_domains[1]) {
2129 power_domains->async_put_domains[0] =
2130 fetch_and_zero(&power_domains->async_put_domains[1]);
2131 queue_async_put_domains_work(power_domains,
2132 fetch_and_zero(&new_work_wakeref));
2136 verify_async_put_domains_state(power_domains);
2138 mutex_unlock(&power_domains->lock);
/* Drop whichever wakerefs were not handed over to a requeued work. */
2140 if (old_work_wakeref)
2141 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2142 if (new_work_wakeref)
2143 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2147 * intel_display_power_put_async - release a power domain reference asynchronously
2148 * @i915: i915 device instance
2149 * @domain: power domain to reference
2150 * @wakeref: wakeref acquired for the reference that is being released
2152 * This function drops the power domain reference obtained by
2153 * intel_display_power_get*() and schedules a work to power down the
2154 * corresponding hardware block if this is the last reference.
2156 void __intel_display_power_put_async(struct drm_i915_private *i915,
2157 enum intel_display_power_domain domain,
2158 intel_wakeref_t wakeref)
2160 struct i915_power_domains *power_domains = &i915->power_domains;
2161 struct intel_runtime_pm *rpm = &i915->runtime_pm;
2162 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2164 mutex_lock(&power_domains->lock);
/* Not the last reference: drop it synchronously, no work needed. */
2166 if (power_domains->domain_use_count[domain] > 1) {
2167 __intel_display_power_put_domain(i915, domain);
2172 WARN_ON(power_domains->domain_use_count[domain] != 1);
2174 /* Let a pending work requeue itself or queue a new one. */
2175 if (power_domains->async_put_wakeref) {
2176 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2178 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2179 queue_async_put_domains_work(power_domains,
2180 fetch_and_zero(&work_wakeref));
2184 verify_async_put_domains_state(power_domains);
2186 mutex_unlock(&power_domains->lock);
/* Release the raw wakeref unless it was handed over to the work. */
2189 intel_runtime_pm_put_raw(rpm, work_wakeref);
2191 intel_runtime_pm_put(rpm, wakeref);
2195 * intel_display_power_flush_work - flushes the async display power disabling work
2196 * @i915: i915 device instance
2198 * Flushes any pending work that was scheduled by a preceding
2199 * intel_display_power_put_async() call, completing the disabling of the
2200 * corresponding power domains.
2202 * Note that the work handler function may still be running after this
2203 * function returns; to ensure that the work handler isn't running use
2204 * intel_display_power_flush_work_sync() instead.
2206 void intel_display_power_flush_work(struct drm_i915_private *i915)
2208 struct i915_power_domains *power_domains = &i915->power_domains;
2209 intel_wakeref_t work_wakeref;
2211 mutex_lock(&power_domains->lock);
/* Claim the work's wakeref; if there is none, nothing is pending. */
2213 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2217 release_async_put_domains(power_domains,
2218 async_put_domains_mask(power_domains));
2219 cancel_delayed_work(&power_domains->async_put_work);
2222 verify_async_put_domains_state(power_domains);
2224 mutex_unlock(&power_domains->lock);
2227 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2231 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2232 * @i915: i915 device instance
2234 * Like intel_display_power_flush_work(), but also ensure that the work
2235 * handler function is not running any more when this function returns.
2238 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2240 struct i915_power_domains *power_domains = &i915->power_domains;
2242 intel_display_power_flush_work(i915);
2243 cancel_delayed_work_sync(&power_domains->async_put_work);
2245 verify_async_put_domains_state(power_domains);
2247 WARN_ON(power_domains->async_put_wakeref);
2250 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2252 * intel_display_power_put - release a power domain reference
2253 * @dev_priv: i915 device instance
2254 * @domain: power domain to reference
2255 * @wakeref: wakeref acquired for the reference that is being released
2257 * This function drops the power domain reference obtained by
2258 * intel_display_power_get() and might power down the corresponding hardware
2259 * block right away if this is the last reference.
2261 void intel_display_power_put(struct drm_i915_private *dev_priv,
2262 enum intel_display_power_domain domain,
2263 intel_wakeref_t wakeref)
2265 __intel_display_power_put(dev_priv, domain);
2266 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Power-domain masks controlled by each i830/VLV display power well. */
#define I830_PIPES_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Power-domain masks controlled by each CHV display power well. */
#define CHV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Power-domain masks for the single HSW/BDW display power well. */
#define HSW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Power-domain masks for the SKL display power wells. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Power-domain masks for the BXT display power wells. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Power-domain masks for the GLK display power wells. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Power-domain masks for the CNL display power wells. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (	\
	ICL_PW_4_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (	\
	ICL_PW_3_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	ICL_PW_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
/* Power-domain masks for the TGL display power wells (PW_5 nests in PW_4 etc.). */
#define TGL_PW_5_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_D) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS (	\
	TGL_PW_5_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS (	\
	TGL_PW_4_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS (	\
	TGL_PW_3_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	TGL_PW_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))

#define TGL_AUX_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2767 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2768 .sync_hw = i9xx_power_well_sync_hw_noop,
2769 .enable = i9xx_always_on_power_well_noop,
2770 .disable = i9xx_always_on_power_well_noop,
2771 .is_enabled = i9xx_always_on_power_well_enabled,
2774 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2775 .sync_hw = chv_pipe_power_well_sync_hw,
2776 .enable = chv_pipe_power_well_enable,
2777 .disable = chv_pipe_power_well_disable,
2778 .is_enabled = chv_pipe_power_well_enabled,
2781 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2782 .sync_hw = i9xx_power_well_sync_hw_noop,
2783 .enable = chv_dpio_cmn_power_well_enable,
2784 .disable = chv_dpio_cmn_power_well_disable,
2785 .is_enabled = vlv_power_well_enabled,
2788 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2790 .name = "always-on",
2792 .domains = POWER_DOMAIN_MASK,
2793 .ops = &i9xx_always_on_power_well_ops,
2794 .id = DISP_PW_ID_NONE,
2798 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2799 .sync_hw = i830_pipes_power_well_sync_hw,
2800 .enable = i830_pipes_power_well_enable,
2801 .disable = i830_pipes_power_well_disable,
2802 .is_enabled = i830_pipes_power_well_enabled,
2805 static const struct i915_power_well_desc i830_power_wells[] = {
2807 .name = "always-on",
2809 .domains = POWER_DOMAIN_MASK,
2810 .ops = &i9xx_always_on_power_well_ops,
2811 .id = DISP_PW_ID_NONE,
2815 .domains = I830_PIPES_POWER_DOMAINS,
2816 .ops = &i830_pipes_power_well_ops,
2817 .id = DISP_PW_ID_NONE,
2821 static const struct i915_power_well_ops hsw_power_well_ops = {
2822 .sync_hw = hsw_power_well_sync_hw,
2823 .enable = hsw_power_well_enable,
2824 .disable = hsw_power_well_disable,
2825 .is_enabled = hsw_power_well_enabled,
2828 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2829 .sync_hw = i9xx_power_well_sync_hw_noop,
2830 .enable = gen9_dc_off_power_well_enable,
2831 .disable = gen9_dc_off_power_well_disable,
2832 .is_enabled = gen9_dc_off_power_well_enabled,
2835 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2836 .sync_hw = i9xx_power_well_sync_hw_noop,
2837 .enable = bxt_dpio_cmn_power_well_enable,
2838 .disable = bxt_dpio_cmn_power_well_disable,
2839 .is_enabled = bxt_dpio_cmn_power_well_enabled,
2842 static const struct i915_power_well_regs hsw_power_well_regs = {
2843 .bios = HSW_PWR_WELL_CTL1,
2844 .driver = HSW_PWR_WELL_CTL2,
2845 .kvmr = HSW_PWR_WELL_CTL3,
2846 .debug = HSW_PWR_WELL_CTL4,
2849 static const struct i915_power_well_desc hsw_power_wells[] = {
2851 .name = "always-on",
2853 .domains = POWER_DOMAIN_MASK,
2854 .ops = &i9xx_always_on_power_well_ops,
2855 .id = DISP_PW_ID_NONE,
2859 .domains = HSW_DISPLAY_POWER_DOMAINS,
2860 .ops = &hsw_power_well_ops,
2861 .id = HSW_DISP_PW_GLOBAL,
2863 .hsw.regs = &hsw_power_well_regs,
2864 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2865 .hsw.has_vga = true,
2870 static const struct i915_power_well_desc bdw_power_wells[] = {
2872 .name = "always-on",
2874 .domains = POWER_DOMAIN_MASK,
2875 .ops = &i9xx_always_on_power_well_ops,
2876 .id = DISP_PW_ID_NONE,
2880 .domains = BDW_DISPLAY_POWER_DOMAINS,
2881 .ops = &hsw_power_well_ops,
2882 .id = HSW_DISP_PW_GLOBAL,
2884 .hsw.regs = &hsw_power_well_regs,
2885 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2886 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2887 .hsw.has_vga = true,
2892 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2893 .sync_hw = i9xx_power_well_sync_hw_noop,
2894 .enable = vlv_display_power_well_enable,
2895 .disable = vlv_display_power_well_disable,
2896 .is_enabled = vlv_power_well_enabled,
2899 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2900 .sync_hw = i9xx_power_well_sync_hw_noop,
2901 .enable = vlv_dpio_cmn_power_well_enable,
2902 .disable = vlv_dpio_cmn_power_well_disable,
2903 .is_enabled = vlv_power_well_enabled,
2906 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2907 .sync_hw = i9xx_power_well_sync_hw_noop,
2908 .enable = vlv_power_well_enable,
2909 .disable = vlv_power_well_disable,
2910 .is_enabled = vlv_power_well_enabled,
2913 static const struct i915_power_well_desc vlv_power_wells[] = {
2915 .name = "always-on",
2917 .domains = POWER_DOMAIN_MASK,
2918 .ops = &i9xx_always_on_power_well_ops,
2919 .id = DISP_PW_ID_NONE,
2923 .domains = VLV_DISPLAY_POWER_DOMAINS,
2924 .ops = &vlv_display_power_well_ops,
2925 .id = VLV_DISP_PW_DISP2D,
2927 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2931 .name = "dpio-tx-b-01",
2932 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2933 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2934 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2935 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2936 .ops = &vlv_dpio_power_well_ops,
2937 .id = DISP_PW_ID_NONE,
2939 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2943 .name = "dpio-tx-b-23",
2944 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2945 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2946 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2947 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2948 .ops = &vlv_dpio_power_well_ops,
2949 .id = DISP_PW_ID_NONE,
2951 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2955 .name = "dpio-tx-c-01",
2956 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2957 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2958 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2959 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2960 .ops = &vlv_dpio_power_well_ops,
2961 .id = DISP_PW_ID_NONE,
2963 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2967 .name = "dpio-tx-c-23",
2968 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2969 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2970 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2971 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2972 .ops = &vlv_dpio_power_well_ops,
2973 .id = DISP_PW_ID_NONE,
2975 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2979 .name = "dpio-common",
2980 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2981 .ops = &vlv_dpio_cmn_power_well_ops,
2982 .id = VLV_DISP_PW_DPIO_CMN_BC,
2984 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2989 static const struct i915_power_well_desc chv_power_wells[] = {
2991 .name = "always-on",
2993 .domains = POWER_DOMAIN_MASK,
2994 .ops = &i9xx_always_on_power_well_ops,
2995 .id = DISP_PW_ID_NONE,
3000 * Pipe A power well is the new disp2d well. Pipe B and C
3001 * power wells don't actually exist. Pipe A power well is
3002 * required for any pipe to work.
3004 .domains = CHV_DISPLAY_POWER_DOMAINS,
3005 .ops = &chv_pipe_power_well_ops,
3006 .id = DISP_PW_ID_NONE,
3009 .name = "dpio-common-bc",
3010 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3011 .ops = &chv_dpio_cmn_power_well_ops,
3012 .id = VLV_DISP_PW_DPIO_CMN_BC,
3014 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3018 .name = "dpio-common-d",
3019 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3020 .ops = &chv_dpio_cmn_power_well_ops,
3021 .id = CHV_DISP_PW_DPIO_CMN_D,
3023 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3028 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3029 enum i915_power_well_id power_well_id)
3031 struct i915_power_well *power_well;
3034 power_well = lookup_power_well(dev_priv, power_well_id);
3035 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3040 static const struct i915_power_well_desc skl_power_wells[] = {
3042 .name = "always-on",
3044 .domains = POWER_DOMAIN_MASK,
3045 .ops = &i9xx_always_on_power_well_ops,
3046 .id = DISP_PW_ID_NONE,
3049 .name = "power well 1",
3050 /* Handled by the DMC firmware */
3053 .ops = &hsw_power_well_ops,
3054 .id = SKL_DISP_PW_1,
3056 .hsw.regs = &hsw_power_well_regs,
3057 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3058 .hsw.has_fuses = true,
3062 .name = "MISC IO power well",
3063 /* Handled by the DMC firmware */
3066 .ops = &hsw_power_well_ops,
3067 .id = SKL_DISP_PW_MISC_IO,
3069 .hsw.regs = &hsw_power_well_regs,
3070 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3075 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3076 .ops = &gen9_dc_off_power_well_ops,
3077 .id = SKL_DISP_DC_OFF,
3080 .name = "power well 2",
3081 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3082 .ops = &hsw_power_well_ops,
3083 .id = SKL_DISP_PW_2,
3085 .hsw.regs = &hsw_power_well_regs,
3086 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3087 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3088 .hsw.has_vga = true,
3089 .hsw.has_fuses = true,
3093 .name = "DDI A/E IO power well",
3094 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3095 .ops = &hsw_power_well_ops,
3096 .id = DISP_PW_ID_NONE,
3098 .hsw.regs = &hsw_power_well_regs,
3099 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3103 .name = "DDI B IO power well",
3104 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3105 .ops = &hsw_power_well_ops,
3106 .id = DISP_PW_ID_NONE,
3108 .hsw.regs = &hsw_power_well_regs,
3109 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3113 .name = "DDI C IO power well",
3114 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3115 .ops = &hsw_power_well_ops,
3116 .id = DISP_PW_ID_NONE,
3118 .hsw.regs = &hsw_power_well_regs,
3119 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3123 .name = "DDI D IO power well",
3124 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3125 .ops = &hsw_power_well_ops,
3126 .id = DISP_PW_ID_NONE,
3128 .hsw.regs = &hsw_power_well_regs,
3129 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3134 static const struct i915_power_well_desc bxt_power_wells[] = {
3136 .name = "always-on",
3138 .domains = POWER_DOMAIN_MASK,
3139 .ops = &i9xx_always_on_power_well_ops,
3140 .id = DISP_PW_ID_NONE,
3143 .name = "power well 1",
3144 /* Handled by the DMC firmware */
3147 .ops = &hsw_power_well_ops,
3148 .id = SKL_DISP_PW_1,
3150 .hsw.regs = &hsw_power_well_regs,
3151 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3152 .hsw.has_fuses = true,
3157 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3158 .ops = &gen9_dc_off_power_well_ops,
3159 .id = SKL_DISP_DC_OFF,
3162 .name = "power well 2",
3163 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3164 .ops = &hsw_power_well_ops,
3165 .id = SKL_DISP_PW_2,
3167 .hsw.regs = &hsw_power_well_regs,
3168 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3169 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3170 .hsw.has_vga = true,
3171 .hsw.has_fuses = true,
3175 .name = "dpio-common-a",
3176 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3177 .ops = &bxt_dpio_cmn_power_well_ops,
3178 .id = BXT_DISP_PW_DPIO_CMN_A,
3180 .bxt.phy = DPIO_PHY1,
3184 .name = "dpio-common-bc",
3185 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3186 .ops = &bxt_dpio_cmn_power_well_ops,
3187 .id = VLV_DISP_PW_DPIO_CMN_BC,
3189 .bxt.phy = DPIO_PHY0,
3194 static const struct i915_power_well_desc glk_power_wells[] = {
3196 .name = "always-on",
3198 .domains = POWER_DOMAIN_MASK,
3199 .ops = &i9xx_always_on_power_well_ops,
3200 .id = DISP_PW_ID_NONE,
3203 .name = "power well 1",
3204 /* Handled by the DMC firmware */
3207 .ops = &hsw_power_well_ops,
3208 .id = SKL_DISP_PW_1,
3210 .hsw.regs = &hsw_power_well_regs,
3211 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3212 .hsw.has_fuses = true,
3217 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3218 .ops = &gen9_dc_off_power_well_ops,
3219 .id = SKL_DISP_DC_OFF,
3222 .name = "power well 2",
3223 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3224 .ops = &hsw_power_well_ops,
3225 .id = SKL_DISP_PW_2,
3227 .hsw.regs = &hsw_power_well_regs,
3228 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3229 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3230 .hsw.has_vga = true,
3231 .hsw.has_fuses = true,
3235 .name = "dpio-common-a",
3236 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3237 .ops = &bxt_dpio_cmn_power_well_ops,
3238 .id = BXT_DISP_PW_DPIO_CMN_A,
3240 .bxt.phy = DPIO_PHY1,
3244 .name = "dpio-common-b",
3245 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3246 .ops = &bxt_dpio_cmn_power_well_ops,
3247 .id = VLV_DISP_PW_DPIO_CMN_BC,
3249 .bxt.phy = DPIO_PHY0,
3253 .name = "dpio-common-c",
3254 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3255 .ops = &bxt_dpio_cmn_power_well_ops,
3256 .id = GLK_DISP_PW_DPIO_CMN_C,
3258 .bxt.phy = DPIO_PHY2,
3263 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3264 .ops = &hsw_power_well_ops,
3265 .id = DISP_PW_ID_NONE,
3267 .hsw.regs = &hsw_power_well_regs,
3268 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3273 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3274 .ops = &hsw_power_well_ops,
3275 .id = DISP_PW_ID_NONE,
3277 .hsw.regs = &hsw_power_well_regs,
3278 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3283 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3284 .ops = &hsw_power_well_ops,
3285 .id = DISP_PW_ID_NONE,
3287 .hsw.regs = &hsw_power_well_regs,
3288 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3292 .name = "DDI A IO power well",
3293 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3294 .ops = &hsw_power_well_ops,
3295 .id = DISP_PW_ID_NONE,
3297 .hsw.regs = &hsw_power_well_regs,
3298 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3302 .name = "DDI B IO power well",
3303 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3304 .ops = &hsw_power_well_ops,
3305 .id = DISP_PW_ID_NONE,
3307 .hsw.regs = &hsw_power_well_regs,
3308 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3312 .name = "DDI C IO power well",
3313 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3314 .ops = &hsw_power_well_ops,
3315 .id = DISP_PW_ID_NONE,
3317 .hsw.regs = &hsw_power_well_regs,
3318 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3323 static const struct i915_power_well_desc cnl_power_wells[] = {
3325 .name = "always-on",
3327 .domains = POWER_DOMAIN_MASK,
3328 .ops = &i9xx_always_on_power_well_ops,
3329 .id = DISP_PW_ID_NONE,
3332 .name = "power well 1",
3333 /* Handled by the DMC firmware */
3336 .ops = &hsw_power_well_ops,
3337 .id = SKL_DISP_PW_1,
3339 .hsw.regs = &hsw_power_well_regs,
3340 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3341 .hsw.has_fuses = true,
3346 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3347 .ops = &hsw_power_well_ops,
3348 .id = DISP_PW_ID_NONE,
3350 .hsw.regs = &hsw_power_well_regs,
3351 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3356 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3357 .ops = &hsw_power_well_ops,
3358 .id = DISP_PW_ID_NONE,
3360 .hsw.regs = &hsw_power_well_regs,
3361 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3366 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3367 .ops = &hsw_power_well_ops,
3368 .id = DISP_PW_ID_NONE,
3370 .hsw.regs = &hsw_power_well_regs,
3371 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3376 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3377 .ops = &hsw_power_well_ops,
3378 .id = DISP_PW_ID_NONE,
3380 .hsw.regs = &hsw_power_well_regs,
3381 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3386 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3387 .ops = &gen9_dc_off_power_well_ops,
3388 .id = SKL_DISP_DC_OFF,
3391 .name = "power well 2",
3392 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3393 .ops = &hsw_power_well_ops,
3394 .id = SKL_DISP_PW_2,
3396 .hsw.regs = &hsw_power_well_regs,
3397 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3398 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3399 .hsw.has_vga = true,
3400 .hsw.has_fuses = true,
3404 .name = "DDI A IO power well",
3405 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3406 .ops = &hsw_power_well_ops,
3407 .id = DISP_PW_ID_NONE,
3409 .hsw.regs = &hsw_power_well_regs,
3410 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3414 .name = "DDI B IO power well",
3415 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3416 .ops = &hsw_power_well_ops,
3417 .id = DISP_PW_ID_NONE,
3419 .hsw.regs = &hsw_power_well_regs,
3420 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3424 .name = "DDI C IO power well",
3425 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3426 .ops = &hsw_power_well_ops,
3427 .id = DISP_PW_ID_NONE,
3429 .hsw.regs = &hsw_power_well_regs,
3430 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3434 .name = "DDI D IO power well",
3435 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3436 .ops = &hsw_power_well_ops,
3437 .id = DISP_PW_ID_NONE,
3439 .hsw.regs = &hsw_power_well_regs,
3440 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3444 .name = "DDI F IO power well",
3445 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3446 .ops = &hsw_power_well_ops,
3447 .id = DISP_PW_ID_NONE,
3449 .hsw.regs = &hsw_power_well_regs,
3450 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3455 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3456 .ops = &hsw_power_well_ops,
3457 .id = DISP_PW_ID_NONE,
3459 .hsw.regs = &hsw_power_well_regs,
3460 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3465 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3466 .sync_hw = hsw_power_well_sync_hw,
3467 .enable = icl_combo_phy_aux_power_well_enable,
3468 .disable = icl_combo_phy_aux_power_well_disable,
3469 .is_enabled = hsw_power_well_enabled,
3472 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3473 .sync_hw = hsw_power_well_sync_hw,
3474 .enable = icl_tc_phy_aux_power_well_enable,
3475 .disable = icl_tc_phy_aux_power_well_disable,
3476 .is_enabled = hsw_power_well_enabled,
3479 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3480 .bios = ICL_PWR_WELL_CTL_AUX1,
3481 .driver = ICL_PWR_WELL_CTL_AUX2,
3482 .debug = ICL_PWR_WELL_CTL_AUX4,
3485 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3486 .bios = ICL_PWR_WELL_CTL_DDI1,
3487 .driver = ICL_PWR_WELL_CTL_DDI2,
3488 .debug = ICL_PWR_WELL_CTL_DDI4,
3491 static const struct i915_power_well_desc icl_power_wells[] = {
3493 .name = "always-on",
3495 .domains = POWER_DOMAIN_MASK,
3496 .ops = &i9xx_always_on_power_well_ops,
3497 .id = DISP_PW_ID_NONE,
3500 .name = "power well 1",
3501 /* Handled by the DMC firmware */
3504 .ops = &hsw_power_well_ops,
3505 .id = SKL_DISP_PW_1,
3507 .hsw.regs = &hsw_power_well_regs,
3508 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3509 .hsw.has_fuses = true,
3514 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3515 .ops = &gen9_dc_off_power_well_ops,
3516 .id = SKL_DISP_DC_OFF,
3519 .name = "power well 2",
3520 .domains = ICL_PW_2_POWER_DOMAINS,
3521 .ops = &hsw_power_well_ops,
3522 .id = SKL_DISP_PW_2,
3524 .hsw.regs = &hsw_power_well_regs,
3525 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3526 .hsw.has_fuses = true,
3530 .name = "power well 3",
3531 .domains = ICL_PW_3_POWER_DOMAINS,
3532 .ops = &hsw_power_well_ops,
3533 .id = DISP_PW_ID_NONE,
3535 .hsw.regs = &hsw_power_well_regs,
3536 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3537 .hsw.irq_pipe_mask = BIT(PIPE_B),
3538 .hsw.has_vga = true,
3539 .hsw.has_fuses = true,
3544 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3545 .ops = &hsw_power_well_ops,
3546 .id = DISP_PW_ID_NONE,
3548 .hsw.regs = &icl_ddi_power_well_regs,
3549 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3554 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3555 .ops = &hsw_power_well_ops,
3556 .id = DISP_PW_ID_NONE,
3558 .hsw.regs = &icl_ddi_power_well_regs,
3559 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3564 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3565 .ops = &hsw_power_well_ops,
3566 .id = DISP_PW_ID_NONE,
3568 .hsw.regs = &icl_ddi_power_well_regs,
3569 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3574 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3575 .ops = &hsw_power_well_ops,
3576 .id = DISP_PW_ID_NONE,
3578 .hsw.regs = &icl_ddi_power_well_regs,
3579 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3584 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3585 .ops = &hsw_power_well_ops,
3586 .id = DISP_PW_ID_NONE,
3588 .hsw.regs = &icl_ddi_power_well_regs,
3589 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3594 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3595 .ops = &hsw_power_well_ops,
3596 .id = DISP_PW_ID_NONE,
3598 .hsw.regs = &icl_ddi_power_well_regs,
3599 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3604 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3605 .ops = &icl_combo_phy_aux_power_well_ops,
3606 .id = DISP_PW_ID_NONE,
3608 .hsw.regs = &icl_aux_power_well_regs,
3609 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3614 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3615 .ops = &icl_combo_phy_aux_power_well_ops,
3616 .id = DISP_PW_ID_NONE,
3618 .hsw.regs = &icl_aux_power_well_regs,
3619 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3623 .name = "AUX C TC1",
3624 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3625 .ops = &icl_tc_phy_aux_power_well_ops,
3626 .id = DISP_PW_ID_NONE,
3628 .hsw.regs = &icl_aux_power_well_regs,
3629 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3630 .hsw.is_tc_tbt = false,
3634 .name = "AUX D TC2",
3635 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3636 .ops = &icl_tc_phy_aux_power_well_ops,
3637 .id = DISP_PW_ID_NONE,
3639 .hsw.regs = &icl_aux_power_well_regs,
3640 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3641 .hsw.is_tc_tbt = false,
3645 .name = "AUX E TC3",
3646 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3647 .ops = &icl_tc_phy_aux_power_well_ops,
3648 .id = DISP_PW_ID_NONE,
3650 .hsw.regs = &icl_aux_power_well_regs,
3651 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3652 .hsw.is_tc_tbt = false,
3656 .name = "AUX F TC4",
3657 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3658 .ops = &icl_tc_phy_aux_power_well_ops,
3659 .id = DISP_PW_ID_NONE,
3661 .hsw.regs = &icl_aux_power_well_regs,
3662 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3663 .hsw.is_tc_tbt = false,
3667 .name = "AUX C TBT1",
3668 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3669 .ops = &icl_tc_phy_aux_power_well_ops,
3670 .id = DISP_PW_ID_NONE,
3672 .hsw.regs = &icl_aux_power_well_regs,
3673 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3674 .hsw.is_tc_tbt = true,
3678 .name = "AUX D TBT2",
3679 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3680 .ops = &icl_tc_phy_aux_power_well_ops,
3681 .id = DISP_PW_ID_NONE,
3683 .hsw.regs = &icl_aux_power_well_regs,
3684 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3685 .hsw.is_tc_tbt = true,
3689 .name = "AUX E TBT3",
3690 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3691 .ops = &icl_tc_phy_aux_power_well_ops,
3692 .id = DISP_PW_ID_NONE,
3694 .hsw.regs = &icl_aux_power_well_regs,
3695 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3696 .hsw.is_tc_tbt = true,
3700 .name = "AUX F TBT4",
3701 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3702 .ops = &icl_tc_phy_aux_power_well_ops,
3703 .id = DISP_PW_ID_NONE,
3705 .hsw.regs = &icl_aux_power_well_regs,
3706 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3707 .hsw.is_tc_tbt = true,
3711 .name = "power well 4",
3712 .domains = ICL_PW_4_POWER_DOMAINS,
3713 .ops = &hsw_power_well_ops,
3714 .id = DISP_PW_ID_NONE,
3716 .hsw.regs = &hsw_power_well_regs,
3717 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3718 .hsw.has_fuses = true,
3719 .hsw.irq_pipe_mask = BIT(PIPE_C),
3724 static const struct i915_power_well_desc ehl_power_wells[] = {
3726 .name = "always-on",
3728 .domains = POWER_DOMAIN_MASK,
3729 .ops = &i9xx_always_on_power_well_ops,
3730 .id = DISP_PW_ID_NONE,
3733 .name = "power well 1",
3734 /* Handled by the DMC firmware */
3737 .ops = &hsw_power_well_ops,
3738 .id = SKL_DISP_PW_1,
3740 .hsw.regs = &hsw_power_well_regs,
3741 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3742 .hsw.has_fuses = true,
3747 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3748 .ops = &gen9_dc_off_power_well_ops,
3749 .id = SKL_DISP_DC_OFF,
3752 .name = "power well 2",
3753 .domains = ICL_PW_2_POWER_DOMAINS,
3754 .ops = &hsw_power_well_ops,
3755 .id = SKL_DISP_PW_2,
3757 .hsw.regs = &hsw_power_well_regs,
3758 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3759 .hsw.has_fuses = true,
3763 .name = "power well 3",
3764 .domains = ICL_PW_3_POWER_DOMAINS,
3765 .ops = &hsw_power_well_ops,
3766 .id = DISP_PW_ID_NONE,
3768 .hsw.regs = &hsw_power_well_regs,
3769 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3770 .hsw.irq_pipe_mask = BIT(PIPE_B),
3771 .hsw.has_vga = true,
3772 .hsw.has_fuses = true,
3777 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3778 .ops = &hsw_power_well_ops,
3779 .id = DISP_PW_ID_NONE,
3781 .hsw.regs = &icl_ddi_power_well_regs,
3782 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3787 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3788 .ops = &hsw_power_well_ops,
3789 .id = DISP_PW_ID_NONE,
3791 .hsw.regs = &icl_ddi_power_well_regs,
3792 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3797 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3798 .ops = &hsw_power_well_ops,
3799 .id = DISP_PW_ID_NONE,
3801 .hsw.regs = &icl_ddi_power_well_regs,
3802 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3807 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3808 .ops = &hsw_power_well_ops,
3809 .id = DISP_PW_ID_NONE,
3811 .hsw.regs = &icl_ddi_power_well_regs,
3812 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3817 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3818 .ops = &hsw_power_well_ops,
3819 .id = DISP_PW_ID_NONE,
3821 .hsw.regs = &icl_aux_power_well_regs,
3822 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3827 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3828 .ops = &hsw_power_well_ops,
3829 .id = DISP_PW_ID_NONE,
3831 .hsw.regs = &icl_aux_power_well_regs,
3832 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3837 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3838 .ops = &hsw_power_well_ops,
3839 .id = DISP_PW_ID_NONE,
3841 .hsw.regs = &icl_aux_power_well_regs,
3842 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3847 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3848 .ops = &hsw_power_well_ops,
3849 .id = DISP_PW_ID_NONE,
3851 .hsw.regs = &icl_aux_power_well_regs,
3852 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3856 .name = "power well 4",
3857 .domains = ICL_PW_4_POWER_DOMAINS,
3858 .ops = &hsw_power_well_ops,
3859 .id = DISP_PW_ID_NONE,
3861 .hsw.regs = &hsw_power_well_regs,
3862 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3863 .hsw.has_fuses = true,
3864 .hsw.irq_pipe_mask = BIT(PIPE_C),
3869 static const struct i915_power_well_desc tgl_power_wells[] = {
3871 .name = "always-on",
3873 .domains = POWER_DOMAIN_MASK,
3874 .ops = &i9xx_always_on_power_well_ops,
3875 .id = DISP_PW_ID_NONE,
3878 .name = "power well 1",
3879 /* Handled by the DMC firmware */
3882 .ops = &hsw_power_well_ops,
3883 .id = SKL_DISP_PW_1,
3885 .hsw.regs = &hsw_power_well_regs,
3886 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3887 .hsw.has_fuses = true,
3892 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3893 .ops = &gen9_dc_off_power_well_ops,
3894 .id = SKL_DISP_DC_OFF,
3897 .name = "power well 2",
3898 .domains = TGL_PW_2_POWER_DOMAINS,
3899 .ops = &hsw_power_well_ops,
3900 .id = SKL_DISP_PW_2,
3902 .hsw.regs = &hsw_power_well_regs,
3903 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3904 .hsw.has_fuses = true,
3908 .name = "power well 3",
3909 .domains = TGL_PW_3_POWER_DOMAINS,
3910 .ops = &hsw_power_well_ops,
3911 .id = DISP_PW_ID_NONE,
3913 .hsw.regs = &hsw_power_well_regs,
3914 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3915 .hsw.irq_pipe_mask = BIT(PIPE_B),
3916 .hsw.has_vga = true,
3917 .hsw.has_fuses = true,
3922 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3923 .ops = &hsw_power_well_ops,
3924 .id = DISP_PW_ID_NONE,
3926 .hsw.regs = &icl_ddi_power_well_regs,
3927 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3932 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3933 .ops = &hsw_power_well_ops,
3934 .id = DISP_PW_ID_NONE,
3936 .hsw.regs = &icl_ddi_power_well_regs,
3937 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3942 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3943 .ops = &hsw_power_well_ops,
3944 .id = DISP_PW_ID_NONE,
3946 .hsw.regs = &icl_ddi_power_well_regs,
3947 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3951 .name = "DDI D TC1 IO",
3952 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
3953 .ops = &hsw_power_well_ops,
3954 .id = DISP_PW_ID_NONE,
3956 .hsw.regs = &icl_ddi_power_well_regs,
3957 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3961 .name = "DDI E TC2 IO",
3962 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
3963 .ops = &hsw_power_well_ops,
3964 .id = DISP_PW_ID_NONE,
3966 .hsw.regs = &icl_ddi_power_well_regs,
3967 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
3971 .name = "DDI F TC3 IO",
3972 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
3973 .ops = &hsw_power_well_ops,
3974 .id = DISP_PW_ID_NONE,
3976 .hsw.regs = &icl_ddi_power_well_regs,
3977 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
3981 .name = "DDI G TC4 IO",
3982 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
3983 .ops = &hsw_power_well_ops,
3984 .id = DISP_PW_ID_NONE,
3986 .hsw.regs = &icl_ddi_power_well_regs,
3987 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
3991 .name = "DDI H TC5 IO",
3992 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
3993 .ops = &hsw_power_well_ops,
3994 .id = DISP_PW_ID_NONE,
3996 .hsw.regs = &icl_ddi_power_well_regs,
3997 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4001 .name = "DDI I TC6 IO",
4002 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4003 .ops = &hsw_power_well_ops,
4004 .id = DISP_PW_ID_NONE,
4006 .hsw.regs = &icl_ddi_power_well_regs,
4007 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4012 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
4013 .ops = &hsw_power_well_ops,
4014 .id = DISP_PW_ID_NONE,
4016 .hsw.regs = &icl_aux_power_well_regs,
4017 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4022 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
4023 .ops = &hsw_power_well_ops,
4024 .id = DISP_PW_ID_NONE,
4026 .hsw.regs = &icl_aux_power_well_regs,
4027 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4032 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
4033 .ops = &hsw_power_well_ops,
4034 .id = DISP_PW_ID_NONE,
4036 .hsw.regs = &icl_aux_power_well_regs,
4037 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4041 .name = "AUX D TC1",
4042 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4043 .ops = &icl_tc_phy_aux_power_well_ops,
4044 .id = DISP_PW_ID_NONE,
4046 .hsw.regs = &icl_aux_power_well_regs,
4047 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4048 .hsw.is_tc_tbt = false,
4052 .name = "AUX E TC2",
4053 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4054 .ops = &icl_tc_phy_aux_power_well_ops,
4055 .id = DISP_PW_ID_NONE,
4057 .hsw.regs = &icl_aux_power_well_regs,
4058 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4059 .hsw.is_tc_tbt = false,
4063 .name = "AUX F TC3",
4064 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4065 .ops = &icl_tc_phy_aux_power_well_ops,
4066 .id = DISP_PW_ID_NONE,
4068 .hsw.regs = &icl_aux_power_well_regs,
4069 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4070 .hsw.is_tc_tbt = false,
4074 .name = "AUX G TC4",
4075 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4076 .ops = &icl_tc_phy_aux_power_well_ops,
4077 .id = DISP_PW_ID_NONE,
4079 .hsw.regs = &icl_aux_power_well_regs,
4080 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4081 .hsw.is_tc_tbt = false,
4085 .name = "AUX H TC5",
4086 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4087 .ops = &icl_tc_phy_aux_power_well_ops,
4088 .id = DISP_PW_ID_NONE,
4090 .hsw.regs = &icl_aux_power_well_regs,
4091 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4092 .hsw.is_tc_tbt = false,
4096 .name = "AUX I TC6",
4097 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4098 .ops = &icl_tc_phy_aux_power_well_ops,
4099 .id = DISP_PW_ID_NONE,
4101 .hsw.regs = &icl_aux_power_well_regs,
4102 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4103 .hsw.is_tc_tbt = false,
4107 .name = "AUX D TBT1",
4108 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4109 .ops = &hsw_power_well_ops,
4110 .id = DISP_PW_ID_NONE,
4112 .hsw.regs = &icl_aux_power_well_regs,
4113 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4114 .hsw.is_tc_tbt = true,
4118 .name = "AUX E TBT2",
4119 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4120 .ops = &hsw_power_well_ops,
4121 .id = DISP_PW_ID_NONE,
4123 .hsw.regs = &icl_aux_power_well_regs,
4124 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4125 .hsw.is_tc_tbt = true,
4129 .name = "AUX F TBT3",
4130 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4131 .ops = &hsw_power_well_ops,
4132 .id = DISP_PW_ID_NONE,
4134 .hsw.regs = &icl_aux_power_well_regs,
4135 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4136 .hsw.is_tc_tbt = true,
4140 .name = "AUX G TBT4",
4141 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4142 .ops = &hsw_power_well_ops,
4143 .id = DISP_PW_ID_NONE,
4145 .hsw.regs = &icl_aux_power_well_regs,
4146 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4147 .hsw.is_tc_tbt = true,
4151 .name = "AUX H TBT5",
4152 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4153 .ops = &hsw_power_well_ops,
4154 .id = DISP_PW_ID_NONE,
4156 .hsw.regs = &icl_aux_power_well_regs,
4157 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4158 .hsw.is_tc_tbt = true,
4162 .name = "AUX I TBT6",
4163 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4164 .ops = &hsw_power_well_ops,
4165 .id = DISP_PW_ID_NONE,
4167 .hsw.regs = &icl_aux_power_well_regs,
4168 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4169 .hsw.is_tc_tbt = true,
4173 .name = "power well 4",
4174 .domains = TGL_PW_4_POWER_DOMAINS,
4175 .ops = &hsw_power_well_ops,
4176 .id = DISP_PW_ID_NONE,
4178 .hsw.regs = &hsw_power_well_regs,
4179 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4180 .hsw.has_fuses = true,
4181 .hsw.irq_pipe_mask = BIT(PIPE_C),
4185 .name = "power well 5",
4186 .domains = TGL_PW_5_POWER_DOMAINS,
4187 .ops = &hsw_power_well_ops,
4188 .id = DISP_PW_ID_NONE,
4190 .hsw.regs = &hsw_power_well_regs,
4191 .hsw.idx = TGL_PW_CTL_IDX_PW_5,
4192 .hsw.has_fuses = true,
4193 .hsw.irq_pipe_mask = BIT(PIPE_D),
/*
 * Normalize the disable_power_well module option: any non-negative value is
 * treated as an explicit boolean request; a negative value (the "auto"
 * default) enables power-well disabling.
 */
static bool
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	/* Negative means "auto": default to allowing power wells to be disabled. */
	return true;
}
4208 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4215 if (INTEL_GEN(dev_priv) >= 12) {
4218 * DC9 has a separate HW flow from the rest of the DC states,
4219 * not depending on the DMC firmware. It's needed by system
4220 * suspend/resume, so allow it unconditionally.
4222 mask = DC_STATE_EN_DC9;
4223 } else if (IS_GEN(dev_priv, 11)) {
4225 mask = DC_STATE_EN_DC9;
4226 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4229 } else if (IS_GEN9_LP(dev_priv)) {
4231 mask = DC_STATE_EN_DC9;
4237 if (!i915_modparams.disable_power_well)
4240 if (enable_dc >= 0 && enable_dc <= max_dc) {
4241 requested_dc = enable_dc;
4242 } else if (enable_dc == -1) {
4243 requested_dc = max_dc;
4244 } else if (enable_dc > max_dc && enable_dc <= 4) {
4245 drm_dbg_kms(&dev_priv->drm,
4246 "Adjusting requested max DC state (%d->%d)\n",
4248 requested_dc = max_dc;
4250 drm_err(&dev_priv->drm,
4251 "Unexpected value for enable_dc (%d)\n", enable_dc);
4252 requested_dc = max_dc;
4255 switch (requested_dc) {
4257 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4260 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4263 mask |= DC_STATE_EN_UPTO_DC6;
4266 mask |= DC_STATE_EN_UPTO_DC5;
4270 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4276 __set_power_wells(struct i915_power_domains *power_domains,
4277 const struct i915_power_well_desc *power_well_descs,
4278 int power_well_count)
4280 u64 power_well_ids = 0;
4283 power_domains->power_well_count = power_well_count;
4284 power_domains->power_wells =
4285 kcalloc(power_well_count,
4286 sizeof(*power_domains->power_wells),
4288 if (!power_domains->power_wells)
4291 for (i = 0; i < power_well_count; i++) {
4292 enum i915_power_well_id id = power_well_descs[i].id;
4294 power_domains->power_wells[i].desc = &power_well_descs[i];
4296 if (id == DISP_PW_ID_NONE)
4299 WARN_ON(id >= sizeof(power_well_ids) * 8);
4300 WARN_ON(power_well_ids & BIT_ULL(id));
4301 power_well_ids |= BIT_ULL(id);
/* Convenience wrapper: derives the element count from the descriptor array. */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
4318 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4320 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4323 i915_modparams.disable_power_well =
4324 sanitize_disable_power_well_option(dev_priv,
4325 i915_modparams.disable_power_well);
4326 dev_priv->csr.allowed_dc_mask =
4327 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4329 dev_priv->csr.target_dc_state =
4330 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4332 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4334 mutex_init(&power_domains->lock);
4336 INIT_DELAYED_WORK(&power_domains->async_put_work,
4337 intel_display_power_put_async_work);
4340 * The enabling order will be from lower to higher indexed wells,
4341 * the disabling order is reversed.
4343 if (IS_GEN(dev_priv, 12)) {
4344 err = set_power_wells(power_domains, tgl_power_wells);
4345 } else if (IS_ELKHARTLAKE(dev_priv)) {
4346 err = set_power_wells(power_domains, ehl_power_wells);
4347 } else if (IS_GEN(dev_priv, 11)) {
4348 err = set_power_wells(power_domains, icl_power_wells);
4349 } else if (IS_CANNONLAKE(dev_priv)) {
4350 err = set_power_wells(power_domains, cnl_power_wells);
4353 * DDI and Aux IO are getting enabled for all ports
4354 * regardless the presence or use. So, in order to avoid
4355 * timeouts, lets remove them from the list
4356 * for the SKUs without port F.
4358 if (!IS_CNL_WITH_PORT_F(dev_priv))
4359 power_domains->power_well_count -= 2;
4360 } else if (IS_GEMINILAKE(dev_priv)) {
4361 err = set_power_wells(power_domains, glk_power_wells);
4362 } else if (IS_BROXTON(dev_priv)) {
4363 err = set_power_wells(power_domains, bxt_power_wells);
4364 } else if (IS_GEN9_BC(dev_priv)) {
4365 err = set_power_wells(power_domains, skl_power_wells);
4366 } else if (IS_CHERRYVIEW(dev_priv)) {
4367 err = set_power_wells(power_domains, chv_power_wells);
4368 } else if (IS_BROADWELL(dev_priv)) {
4369 err = set_power_wells(power_domains, bdw_power_wells);
4370 } else if (IS_HASWELL(dev_priv)) {
4371 err = set_power_wells(power_domains, hsw_power_wells);
4372 } else if (IS_VALLEYVIEW(dev_priv)) {
4373 err = set_power_wells(power_domains, vlv_power_wells);
4374 } else if (IS_I830(dev_priv)) {
4375 err = set_power_wells(power_domains, i830_power_wells);
4377 err = set_power_wells(power_domains, i9xx_always_on_power_well);
/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
4389 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4391 kfree(dev_priv->power_domains.power_wells);
4394 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4396 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4397 struct i915_power_well *power_well;
4399 mutex_lock(&power_domains->lock);
4400 for_each_power_well(dev_priv, power_well) {
4401 power_well->desc->ops->sync_hw(dev_priv, power_well);
4402 power_well->hw_enabled =
4403 power_well->desc->ops->is_enabled(dev_priv, power_well);
4405 mutex_unlock(&power_domains->lock);
4409 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4410 i915_reg_t reg, bool enable)
4414 val = intel_de_read(dev_priv, reg);
4415 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4416 intel_de_write(dev_priv, reg, val);
4417 intel_de_posting_read(dev_priv, reg);
4420 status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4421 if ((enable && !status) || (!enable && status)) {
4422 drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
4423 enable ? "enable" : "disable");
4429 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4431 icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
/* Gen9: power down all DBUF slices. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
4439 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4443 int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4444 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4446 WARN(hweight8(req_slices) > max_slices,
4447 "Invalid number of dbuf slices requested\n");
4449 DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
4452 * Might be running this in parallel to gen9_dc_off_power_well_enable
4453 * being called from intel_dp_detect for instance,
4454 * which causes assertion triggered by race condition,
4455 * as gen9_assert_dbuf_enabled might preempt this when registers
4456 * were already updated, while dev_priv was not.
4458 mutex_lock(&power_domains->lock);
4460 for (i = 0; i < max_slices; i++) {
4461 intel_dbuf_slice_set(dev_priv,
4463 (req_slices & BIT(i)) != 0);
4466 dev_priv->enabled_dbuf_slices_mask = req_slices;
4468 mutex_unlock(&power_domains->lock);
4471 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4473 skl_ddb_get_hw_state(dev_priv);
4475 * Just power up at least 1 slice, we will
4476 * figure out later which slices we have and what we need.
4478 icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
/* ICL+: power down all DBUF slices. */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
4487 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4491 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4492 MBUS_ABOX_BT_CREDIT_POOL2(16) |
4493 MBUS_ABOX_B_CREDIT(1) |
4494 MBUS_ABOX_BW_CREDIT(1);
4496 intel_de_write(dev_priv, MBUS_ABOX_CTL, val);
4499 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4501 u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4504 * The LCPLL register should be turned on by the BIOS. For now
4505 * let's just check its state and print errors in case
4506 * something is wrong. Don't even try to turn it on.
4509 if (val & LCPLL_CD_SOURCE_FCLK)
4510 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4512 if (val & LCPLL_PLL_DISABLE)
4513 drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4515 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4516 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
4519 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4521 struct drm_device *dev = &dev_priv->drm;
4522 struct intel_crtc *crtc;
4524 for_each_intel_crtc(dev, crtc)
4525 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4526 pipe_name(crtc->pipe));
4528 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4529 "Display power well on\n");
4530 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4532 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4533 "WRPLL1 enabled\n");
4534 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4535 "WRPLL2 enabled\n");
4536 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4537 "Panel power on\n");
4538 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4539 "CPU PWM1 enabled\n");
4540 if (IS_HASWELL(dev_priv))
4541 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4542 "CPU PWM2 enabled\n");
4543 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4544 "PCH PWM1 enabled\n");
4545 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4546 "Utility pin enabled\n");
4547 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4548 "PCH GTC enabled\n");
4551 * In theory we can still leave IRQs enabled, as long as only the HPD
4552 * interrupts remain enabled. We used to check for that, but since it's
4553 * gen-specific and since we only disable LCPLL after we fully disable
4554 * the interrupts, the check below should be enough.
4556 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4559 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4561 if (IS_HASWELL(dev_priv))
4562 return intel_de_read(dev_priv, D_COMP_HSW);
4564 return intel_de_read(dev_priv, D_COMP_BDW);
4567 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4569 if (IS_HASWELL(dev_priv)) {
4570 if (sandybridge_pcode_write(dev_priv,
4571 GEN6_PCODE_WRITE_D_COMP, val))
4572 drm_dbg_kms(&dev_priv->drm,
4573 "Failed to write to D_COMP\n");
4575 intel_de_write(dev_priv, D_COMP_BDW, val);
4576 intel_de_posting_read(dev_priv, D_COMP_BDW);
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
4588 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4589 bool switch_to_fclk, bool allow_power_down)
4593 assert_can_disable_lcpll(dev_priv);
4595 val = intel_de_read(dev_priv, LCPLL_CTL);
4597 if (switch_to_fclk) {
4598 val |= LCPLL_CD_SOURCE_FCLK;
4599 intel_de_write(dev_priv, LCPLL_CTL, val);
4601 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4602 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4603 drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4605 val = intel_de_read(dev_priv, LCPLL_CTL);
4608 val |= LCPLL_PLL_DISABLE;
4609 intel_de_write(dev_priv, LCPLL_CTL, val);
4610 intel_de_posting_read(dev_priv, LCPLL_CTL);
4612 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4613 drm_err(&dev_priv->drm, "LCPLL still locked\n");
4615 val = hsw_read_dcomp(dev_priv);
4616 val |= D_COMP_COMP_DISABLE;
4617 hsw_write_dcomp(dev_priv, val);
4620 if (wait_for((hsw_read_dcomp(dev_priv) &
4621 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4622 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4624 if (allow_power_down) {
4625 val = intel_de_read(dev_priv, LCPLL_CTL);
4626 val |= LCPLL_POWER_DOWN_ALLOW;
4627 intel_de_write(dev_priv, LCPLL_CTL, val);
4628 intel_de_posting_read(dev_priv, LCPLL_CTL);
4633 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4636 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4640 val = intel_de_read(dev_priv, LCPLL_CTL);
4642 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4643 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4647 * Make sure we're not on PC8 state before disabling PC8, otherwise
4648 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4650 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4652 if (val & LCPLL_POWER_DOWN_ALLOW) {
4653 val &= ~LCPLL_POWER_DOWN_ALLOW;
4654 intel_de_write(dev_priv, LCPLL_CTL, val);
4655 intel_de_posting_read(dev_priv, LCPLL_CTL);
4658 val = hsw_read_dcomp(dev_priv);
4659 val |= D_COMP_COMP_FORCE;
4660 val &= ~D_COMP_COMP_DISABLE;
4661 hsw_write_dcomp(dev_priv, val);
4663 val = intel_de_read(dev_priv, LCPLL_CTL);
4664 val &= ~LCPLL_PLL_DISABLE;
4665 intel_de_write(dev_priv, LCPLL_CTL, val);
4667 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4668 drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
4670 if (val & LCPLL_CD_SOURCE_FCLK) {
4671 val = intel_de_read(dev_priv, LCPLL_CTL);
4672 val &= ~LCPLL_CD_SOURCE_FCLK;
4673 intel_de_write(dev_priv, LCPLL_CTL, val);
4675 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4676 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4677 drm_err(&dev_priv->drm,
4678 "Switching back to LCPLL failed\n");
4681 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4683 intel_update_cdclk(dev_priv);
4684 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4688 * Package states C8 and deeper are really deep PC states that can only be
4689 * reached when all the devices on the system allow it, so even if the graphics
4690 * device allows PC8+, it doesn't mean the system will actually get to these
4691 * states. Our driver only allows PC8+ when going into runtime PM.
4693 * The requirements for PC8+ are that all the outputs are disabled, the power
4694 * well is disabled and most interrupts are disabled, and these are also
4695 * requirements for runtime PM. When these conditions are met, we manually do
4696 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4697 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4700 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4701 * the state of some registers, so when we come back from PC8+ we need to
4702 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4703 * need to take care of the registers kept by RC6. Notice that this happens even
4704 * if we don't put the device in PCI D3 state (which is what currently happens
4705 * because of the runtime PM support).
4707 * For more, read "Display Sequences for Package C8" on the hardware
4710 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4714 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
4716 if (HAS_PCH_LPT_LP(dev_priv)) {
4717 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4718 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4719 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4722 lpt_disable_clkout_dp(dev_priv);
4723 hsw_disable_lcpll(dev_priv, true, true);
4726 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4730 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
4732 hsw_restore_lcpll(dev_priv);
4733 intel_init_pch_refclk(dev_priv);
4735 if (HAS_PCH_LPT_LP(dev_priv)) {
4736 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4737 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4738 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4742 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4746 u32 reset_bits, val;
4748 if (IS_IVYBRIDGE(dev_priv)) {
4750 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4752 reg = HSW_NDE_RSTWRN_OPT;
4753 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4756 val = intel_de_read(dev_priv, reg);
4763 intel_de_write(dev_priv, reg, val);
4766 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4769 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4770 struct i915_power_well *well;
4772 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4774 /* enable PCH reset handshake */
4775 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4777 /* enable PG1 and Misc I/O */
4778 mutex_lock(&power_domains->lock);
4780 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4781 intel_power_well_enable(dev_priv, well);
4783 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4784 intel_power_well_enable(dev_priv, well);
4786 mutex_unlock(&power_domains->lock);
4788 intel_cdclk_init_hw(dev_priv);
4790 gen9_dbuf_enable(dev_priv);
4792 if (resume && dev_priv->csr.dmc_payload)
4793 intel_csr_load_program(dev_priv);
4796 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4798 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4799 struct i915_power_well *well;
4801 gen9_disable_dc_states(dev_priv);
4803 gen9_dbuf_disable(dev_priv);
4805 intel_cdclk_uninit_hw(dev_priv);
4807 /* The spec doesn't call for removing the reset handshake flag */
4808 /* disable PG1 and Misc I/O */
4810 mutex_lock(&power_domains->lock);
4813 * BSpec says to keep the MISC IO power well enabled here, only
4814 * remove our request for power well 1.
4815 * Note that even though the driver's request is removed power well 1
4816 * may stay enabled after this due to DMC's own request on it.
4818 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4819 intel_power_well_disable(dev_priv, well);
4821 mutex_unlock(&power_domains->lock);
4823 usleep_range(10, 30); /* 10 us delay per Bspec */
4826 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4828 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4829 struct i915_power_well *well;
4831 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4834 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4835 * or else the reset will hang because there is no PCH to respond.
4836 * Move the handshake programming to initialization sequence.
4837 * Previously was left up to BIOS.
4839 intel_pch_reset_handshake(dev_priv, false);
4842 mutex_lock(&power_domains->lock);
4844 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4845 intel_power_well_enable(dev_priv, well);
4847 mutex_unlock(&power_domains->lock);
4849 intel_cdclk_init_hw(dev_priv);
4851 gen9_dbuf_enable(dev_priv);
4853 if (resume && dev_priv->csr.dmc_payload)
4854 intel_csr_load_program(dev_priv);
4857 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4859 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4860 struct i915_power_well *well;
4862 gen9_disable_dc_states(dev_priv);
4864 gen9_dbuf_disable(dev_priv);
4866 intel_cdclk_uninit_hw(dev_priv);
4868 /* The spec doesn't call for removing the reset handshake flag */
4871 * Disable PW1 (PG1).
4872 * Note that even though the driver's request is removed power well 1
4873 * may stay enabled after this due to DMC's own request on it.
4875 mutex_lock(&power_domains->lock);
4877 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4878 intel_power_well_disable(dev_priv, well);
4880 mutex_unlock(&power_domains->lock);
4882 usleep_range(10, 30); /* 10 us delay per Bspec */
4885 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4887 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4888 struct i915_power_well *well;
4890 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4892 /* 1. Enable PCH Reset Handshake */
4893 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4896 intel_combo_phy_init(dev_priv);
4899 * 4. Enable Power Well 1 (PG1).
4900 * The AUX IO power wells will be enabled on demand.
4902 mutex_lock(&power_domains->lock);
4903 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4904 intel_power_well_enable(dev_priv, well);
4905 mutex_unlock(&power_domains->lock);
4907 /* 5. Enable CD clock */
4908 intel_cdclk_init_hw(dev_priv);
4910 /* 6. Enable DBUF */
4911 gen9_dbuf_enable(dev_priv);
4913 if (resume && dev_priv->csr.dmc_payload)
4914 intel_csr_load_program(dev_priv);
4917 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4919 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4920 struct i915_power_well *well;
4922 gen9_disable_dc_states(dev_priv);
4924 /* 1. Disable all display engine functions -> aready done */
4926 /* 2. Disable DBUF */
4927 gen9_dbuf_disable(dev_priv);
4929 /* 3. Disable CD clock */
4930 intel_cdclk_uninit_hw(dev_priv);
4933 * 4. Disable Power Well 1 (PG1).
4934 * The AUX IO power wells are toggled on demand, so they are already
4935 * disabled at this point.
4937 mutex_lock(&power_domains->lock);
4938 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4939 intel_power_well_disable(dev_priv, well);
4940 mutex_unlock(&power_domains->lock);
4942 usleep_range(10, 30); /* 10 us delay per Bspec */
4945 intel_combo_phy_uninit(dev_priv);
4948 struct buddy_page_mask {
4954 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
4955 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
4956 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
4957 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
4958 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
4962 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
4963 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
4964 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
4965 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
4966 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
4970 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
4972 enum intel_dram_type type = dev_priv->dram_info.type;
4973 u8 num_channels = dev_priv->dram_info.num_channels;
4974 const struct buddy_page_mask *table;
4977 if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
4978 /* Wa_1409767108: tgl */
4979 table = wa_1409767108_buddy_page_masks;
4981 table = tgl_buddy_page_masks;
4983 for (i = 0; table[i].page_mask != 0; i++)
4984 if (table[i].num_channels == num_channels &&
4985 table[i].type == type)
4988 if (table[i].page_mask == 0) {
4989 drm_dbg(&dev_priv->drm,
4990 "Unknown memory configuration; disabling address buddy logic.\n");
4991 intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
4992 intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
4994 intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
4995 table[i].page_mask);
4996 intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
4997 table[i].page_mask);
5001 static void icl_display_core_init(struct drm_i915_private *dev_priv,
5004 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5005 struct i915_power_well *well;
5007 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5009 /* 1. Enable PCH reset handshake. */
5010 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5012 /* 2. Initialize all combo phys */
5013 intel_combo_phy_init(dev_priv);
5016 * 3. Enable Power Well 1 (PG1).
5017 * The AUX IO power wells will be enabled on demand.
5019 mutex_lock(&power_domains->lock);
5020 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5021 intel_power_well_enable(dev_priv, well);
5022 mutex_unlock(&power_domains->lock);
5024 /* 4. Enable CDCLK. */
5025 intel_cdclk_init_hw(dev_priv);
5027 /* 5. Enable DBUF. */
5028 icl_dbuf_enable(dev_priv);
5030 /* 6. Setup MBUS. */
5031 icl_mbus_init(dev_priv);
5033 /* 7. Program arbiter BW_BUDDY registers */
5034 if (INTEL_GEN(dev_priv) >= 12)
5035 tgl_bw_buddy_init(dev_priv);
5037 if (resume && dev_priv->csr.dmc_payload)
5038 intel_csr_load_program(dev_priv);
5041 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5043 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5044 struct i915_power_well *well;
5046 gen9_disable_dc_states(dev_priv);
5048 /* 1. Disable all display engine functions -> aready done */
5050 /* 2. Disable DBUF */
5051 icl_dbuf_disable(dev_priv);
5053 /* 3. Disable CD clock */
5054 intel_cdclk_uninit_hw(dev_priv);
5057 * 4. Disable Power Well 1 (PG1).
5058 * The AUX IO power wells are toggled on demand, so they are already
5059 * disabled at this point.
5061 mutex_lock(&power_domains->lock);
5062 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5063 intel_power_well_disable(dev_priv, well);
5064 mutex_unlock(&power_domains->lock);
5067 intel_combo_phy_uninit(dev_priv);
5070 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5072 struct i915_power_well *cmn_bc =
5073 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5074 struct i915_power_well *cmn_d =
5075 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5078 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5079 * workaround never ever read DISPLAY_PHY_CONTROL, and
5080 * instead maintain a shadow copy ourselves. Use the actual
5081 * power well state and lane status to reconstruct the
5082 * expected initial value.
5084 dev_priv->chv_phy_control =
5085 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5086 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5087 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5088 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5089 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5092 * If all lanes are disabled we leave the override disabled
5093 * with all power down bits cleared to match the state we
5094 * would use after disabling the port. Otherwise enable the
5095 * override and set the lane powerdown bits accding to the
5096 * current lane status.
5098 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5099 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5102 mask = status & DPLL_PORTB_READY_MASK;
5106 dev_priv->chv_phy_control |=
5107 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5109 dev_priv->chv_phy_control |=
5110 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5112 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5116 dev_priv->chv_phy_control |=
5117 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5119 dev_priv->chv_phy_control |=
5120 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5122 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5124 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5126 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5129 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5130 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5133 mask = status & DPLL_PORTD_READY_MASK;
5138 dev_priv->chv_phy_control |=
5139 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5141 dev_priv->chv_phy_control |=
5142 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5144 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5146 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5148 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5151 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5152 dev_priv->chv_phy_control);
5154 /* Defer application of initial phy_control to enabling the powerwell */
5157 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5159 struct i915_power_well *cmn =
5160 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5161 struct i915_power_well *disp2d =
5162 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5164 /* If the display might be already active skip this */
5165 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5166 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5167 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5170 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5172 /* cmnlane needs DPLL registers */
5173 disp2d->desc->ops->enable(dev_priv, disp2d);
5176 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5177 * Need to assert and de-assert PHY SB reset by gating the
5178 * common lane power, then un-gating it.
5179 * Simply ungating isn't enough to reset the PHY enough to get
5180 * ports and lanes running.
5182 cmn->desc->ops->disable(dev_priv, cmn);
5185 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5189 vlv_punit_get(dev_priv);
5190 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5191 vlv_punit_put(dev_priv);
5196 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5198 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5199 "VED not power gated\n");
5202 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5204 static const struct pci_device_id isp_ids[] = {
5205 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5206 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5210 WARN(!pci_dev_present(isp_ids) &&
5211 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5212 "ISP not power gated\n");
5215 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5218 * intel_power_domains_init_hw - initialize hardware power domain state
5219 * @i915: i915 device instance
5220 * @resume: Called from resume code paths or not
5222 * This function initializes the hardware power domain state and enables all
5223 * power wells belonging to the INIT power domain. Power wells in other
5224 * domains (and not in the INIT domain) are referenced or disabled by
5225 * intel_modeset_readout_hw_state(). After that the reference count of each
5226 * power well must match its HW enabled state, see
5227 * intel_power_domains_verify_state().
5229 * It will return with power domains disabled (to be enabled later by
5230 * intel_power_domains_enable()) and must be paired with
5231 * intel_power_domains_driver_remove().
5233 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5235 struct i915_power_domains *power_domains = &i915->power_domains;
5237 power_domains->initializing = true;
5239 if (INTEL_GEN(i915) >= 11) {
5240 icl_display_core_init(i915, resume);
5241 } else if (IS_CANNONLAKE(i915)) {
5242 cnl_display_core_init(i915, resume);
5243 } else if (IS_GEN9_BC(i915)) {
5244 skl_display_core_init(i915, resume);
5245 } else if (IS_GEN9_LP(i915)) {
5246 bxt_display_core_init(i915, resume);
5247 } else if (IS_CHERRYVIEW(i915)) {
5248 mutex_lock(&power_domains->lock);
5249 chv_phy_control_init(i915);
5250 mutex_unlock(&power_domains->lock);
5251 assert_isp_power_gated(i915);
5252 } else if (IS_VALLEYVIEW(i915)) {
5253 mutex_lock(&power_domains->lock);
5254 vlv_cmnlane_wa(i915);
5255 mutex_unlock(&power_domains->lock);
5256 assert_ved_power_gated(i915);
5257 assert_isp_power_gated(i915);
5258 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5259 hsw_assert_cdclk(i915);
5260 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5261 } else if (IS_IVYBRIDGE(i915)) {
5262 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5266 * Keep all power wells enabled for any dependent HW access during
5267 * initialization and to make sure we keep BIOS enabled display HW
5268 * resources powered until display HW readout is complete. We drop
5269 * this reference in intel_power_domains_enable().
5271 power_domains->wakeref =
5272 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5274 /* Disable power support if the user asked so. */
5275 if (!i915_modparams.disable_power_well)
5276 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5277 intel_power_domains_sync_hw(i915);
5279 power_domains->initializing = false;
5283 * intel_power_domains_driver_remove - deinitialize hw power domain state
5284 * @i915: i915 device instance
5286 * De-initializes the display power domain HW state. It also ensures that the
5287 * device stays powered up so that the driver can be reloaded.
5289 * It must be called with power domains already disabled (after a call to
5290 * intel_power_domains_disable()) and must be paired with
5291 * intel_power_domains_init_hw().
5293 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5295 intel_wakeref_t wakeref __maybe_unused =
5296 fetch_and_zero(&i915->power_domains.wakeref);
5298 /* Remove the refcount we took to keep power well support disabled. */
5299 if (!i915_modparams.disable_power_well)
5300 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5302 intel_display_power_flush_work_sync(i915);
5304 intel_power_domains_verify_state(i915);
5306 /* Keep the power well enabled, but cancel its rpm wakeref. */
5307 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5311 * intel_power_domains_enable - enable toggling of display power wells
5312 * @i915: i915 device instance
5314 * Enable the ondemand enabling/disabling of the display power wells. Note that
5315 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5316 * only at specific points of the display modeset sequence, thus they are not
5317 * affected by the intel_power_domains_enable()/disable() calls. The purpose
5318 * of these function is to keep the rest of power wells enabled until the end
5319 * of display HW readout (which will acquire the power references reflecting
5320 * the current HW state).
5322 void intel_power_domains_enable(struct drm_i915_private *i915)
5324 intel_wakeref_t wakeref __maybe_unused =
5325 fetch_and_zero(&i915->power_domains.wakeref);
5327 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5328 intel_power_domains_verify_state(i915);
5332 * intel_power_domains_disable - disable toggling of display power wells
5333 * @i915: i915 device instance
5335 * Disable the ondemand enabling/disabling of the display power wells. See
5336 * intel_power_domains_enable() for which power wells this call controls.
5338 void intel_power_domains_disable(struct drm_i915_private *i915)
5340 struct i915_power_domains *power_domains = &i915->power_domains;
5342 WARN_ON(power_domains->wakeref);
5343 power_domains->wakeref =
5344 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5346 intel_power_domains_verify_state(i915);
5350 * intel_power_domains_suspend - suspend power domain state
5351 * @i915: i915 device instance
5352 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5354 * This function prepares the hardware power domain state before entering
5357 * It must be called with power domains already disabled (after a call to
5358 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5360 void intel_power_domains_suspend(struct drm_i915_private *i915,
5361 enum i915_drm_suspend_mode suspend_mode)
5363 struct i915_power_domains *power_domains = &i915->power_domains;
5364 intel_wakeref_t wakeref __maybe_unused =
5365 fetch_and_zero(&power_domains->wakeref);
5367 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5370 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5371 * support don't manually deinit the power domains. This also means the
5372 * CSR/DMC firmware will stay active, it will power down any HW
5373 * resources as required and also enable deeper system power states
5374 * that would be blocked if the firmware was inactive.
5376 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5377 suspend_mode == I915_DRM_SUSPEND_IDLE &&
5378 i915->csr.dmc_payload) {
5379 intel_display_power_flush_work(i915);
5380 intel_power_domains_verify_state(i915);
5385 * Even if power well support was disabled we still want to disable
5386 * power wells if power domains must be deinitialized for suspend.
5388 if (!i915_modparams.disable_power_well)
5389 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5391 intel_display_power_flush_work(i915);
5392 intel_power_domains_verify_state(i915);
5394 if (INTEL_GEN(i915) >= 11)
5395 icl_display_core_uninit(i915);
5396 else if (IS_CANNONLAKE(i915))
5397 cnl_display_core_uninit(i915);
5398 else if (IS_GEN9_BC(i915))
5399 skl_display_core_uninit(i915);
5400 else if (IS_GEN9_LP(i915))
5401 bxt_display_core_uninit(i915);
5403 power_domains->display_core_suspended = true;
5407 * intel_power_domains_resume - resume power domain state
5408 * @i915: i915 device instance
5410 * This function resume the hardware power domain state during system resume.
5412 * It will return with power domain support disabled (to be enabled later by
5413 * intel_power_domains_enable()) and must be paired with
5414 * intel_power_domains_suspend().
5416 void intel_power_domains_resume(struct drm_i915_private *i915)
5418 struct i915_power_domains *power_domains = &i915->power_domains;
5420 if (power_domains->display_core_suspended) {
5421 intel_power_domains_init_hw(i915, true);
5422 power_domains->display_core_suspended = false;
5424 WARN_ON(power_domains->wakeref);
5425 power_domains->wakeref =
5426 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5429 intel_power_domains_verify_state(i915);
5432 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5434 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5436 struct i915_power_domains *power_domains = &i915->power_domains;
5437 struct i915_power_well *power_well;
5439 for_each_power_well(i915, power_well) {
5440 enum intel_display_power_domain domain;
5442 drm_dbg(&i915->drm, "%-25s %d\n",
5443 power_well->desc->name, power_well->count);
5445 for_each_power_domain(domain, power_well->desc->domains)
5446 drm_dbg(&i915->drm, " %-23s %d\n",
5447 intel_display_power_domain_str(domain),
5448 power_domains->domain_use_count[domain]);
5453 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5454 * @i915: i915 device instance
5456 * Verify if the reference count of each power well matches its HW enabled
5457 * state and the total refcount of the domains it belongs to. This must be
5458 * called after modeset HW state sanitization, which is responsible for
5459 * acquiring reference counts for any power wells in use and disabling the
5460 * ones left on by BIOS but not required by any active output.
5462 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5464 struct i915_power_domains *power_domains = &i915->power_domains;
5465 struct i915_power_well *power_well;
5466 bool dump_domain_info;
5468 mutex_lock(&power_domains->lock);
5470 verify_async_put_domains_state(power_domains);
5472 dump_domain_info = false;
5473 for_each_power_well(i915, power_well) {
5474 enum intel_display_power_domain domain;
5478 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5479 if ((power_well->count || power_well->desc->always_on) !=
5482 "power well %s state mismatch (refcount %d/enabled %d)",
5483 power_well->desc->name,
5484 power_well->count, enabled);
5487 for_each_power_domain(domain, power_well->desc->domains)
5488 domains_count += power_domains->domain_use_count[domain];
5490 if (power_well->count != domains_count) {
5492 "power well %s refcount/domain refcount mismatch "
5493 "(refcount %d/domains refcount %d)\n",
5494 power_well->desc->name, power_well->count,
5496 dump_domain_info = true;
5500 if (dump_domain_info) {
5504 intel_power_domains_dump_info(i915);
5509 mutex_unlock(&power_domains->lock);
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
/* Late suspend: enter the deepest platform power state (DC9 or PC8). */
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
		bxt_enable_dc9(i915);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		hsw_enable_pc8(i915);
}
/* Early resume: leave DC9/PC8; reverse of intel_display_power_suspend_late(). */
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}
/* Suspend the display core and enter the platform low-power state. */
void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}
5551 void intel_display_power_resume(struct drm_i915_private *i915)
5553 if (INTEL_GEN(i915) >= 11) {
5554 bxt_disable_dc9(i915);
5555 icl_display_core_init(i915, true);
5556 if (i915->csr.dmc_payload) {
5557 if (i915->csr.allowed_dc_mask &
5558 DC_STATE_EN_UPTO_DC6)
5559 skl_enable_dc6(i915);
5560 else if (i915->csr.allowed_dc_mask &
5561 DC_STATE_EN_UPTO_DC5)
5562 gen9_enable_dc5(i915);
5564 } else if (IS_GEN9_LP(i915)) {
5565 bxt_disable_dc9(i915);
5566 bxt_display_core_init(i915, true);
5567 if (i915->csr.dmc_payload &&
5568 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5569 gen9_enable_dc5(i915);
5570 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5571 hsw_disable_pc8(i915);