2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
58 #include "gt/intel_rps.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
84 #include "intel_vga.h"
86 /* Primary plane formats for gen <= 3 */
87 static const u32 i8xx_primary_formats[] = {
94 /* Primary plane formats for ivb (no fp16 due to hw issue) */
95 static const u32 ivb_primary_formats[] = {
100 DRM_FORMAT_XRGB2101010,
101 DRM_FORMAT_XBGR2101010,
104 /* Primary plane formats for gen >= 4, except ivb */
105 static const u32 i965_primary_formats[] = {
110 DRM_FORMAT_XRGB2101010,
111 DRM_FORMAT_XBGR2101010,
112 DRM_FORMAT_XBGR16161616F,
115 /* Primary plane formats for vlv/chv */
116 static const u32 vlv_primary_formats[] = {
123 DRM_FORMAT_XRGB2101010,
124 DRM_FORMAT_XBGR2101010,
125 DRM_FORMAT_ARGB2101010,
126 DRM_FORMAT_ABGR2101010,
127 DRM_FORMAT_XBGR16161616F,
130 static const u64 i9xx_format_modifiers[] = {
131 I915_FORMAT_MOD_X_TILED,
132 DRM_FORMAT_MOD_LINEAR,
133 DRM_FORMAT_MOD_INVALID
137 static const u32 intel_cursor_formats[] = {
141 static const u64 cursor_format_modifiers[] = {
142 DRM_FORMAT_MOD_LINEAR,
143 DRM_FORMAT_MOD_INVALID
146 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
147 struct intel_crtc_state *pipe_config);
148 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
149 struct intel_crtc_state *pipe_config);
151 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
152 struct drm_i915_gem_object *obj,
153 struct drm_mode_fb_cmd2 *mode_cmd);
154 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
155 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
156 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
157 const struct intel_link_m_n *m_n,
158 const struct intel_link_m_n *m2_n2);
159 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
160 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
163 static void vlv_prepare_pll(struct intel_crtc *crtc,
164 const struct intel_crtc_state *pipe_config);
165 static void chv_prepare_pll(struct intel_crtc *crtc,
166 const struct intel_crtc_state *pipe_config);
167 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
168 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
169 static void intel_modeset_setup_hw_state(struct drm_device *dev,
170 struct drm_modeset_acquire_ctx *ctx);
171 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
176 } dot, vco, n, m, m1, m2, p, p1;
180 int p2_slow, p2_fast;
184 /* returns HPLL frequency in kHz */
185 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
187 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
189 /* Obtain SKU information */
190 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
191 CCK_FUSE_HPLL_FREQ_MASK;
193 return vco_freq[hpll_freq] * 1000;
196 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
197 const char *name, u32 reg, int ref_freq)
202 val = vlv_cck_read(dev_priv, reg);
203 divider = val & CCK_FREQUENCY_VALUES;
205 WARN((val & CCK_FREQUENCY_STATUS) !=
206 (divider << CCK_FREQUENCY_STATUS_SHIFT),
207 "%s change in progress\n", name);
209 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
212 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
213 const char *name, u32 reg)
217 vlv_cck_get(dev_priv);
219 if (dev_priv->hpll_freq == 0)
220 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
222 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
224 vlv_cck_put(dev_priv);
229 static void intel_update_czclk(struct drm_i915_private *dev_priv)
231 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
234 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
235 CCK_CZ_CLOCK_CONTROL);
237 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
240 static inline u32 /* units of 100MHz */
241 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
242 const struct intel_crtc_state *pipe_config)
244 if (HAS_DDI(dev_priv))
245 return pipe_config->port_clock; /* SPLL */
247 return dev_priv->fdi_pll_freq;
250 static const struct intel_limit intel_limits_i8xx_dac = {
251 .dot = { .min = 25000, .max = 350000 },
252 .vco = { .min = 908000, .max = 1512000 },
253 .n = { .min = 2, .max = 16 },
254 .m = { .min = 96, .max = 140 },
255 .m1 = { .min = 18, .max = 26 },
256 .m2 = { .min = 6, .max = 16 },
257 .p = { .min = 4, .max = 128 },
258 .p1 = { .min = 2, .max = 33 },
259 .p2 = { .dot_limit = 165000,
260 .p2_slow = 4, .p2_fast = 2 },
263 static const struct intel_limit intel_limits_i8xx_dvo = {
264 .dot = { .min = 25000, .max = 350000 },
265 .vco = { .min = 908000, .max = 1512000 },
266 .n = { .min = 2, .max = 16 },
267 .m = { .min = 96, .max = 140 },
268 .m1 = { .min = 18, .max = 26 },
269 .m2 = { .min = 6, .max = 16 },
270 .p = { .min = 4, .max = 128 },
271 .p1 = { .min = 2, .max = 33 },
272 .p2 = { .dot_limit = 165000,
273 .p2_slow = 4, .p2_fast = 4 },
276 static const struct intel_limit intel_limits_i8xx_lvds = {
277 .dot = { .min = 25000, .max = 350000 },
278 .vco = { .min = 908000, .max = 1512000 },
279 .n = { .min = 2, .max = 16 },
280 .m = { .min = 96, .max = 140 },
281 .m1 = { .min = 18, .max = 26 },
282 .m2 = { .min = 6, .max = 16 },
283 .p = { .min = 4, .max = 128 },
284 .p1 = { .min = 1, .max = 6 },
285 .p2 = { .dot_limit = 165000,
286 .p2_slow = 14, .p2_fast = 7 },
289 static const struct intel_limit intel_limits_i9xx_sdvo = {
290 .dot = { .min = 20000, .max = 400000 },
291 .vco = { .min = 1400000, .max = 2800000 },
292 .n = { .min = 1, .max = 6 },
293 .m = { .min = 70, .max = 120 },
294 .m1 = { .min = 8, .max = 18 },
295 .m2 = { .min = 3, .max = 7 },
296 .p = { .min = 5, .max = 80 },
297 .p1 = { .min = 1, .max = 8 },
298 .p2 = { .dot_limit = 200000,
299 .p2_slow = 10, .p2_fast = 5 },
302 static const struct intel_limit intel_limits_i9xx_lvds = {
303 .dot = { .min = 20000, .max = 400000 },
304 .vco = { .min = 1400000, .max = 2800000 },
305 .n = { .min = 1, .max = 6 },
306 .m = { .min = 70, .max = 120 },
307 .m1 = { .min = 8, .max = 18 },
308 .m2 = { .min = 3, .max = 7 },
309 .p = { .min = 7, .max = 98 },
310 .p1 = { .min = 1, .max = 8 },
311 .p2 = { .dot_limit = 112000,
312 .p2_slow = 14, .p2_fast = 7 },
316 static const struct intel_limit intel_limits_g4x_sdvo = {
317 .dot = { .min = 25000, .max = 270000 },
318 .vco = { .min = 1750000, .max = 3500000},
319 .n = { .min = 1, .max = 4 },
320 .m = { .min = 104, .max = 138 },
321 .m1 = { .min = 17, .max = 23 },
322 .m2 = { .min = 5, .max = 11 },
323 .p = { .min = 10, .max = 30 },
324 .p1 = { .min = 1, .max = 3},
325 .p2 = { .dot_limit = 270000,
331 static const struct intel_limit intel_limits_g4x_hdmi = {
332 .dot = { .min = 22000, .max = 400000 },
333 .vco = { .min = 1750000, .max = 3500000},
334 .n = { .min = 1, .max = 4 },
335 .m = { .min = 104, .max = 138 },
336 .m1 = { .min = 16, .max = 23 },
337 .m2 = { .min = 5, .max = 11 },
338 .p = { .min = 5, .max = 80 },
339 .p1 = { .min = 1, .max = 8},
340 .p2 = { .dot_limit = 165000,
341 .p2_slow = 10, .p2_fast = 5 },
344 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
345 .dot = { .min = 20000, .max = 115000 },
346 .vco = { .min = 1750000, .max = 3500000 },
347 .n = { .min = 1, .max = 3 },
348 .m = { .min = 104, .max = 138 },
349 .m1 = { .min = 17, .max = 23 },
350 .m2 = { .min = 5, .max = 11 },
351 .p = { .min = 28, .max = 112 },
352 .p1 = { .min = 2, .max = 8 },
353 .p2 = { .dot_limit = 0,
354 .p2_slow = 14, .p2_fast = 14
358 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
359 .dot = { .min = 80000, .max = 224000 },
360 .vco = { .min = 1750000, .max = 3500000 },
361 .n = { .min = 1, .max = 3 },
362 .m = { .min = 104, .max = 138 },
363 .m1 = { .min = 17, .max = 23 },
364 .m2 = { .min = 5, .max = 11 },
365 .p = { .min = 14, .max = 42 },
366 .p1 = { .min = 2, .max = 6 },
367 .p2 = { .dot_limit = 0,
368 .p2_slow = 7, .p2_fast = 7
372 static const struct intel_limit intel_limits_pineview_sdvo = {
373 .dot = { .min = 20000, .max = 400000},
374 .vco = { .min = 1700000, .max = 3500000 },
375 /* Pineview's Ncounter is a ring counter */
376 .n = { .min = 3, .max = 6 },
377 .m = { .min = 2, .max = 256 },
378 /* Pineview only has one combined m divider, which we treat as m2. */
379 .m1 = { .min = 0, .max = 0 },
380 .m2 = { .min = 0, .max = 254 },
381 .p = { .min = 5, .max = 80 },
382 .p1 = { .min = 1, .max = 8 },
383 .p2 = { .dot_limit = 200000,
384 .p2_slow = 10, .p2_fast = 5 },
387 static const struct intel_limit intel_limits_pineview_lvds = {
388 .dot = { .min = 20000, .max = 400000 },
389 .vco = { .min = 1700000, .max = 3500000 },
390 .n = { .min = 3, .max = 6 },
391 .m = { .min = 2, .max = 256 },
392 .m1 = { .min = 0, .max = 0 },
393 .m2 = { .min = 0, .max = 254 },
394 .p = { .min = 7, .max = 112 },
395 .p1 = { .min = 1, .max = 8 },
396 .p2 = { .dot_limit = 112000,
397 .p2_slow = 14, .p2_fast = 14 },
400 /* Ironlake / Sandybridge
402 * We calculate clock using (register_value + 2) for N/M1/M2, so here
403 * the range value for them is (actual_value - 2).
405 static const struct intel_limit intel_limits_ironlake_dac = {
406 .dot = { .min = 25000, .max = 350000 },
407 .vco = { .min = 1760000, .max = 3510000 },
408 .n = { .min = 1, .max = 5 },
409 .m = { .min = 79, .max = 127 },
410 .m1 = { .min = 12, .max = 22 },
411 .m2 = { .min = 5, .max = 9 },
412 .p = { .min = 5, .max = 80 },
413 .p1 = { .min = 1, .max = 8 },
414 .p2 = { .dot_limit = 225000,
415 .p2_slow = 10, .p2_fast = 5 },
418 static const struct intel_limit intel_limits_ironlake_single_lvds = {
419 .dot = { .min = 25000, .max = 350000 },
420 .vco = { .min = 1760000, .max = 3510000 },
421 .n = { .min = 1, .max = 3 },
422 .m = { .min = 79, .max = 118 },
423 .m1 = { .min = 12, .max = 22 },
424 .m2 = { .min = 5, .max = 9 },
425 .p = { .min = 28, .max = 112 },
426 .p1 = { .min = 2, .max = 8 },
427 .p2 = { .dot_limit = 225000,
428 .p2_slow = 14, .p2_fast = 14 },
431 static const struct intel_limit intel_limits_ironlake_dual_lvds = {
432 .dot = { .min = 25000, .max = 350000 },
433 .vco = { .min = 1760000, .max = 3510000 },
434 .n = { .min = 1, .max = 3 },
435 .m = { .min = 79, .max = 127 },
436 .m1 = { .min = 12, .max = 22 },
437 .m2 = { .min = 5, .max = 9 },
438 .p = { .min = 14, .max = 56 },
439 .p1 = { .min = 2, .max = 8 },
440 .p2 = { .dot_limit = 225000,
441 .p2_slow = 7, .p2_fast = 7 },
444 /* LVDS 100mhz refclk limits. */
445 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
446 .dot = { .min = 25000, .max = 350000 },
447 .vco = { .min = 1760000, .max = 3510000 },
448 .n = { .min = 1, .max = 2 },
449 .m = { .min = 79, .max = 126 },
450 .m1 = { .min = 12, .max = 22 },
451 .m2 = { .min = 5, .max = 9 },
452 .p = { .min = 28, .max = 112 },
453 .p1 = { .min = 2, .max = 8 },
454 .p2 = { .dot_limit = 225000,
455 .p2_slow = 14, .p2_fast = 14 },
458 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
459 .dot = { .min = 25000, .max = 350000 },
460 .vco = { .min = 1760000, .max = 3510000 },
461 .n = { .min = 1, .max = 3 },
462 .m = { .min = 79, .max = 126 },
463 .m1 = { .min = 12, .max = 22 },
464 .m2 = { .min = 5, .max = 9 },
465 .p = { .min = 14, .max = 42 },
466 .p1 = { .min = 2, .max = 6 },
467 .p2 = { .dot_limit = 225000,
468 .p2_slow = 7, .p2_fast = 7 },
471 static const struct intel_limit intel_limits_vlv = {
473 * These are the data rate limits (measured in fast clocks)
474 * since those are the strictest limits we have. The fast
475 * clock and actual rate limits are more relaxed, so checking
476 * them would make no difference.
478 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
479 .vco = { .min = 4000000, .max = 6000000 },
480 .n = { .min = 1, .max = 7 },
481 .m1 = { .min = 2, .max = 3 },
482 .m2 = { .min = 11, .max = 156 },
483 .p1 = { .min = 2, .max = 3 },
484 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
487 static const struct intel_limit intel_limits_chv = {
489 * These are the data rate limits (measured in fast clocks)
490 * since those are the strictest limits we have. The fast
491 * clock and actual rate limits are more relaxed, so checking
492 * them would make no difference.
494 .dot = { .min = 25000 * 5, .max = 540000 * 5},
495 .vco = { .min = 4800000, .max = 6480000 },
496 .n = { .min = 1, .max = 1 },
497 .m1 = { .min = 2, .max = 2 },
498 .m2 = { .min = 24 << 22, .max = 175 << 22 },
499 .p1 = { .min = 2, .max = 4 },
500 .p2 = { .p2_slow = 1, .p2_fast = 14 },
503 static const struct intel_limit intel_limits_bxt = {
504 /* FIXME: find real dot limits */
505 .dot = { .min = 0, .max = INT_MAX },
506 .vco = { .min = 4800000, .max = 6700000 },
507 .n = { .min = 1, .max = 1 },
508 .m1 = { .min = 2, .max = 2 },
509 /* FIXME: find real m2 limits */
510 .m2 = { .min = 2 << 22, .max = 255 << 22 },
511 .p1 = { .min = 2, .max = 4 },
512 .p2 = { .p2_slow = 1, .p2_fast = 20 },
515 /* WA Display #0827: Gen9:all */
517 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
520 I915_WRITE(CLKGATE_DIS_PSL(pipe),
521 I915_READ(CLKGATE_DIS_PSL(pipe)) |
522 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
524 I915_WRITE(CLKGATE_DIS_PSL(pipe),
525 I915_READ(CLKGATE_DIS_PSL(pipe)) &
526 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
529 /* Wa_2006604312:icl */
531 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
535 I915_WRITE(CLKGATE_DIS_PSL(pipe),
536 I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
538 I915_WRITE(CLKGATE_DIS_PSL(pipe),
539 I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
543 needs_modeset(const struct intel_crtc_state *state)
545 return drm_atomic_crtc_needs_modeset(&state->uapi);
549 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
551 return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
552 crtc_state->sync_mode_slaves_mask);
556 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
558 return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
559 crtc_state->sync_mode_slaves_mask);
563 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
565 return crtc_state->master_transcoder != INVALID_TRANSCODER;
569 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
570 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
571 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
572 * The helpers' return value is the rate of the clock that is fed to the
573 * display engine's pipe which can be the above fast dot clock rate or a
574 * divided-down version of it.
576 /* m1 is reserved as 0 in Pineview, n is a ring counter */
577 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
579 clock->m = clock->m2 + 2;
580 clock->p = clock->p1 * clock->p2;
581 if (WARN_ON(clock->n == 0 || clock->p == 0))
583 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
584 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
589 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
591 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
594 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
596 clock->m = i9xx_dpll_compute_m(clock);
597 clock->p = clock->p1 * clock->p2;
598 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
600 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
601 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
606 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
608 clock->m = clock->m1 * clock->m2;
609 clock->p = clock->p1 * clock->p2;
610 if (WARN_ON(clock->n == 0 || clock->p == 0))
612 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
613 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
615 return clock->dot / 5;
618 int chv_calc_dpll_params(int refclk, struct dpll *clock)
620 clock->m = clock->m1 * clock->m2;
621 clock->p = clock->p1 * clock->p2;
622 if (WARN_ON(clock->n == 0 || clock->p == 0))
624 clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
626 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
628 return clock->dot / 5;
631 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
634 * Returns whether the given set of divisors are valid for a given refclk with
635 * the given connectors.
637 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
638 const struct intel_limit *limit,
639 const struct dpll *clock)
641 if (clock->n < limit->n.min || limit->n.max < clock->n)
642 INTELPllInvalid("n out of range\n");
643 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
644 INTELPllInvalid("p1 out of range\n");
645 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
646 INTELPllInvalid("m2 out of range\n");
647 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
648 INTELPllInvalid("m1 out of range\n");
650 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
651 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
652 if (clock->m1 <= clock->m2)
653 INTELPllInvalid("m1 <= m2\n");
655 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
656 !IS_GEN9_LP(dev_priv)) {
657 if (clock->p < limit->p.min || limit->p.max < clock->p)
658 INTELPllInvalid("p out of range\n");
659 if (clock->m < limit->m.min || limit->m.max < clock->m)
660 INTELPllInvalid("m out of range\n");
663 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
664 INTELPllInvalid("vco out of range\n");
665 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
666 * connector, etc., rather than just a single range.
668 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
669 INTELPllInvalid("dot out of range\n");
675 i9xx_select_p2_div(const struct intel_limit *limit,
676 const struct intel_crtc_state *crtc_state,
679 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
681 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
683 * For LVDS just rely on its current settings for dual-channel.
684 * We haven't figured out how to reliably set up different
685 * single/dual channel state, if we even can.
687 if (intel_is_dual_link_lvds(dev_priv))
688 return limit->p2.p2_fast;
690 return limit->p2.p2_slow;
692 if (target < limit->p2.dot_limit)
693 return limit->p2.p2_slow;
695 return limit->p2.p2_fast;
700 * Returns a set of divisors for the desired target clock with the given
701 * refclk, or FALSE. The returned values represent the clock equation:
702 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
704 * Target and reference clocks are specified in kHz.
706 * If match_clock is provided, then best_clock P divider must match the P
707 * divider from @match_clock used for LVDS downclocking.
710 i9xx_find_best_dpll(const struct intel_limit *limit,
711 struct intel_crtc_state *crtc_state,
712 int target, int refclk, struct dpll *match_clock,
713 struct dpll *best_clock)
715 struct drm_device *dev = crtc_state->uapi.crtc->dev;
719 memset(best_clock, 0, sizeof(*best_clock));
721 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
723 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
725 for (clock.m2 = limit->m2.min;
726 clock.m2 <= limit->m2.max; clock.m2++) {
727 if (clock.m2 >= clock.m1)
729 for (clock.n = limit->n.min;
730 clock.n <= limit->n.max; clock.n++) {
731 for (clock.p1 = limit->p1.min;
732 clock.p1 <= limit->p1.max; clock.p1++) {
735 i9xx_calc_dpll_params(refclk, &clock);
736 if (!intel_PLL_is_valid(to_i915(dev),
741 clock.p != match_clock->p)
744 this_err = abs(clock.dot - target);
745 if (this_err < err) {
754 return (err != target);
758 * Returns a set of divisors for the desired target clock with the given
759 * refclk, or FALSE. The returned values represent the clock equation:
760 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
762 * Target and reference clocks are specified in kHz.
764 * If match_clock is provided, then best_clock P divider must match the P
765 * divider from @match_clock used for LVDS downclocking.
768 pnv_find_best_dpll(const struct intel_limit *limit,
769 struct intel_crtc_state *crtc_state,
770 int target, int refclk, struct dpll *match_clock,
771 struct dpll *best_clock)
773 struct drm_device *dev = crtc_state->uapi.crtc->dev;
777 memset(best_clock, 0, sizeof(*best_clock));
779 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
781 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
783 for (clock.m2 = limit->m2.min;
784 clock.m2 <= limit->m2.max; clock.m2++) {
785 for (clock.n = limit->n.min;
786 clock.n <= limit->n.max; clock.n++) {
787 for (clock.p1 = limit->p1.min;
788 clock.p1 <= limit->p1.max; clock.p1++) {
791 pnv_calc_dpll_params(refclk, &clock);
792 if (!intel_PLL_is_valid(to_i915(dev),
797 clock.p != match_clock->p)
800 this_err = abs(clock.dot - target);
801 if (this_err < err) {
810 return (err != target);
814 * Returns a set of divisors for the desired target clock with the given
815 * refclk, or FALSE. The returned values represent the clock equation:
816 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
818 * Target and reference clocks are specified in kHz.
820 * If match_clock is provided, then best_clock P divider must match the P
821 * divider from @match_clock used for LVDS downclocking.
824 g4x_find_best_dpll(const struct intel_limit *limit,
825 struct intel_crtc_state *crtc_state,
826 int target, int refclk, struct dpll *match_clock,
827 struct dpll *best_clock)
829 struct drm_device *dev = crtc_state->uapi.crtc->dev;
833 /* approximately equals target * 0.00585 */
834 int err_most = (target >> 8) + (target >> 9);
836 memset(best_clock, 0, sizeof(*best_clock));
838 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
840 max_n = limit->n.max;
841 /* based on hardware requirement, prefer smaller n to precision */
842 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
843 /* based on hardware requirement, prefere larger m1,m2 */
844 for (clock.m1 = limit->m1.max;
845 clock.m1 >= limit->m1.min; clock.m1--) {
846 for (clock.m2 = limit->m2.max;
847 clock.m2 >= limit->m2.min; clock.m2--) {
848 for (clock.p1 = limit->p1.max;
849 clock.p1 >= limit->p1.min; clock.p1--) {
852 i9xx_calc_dpll_params(refclk, &clock);
853 if (!intel_PLL_is_valid(to_i915(dev),
858 this_err = abs(clock.dot - target);
859 if (this_err < err_most) {
873 * Check if the calculated PLL configuration is more optimal compared to the
874 * best configuration and error found so far. Return the calculated error.
876 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
877 const struct dpll *calculated_clock,
878 const struct dpll *best_clock,
879 unsigned int best_error_ppm,
880 unsigned int *error_ppm)
883 * For CHV ignore the error and consider only the P value.
884 * Prefer a bigger P value based on HW requirements.
886 if (IS_CHERRYVIEW(to_i915(dev))) {
889 return calculated_clock->p > best_clock->p;
892 if (WARN_ON_ONCE(!target_freq))
895 *error_ppm = div_u64(1000000ULL *
896 abs(target_freq - calculated_clock->dot),
899 * Prefer a better P value over a better (smaller) error if the error
900 * is small. Ensure this preference for future configurations too by
901 * setting the error to 0.
903 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
909 return *error_ppm + 10 < best_error_ppm;
913 * Returns a set of divisors for the desired target clock with the given
914 * refclk, or FALSE. The returned values represent the clock equation:
915 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
918 vlv_find_best_dpll(const struct intel_limit *limit,
919 struct intel_crtc_state *crtc_state,
920 int target, int refclk, struct dpll *match_clock,
921 struct dpll *best_clock)
923 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
924 struct drm_device *dev = crtc->base.dev;
926 unsigned int bestppm = 1000000;
927 /* min update 19.2 MHz */
928 int max_n = min(limit->n.max, refclk / 19200);
931 target *= 5; /* fast clock */
933 memset(best_clock, 0, sizeof(*best_clock));
935 /* based on hardware requirement, prefer smaller n to precision */
936 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
937 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
938 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
939 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
940 clock.p = clock.p1 * clock.p2;
941 /* based on hardware requirement, prefer bigger m1,m2 values */
942 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
945 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
948 vlv_calc_dpll_params(refclk, &clock);
950 if (!intel_PLL_is_valid(to_i915(dev),
955 if (!vlv_PLL_is_optimal(dev, target,
973 * Returns a set of divisors for the desired target clock with the given
974 * refclk, or FALSE. The returned values represent the clock equation:
975 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
978 chv_find_best_dpll(const struct intel_limit *limit,
979 struct intel_crtc_state *crtc_state,
980 int target, int refclk, struct dpll *match_clock,
981 struct dpll *best_clock)
983 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
984 struct drm_device *dev = crtc->base.dev;
985 unsigned int best_error_ppm;
990 memset(best_clock, 0, sizeof(*best_clock));
991 best_error_ppm = 1000000;
994 * Based on hardware doc, the n always set to 1, and m1 always
995 * set to 2. If requires to support 200Mhz refclk, we need to
996 * revisit this because n may not 1 anymore.
998 clock.n = 1, clock.m1 = 2;
999 target *= 5; /* fast clock */
1001 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1002 for (clock.p2 = limit->p2.p2_fast;
1003 clock.p2 >= limit->p2.p2_slow;
1004 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1005 unsigned int error_ppm;
1007 clock.p = clock.p1 * clock.p2;
1009 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1012 if (m2 > INT_MAX/clock.m1)
1017 chv_calc_dpll_params(refclk, &clock);
1019 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1022 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1023 best_error_ppm, &error_ppm))
1026 *best_clock = clock;
1027 best_error_ppm = error_ppm;
1035 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1036 struct dpll *best_clock)
1038 int refclk = 100000;
1039 const struct intel_limit *limit = &intel_limits_bxt;
1041 return chv_find_best_dpll(limit, crtc_state,
1042 crtc_state->port_clock, refclk,
1046 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1049 i915_reg_t reg = PIPEDSL(pipe);
1053 if (IS_GEN(dev_priv, 2))
1054 line_mask = DSL_LINEMASK_GEN2;
1056 line_mask = DSL_LINEMASK_GEN3;
1058 line1 = I915_READ(reg) & line_mask;
1060 line2 = I915_READ(reg) & line_mask;
1062 return line1 != line2;
1065 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1067 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1068 enum pipe pipe = crtc->pipe;
1070 /* Wait for the display line to settle/start moving */
1071 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1072 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1073 pipe_name(pipe), onoff(state));
1076 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1078 wait_for_pipe_scanline_moving(crtc, false);
1081 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1083 wait_for_pipe_scanline_moving(crtc, true);
1087 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1089 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1090 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1092 if (INTEL_GEN(dev_priv) >= 4) {
1093 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1094 i915_reg_t reg = PIPECONF(cpu_transcoder);
1096 /* Wait for the Pipe State to go off */
1097 if (intel_de_wait_for_clear(dev_priv, reg,
1098 I965_PIPECONF_ACTIVE, 100))
1099 WARN(1, "pipe_off wait timed out\n");
1101 intel_wait_for_pipe_scanline_stopped(crtc);
1105 /* Only for pre-ILK configs */
1106 void assert_pll(struct drm_i915_private *dev_priv,
1107 enum pipe pipe, bool state)
1112 val = I915_READ(DPLL(pipe));
1113 cur_state = !!(val & DPLL_VCO_ENABLE);
1114 I915_STATE_WARN(cur_state != state,
1115 "PLL state assertion failure (expected %s, current %s)\n",
1116 onoff(state), onoff(cur_state));
1119 /* XXX: the dsi pll is shared between MIPI DSI ports */
1120 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1125 vlv_cck_get(dev_priv);
1126 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1127 vlv_cck_put(dev_priv);
1129 cur_state = val & DSI_PLL_VCO_EN;
1130 I915_STATE_WARN(cur_state != state,
1131 "DSI PLL state assertion failure (expected %s, current %s)\n",
1132 onoff(state), onoff(cur_state));
/*
 * assert_fdi_tx - warn if the FDI transmitter enable state does not match
 * @state. DDI platforms have no dedicated FDI_TX register, so the enable bit
 * is read from TRANS_DDI_FUNC_CTL instead (pipe cast to transcoder is safe
 * per the comment below); older platforms use FDI_TX_CTL.
 */
1135 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1136 enum pipe pipe, bool state)
1140 if (HAS_DDI(dev_priv)) {
1142 * DDI does not have a specific FDI_TX register.
1144 * FDI is never fed from EDP transcoder
1145 * so pipe->transcoder cast is fine here.
1147 enum transcoder cpu_transcoder = (enum transcoder)pipe;
1148 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1149 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1151 u32 val = I915_READ(FDI_TX_CTL(pipe));
1152 cur_state = !!(val & FDI_TX_ENABLE);
1154 I915_STATE_WARN(cur_state != state,
1155 "FDI TX state assertion failure (expected %s, current %s)\n",
1156 onoff(state), onoff(cur_state));
1158 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1159 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
/*
 * assert_fdi_rx - warn if the FDI receiver enable bit in FDI_RX_CTL does not
 * match @state.
 */
1161 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1162 enum pipe pipe, bool state)
1167 val = I915_READ(FDI_RX_CTL(pipe));
1168 cur_state = !!(val & FDI_RX_ENABLE);
1169 I915_STATE_WARN(cur_state != state,
1170 "FDI RX state assertion failure (expected %s, current %s)\n",
1171 onoff(state), onoff(cur_state));
1173 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1174 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
/*
 * assert_fdi_tx_pll_enabled - warn if the FDI TX PLL is not enabled.
 * Skipped on ILK (gen5), where the FDI PLL is always on, and on DDI
 * platforms, where the DDI ports own FDI PLL setup.
 */
1176 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1181 /* ILK FDI PLL is always enabled */
1182 if (IS_GEN(dev_priv, 5))
1185 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1186 if (HAS_DDI(dev_priv))
1189 val = I915_READ(FDI_TX_CTL(pipe));
1190 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
/*
 * assert_fdi_rx_pll - warn if the FDI RX PLL enable bit does not match
 * @state.
 */
1193 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1194 enum pipe pipe, bool state)
1199 val = I915_READ(FDI_RX_CTL(pipe));
1200 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1201 I915_STATE_WARN(cur_state != state,
1202 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1203 onoff(state), onoff(cur_state));
/*
 * assert_panel_unlocked - warn if the panel power sequencer registers for the
 * panel driven by @pipe are write-locked. First resolves which pipe the panel
 * is actually on: PCH platforms decode the port-select field of PP_ON_DELAYS,
 * VLV/CHV have per-pipe PP_CONTROL, and everything else assumes LVDS. Not
 * applicable on DDI platforms (WARN_ON). The registers count as unlocked when
 * panel power is off or PANEL_UNLOCK_REGS is set.
 * NOTE(review): switch-case break statements and some declarations are elided
 * in this extraction; code left byte-identical.
 */
1206 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1210 enum pipe panel_pipe = INVALID_PIPE;
1213 if (WARN_ON(HAS_DDI(dev_priv)))
1216 if (HAS_PCH_SPLIT(dev_priv)) {
1219 pp_reg = PP_CONTROL(0);
1220 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1223 case PANEL_PORT_SELECT_LVDS:
1224 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1226 case PANEL_PORT_SELECT_DPA:
1227 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1229 case PANEL_PORT_SELECT_DPC:
1230 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1232 case PANEL_PORT_SELECT_DPD:
1233 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1236 MISSING_CASE(port_sel);
1239 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1240 /* presumably write lock depends on pipe, not port select */
1241 pp_reg = PP_CONTROL(pipe);
1246 pp_reg = PP_CONTROL(0);
1247 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1249 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
1250 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1253 val = I915_READ(pp_reg);
1254 if (!(val & PANEL_POWER_ON) ||
1255 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1258 I915_STATE_WARN(panel_pipe == pipe && locked,
1259 "panel assertion failure, pipe %c regs locked\n",
/*
 * assert_pipe - warn if the transcoder's PIPECONF enable bit does not match
 * @state. Skipped on 830 (both pipes are kept enabled there). The PIPECONF
 * read is guarded by a conditional power-domain reference so a powered-down
 * transcoder is not touched.
 */
1263 void assert_pipe(struct drm_i915_private *dev_priv,
1264 enum transcoder cpu_transcoder, bool state)
1267 enum intel_display_power_domain power_domain;
1268 intel_wakeref_t wakeref;
1270 /* we keep both pipes enabled on 830 */
1271 if (IS_I830(dev_priv))
1274 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1275 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1277 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1278 cur_state = !!(val & PIPECONF_ENABLE);
1280 intel_display_power_put(dev_priv, power_domain, wakeref);
1285 I915_STATE_WARN(cur_state != state,
1286 "transcoder %s assertion failure (expected %s, current %s)\n",
1287 transcoder_name(cpu_transcoder),
1288 onoff(state), onoff(cur_state));
/*
 * assert_plane - warn if a plane's hardware enable state (queried via its
 * get_hw_state vfunc) does not match @state.
 */
1291 static void assert_plane(struct intel_plane *plane, bool state)
1296 cur_state = plane->get_hw_state(plane, &pipe);
1298 I915_STATE_WARN(cur_state != state,
1299 "%s assertion failure (expected %s, current %s)\n",
1300 plane->base.name, onoff(state), onoff(cur_state));
1303 #define assert_plane_enabled(p) assert_plane(p, true)
1304 #define assert_plane_disabled(p) assert_plane(p, false)
/* Warn unless every plane attached to @crtc is disabled in hardware. */
1306 static void assert_planes_disabled(struct intel_crtc *crtc)
1308 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1309 struct intel_plane *plane;
1311 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1312 assert_plane_disabled(plane);
/*
 * Warn if vblanks are still enabled on @crtc: a successful vblank_get here
 * means they were, and the matching put restores the refcount.
 */
1315 static void assert_vblank_disabled(struct drm_crtc *crtc)
1317 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1318 drm_crtc_vblank_put(crtc);
/* Warn if the PCH transcoder for @pipe still has TRANS_ENABLE set. */
1321 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1327 val = I915_READ(PCH_TRANSCONF(pipe));
1328 enabled = !!(val & TRANS_ENABLE);
1329 I915_STATE_WARN(enabled,
1330 "transcoder assertion failed, should be off on pipe %c but is still active\n",
/*
 * assert_pch_dp_disabled - warn if a PCH DP port is enabled on @pipe's
 * transcoder, and additionally (IBX only) if a disabled port is still
 * parked on transcoder B, which confuses that hardware.
 */
1334 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1335 enum pipe pipe, enum port port,
1338 enum pipe port_pipe;
1341 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1343 I915_STATE_WARN(state && port_pipe == pipe,
1344 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1345 port_name(port), pipe_name(pipe));
1347 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1348 "IBX PCH DP %c still using transcoder B\n",
/* Same checks as assert_pch_dp_disabled(), but for an SDVO/HDMI port. */
1352 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1353 enum pipe pipe, enum port port,
1354 i915_reg_t hdmi_reg)
1356 enum pipe port_pipe;
1359 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1361 I915_STATE_WARN(state && port_pipe == pipe,
1362 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1363 port_name(port), pipe_name(pipe));
1365 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1366 "IBX PCH HDMI %c still using transcoder B\n",
/*
 * assert_pch_ports_disabled - warn if any PCH output port (DP B/C/D, VGA,
 * LVDS, HDMI B/C/D) is still enabled on @pipe's transcoder.
 */
1370 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1373 enum pipe port_pipe;
1375 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1376 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1377 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1379 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1381 "PCH VGA enabled on transcoder %c, should be disabled\n",
1384 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1386 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1389 /* PCH SDVOB multiplex with HDMIB */
1390 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1391 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1392 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
/*
 * _vlv_enable_pll - program the DPLL register for @pipe from the precomputed
 * hw state and wait (1 ms) for the PLL to report lock.
 */
1395 static void _vlv_enable_pll(struct intel_crtc *crtc,
1396 const struct intel_crtc_state *pipe_config)
1398 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1399 enum pipe pipe = crtc->pipe;
1401 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1402 POSTING_READ(DPLL(pipe));
1405 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1406 DRM_ERROR("DPLL %d failed to lock\n", pipe);
/*
 * vlv_enable_pll - VLV PLL enable entry point: checks that the pipe is off
 * and the panel power sequencer is unlocked (the PLL registers are protected
 * by the panel), enables the PLL only if the state asks for VCO enable, and
 * always programs DPLL_MD.
 */
1409 static void vlv_enable_pll(struct intel_crtc *crtc,
1410 const struct intel_crtc_state *pipe_config)
1412 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1413 enum pipe pipe = crtc->pipe;
1415 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
1417 /* PLL is protected by panel, make sure we can write it */
1418 assert_panel_unlocked(dev_priv, pipe);
1420 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1421 _vlv_enable_pll(crtc, pipe_config);
1423 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1424 POSTING_READ(DPLL_MD(pipe));
/*
 * _chv_enable_pll - CHV PLL enable: first re-enables the 10-bit clock to the
 * display controller via the DPIO sideband (CHV_CMN_DW14), then programs the
 * DPLL register and waits (1 ms) for lock.
 */
1428 static void _chv_enable_pll(struct intel_crtc *crtc,
1429 const struct intel_crtc_state *pipe_config)
1431 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1432 enum pipe pipe = crtc->pipe;
1433 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1436 vlv_dpio_get(dev_priv);
1438 /* Enable back the 10bit clock to display controller */
1439 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1440 tmp |= DPIO_DCLKP_EN;
1441 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1443 vlv_dpio_put(dev_priv);
1446 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1451 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1453 /* Check PLL is locked */
1454 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1455 DRM_ERROR("PLL %d failed to lock\n", pipe);
/*
 * chv_enable_pll - CHV PLL enable entry point. Mirrors vlv_enable_pll(), but
 * pipes B/C need the WaPixelRepeatModeFixForC0 workaround: their DPLL MD
 * register is not writable directly, so the value is propagated from
 * DPLLBMD via the CBR4 chicken bits and cached in chv_dpll_md[].
 */
1458 static void chv_enable_pll(struct intel_crtc *crtc,
1459 const struct intel_crtc_state *pipe_config)
1461 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1462 enum pipe pipe = crtc->pipe;
1464 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
1466 /* PLL is protected by panel, make sure we can write it */
1467 assert_panel_unlocked(dev_priv, pipe);
1469 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1470 _chv_enable_pll(crtc, pipe_config);
1472 if (pipe != PIPE_A) {
1474 * WaPixelRepeatModeFixForC0:chv
1476 * DPLLCMD is AWOL. Use chicken bits to propagate
1477 * the value from DPLLBMD to either pipe B or C.
1479 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1480 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1481 I915_WRITE(CBR4_VLV, 0);
1482 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1485 * DPLLB VGA mode also seems to cause problems.
1486 * We should always have it disabled.
1488 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1490 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1491 POSTING_READ(DPLL_MD(pipe));
/*
 * i9xx_has_pps - does this platform have a panel power sequencer?
 * 830 does not; otherwise Pineview and mobile parts do.
 */
1495 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1497 if (IS_I830(dev_priv))
1500 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
/*
 * i9xx_enable_pll - enable the pipe DPLL on gen2-4 style hardware.
 * Quirks handled here: the P1/P2 dividers only take effect if VGA mode is
 * enabled first (hence the write with DPLL_VGA_MODE_DIS cleared, then the
 * final value); on gen4+ the pixel multiplier in DPLL_MD only latches once
 * the PLL is running, so DPLL is rewritten afterwards; and the register is
 * written three more times "for luck" with warmup delays.
 */
1503 static void i9xx_enable_pll(struct intel_crtc *crtc,
1504 const struct intel_crtc_state *crtc_state)
1506 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1507 i915_reg_t reg = DPLL(crtc->pipe);
1508 u32 dpll = crtc_state->dpll_hw_state.dpll;
1511 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
1513 /* PLL is protected by panel, make sure we can write it */
1514 if (i9xx_has_pps(dev_priv))
1515 assert_panel_unlocked(dev_priv, crtc->pipe);
1518 * Apparently we need to have VGA mode enabled prior to changing
1519 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1520 * dividers, even though the register value does change.
1522 I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
1523 I915_WRITE(reg, dpll);
1525 /* Wait for the clocks to stabilize. */
1529 if (INTEL_GEN(dev_priv) >= 4) {
1530 I915_WRITE(DPLL_MD(crtc->pipe),
1531 crtc_state->dpll_hw_state.dpll_md);
1533 /* The pixel multiplier can only be updated once the
1534 * DPLL is enabled and the clocks are stable.
1536 * So write it again.
1538 I915_WRITE(reg, dpll);
1541 /* We do this three times for luck */
1542 for (i = 0; i < 3; i++) {
1543 I915_WRITE(reg, dpll);
1545 udelay(150); /* wait for warmup */
/*
 * i9xx_disable_pll - disable the pipe DPLL, leaving only VGA-mode-disable
 * set. Skipped on 830, which needs its pipes/PLLs kept running.
 */
1549 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1551 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1552 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1553 enum pipe pipe = crtc->pipe;
1555 /* Don't disable pipe or pipe PLLs if needed */
1556 if (IS_I830(dev_priv))
1559 /* Make sure the pipe isn't still relying on us */
1560 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
1562 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1563 POSTING_READ(DPLL(pipe));
/*
 * vlv_disable_pll - disable the VLV pipe DPLL while keeping the reference
 * clock (and CRI clock for the integrated PHY) running.
 */
1566 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1570 /* Make sure the pipe isn't still relying on us */
1571 assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
1573 val = DPLL_INTEGRATED_REF_CLK_VLV |
1574 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1576 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1578 I915_WRITE(DPLL(pipe), val);
1579 POSTING_READ(DPLL(pipe));
/*
 * chv_disable_pll - disable the CHV pipe DPLL, then shut off the 10-bit
 * clock to the display controller via the DPIO sideband (undoes what
 * _chv_enable_pll() turned on).
 */
1582 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1584 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1587 /* Make sure the pipe isn't still relying on us */
1588 assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
1590 val = DPLL_SSC_REF_CLK_CHV |
1591 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1593 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1595 I915_WRITE(DPLL(pipe), val);
1596 POSTING_READ(DPLL(pipe));
1598 vlv_dpio_get(dev_priv);
1600 /* Disable 10bit clock to display controller */
1601 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1602 val &= ~DPIO_DCLKP_EN;
1603 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1605 vlv_dpio_put(dev_priv);
/*
 * vlv_wait_port_ready - poll the DPLL/PHY status register until the digital
 * port's ready bits match @expected_mask (1 s timeout), warning with the
 * observed value on timeout. Port D lives in DPIO_PHY_STATUS and port C's
 * expected mask is shifted to its bit position.
 * NOTE(review): switch-case break statements are elided in this extraction.
 */
1608 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1609 struct intel_digital_port *dport,
1610 unsigned int expected_mask)
1613 i915_reg_t dpll_reg;
1615 switch (dport->base.port) {
1617 port_mask = DPLL_PORTB_READY_MASK;
1621 port_mask = DPLL_PORTC_READY_MASK;
1623 expected_mask <<= 4;
1626 port_mask = DPLL_PORTD_READY_MASK;
1627 dpll_reg = DPIO_PHY_STATUS;
1633 if (intel_de_wait_for_register(dev_priv, dpll_reg,
1634 port_mask, expected_mask, 1000))
1635 WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
1636 dport->base.base.base.id, dport->base.base.name,
1637 I915_READ(dpll_reg) & port_mask, expected_mask);
/*
 * ironlake_enable_pch_transcoder - enable the PCH transcoder feeding @crtc.
 * Preconditions (asserted): the shared PCH DPLL is on and FDI TX/RX are
 * enabled. On CPT the TRANS_CHICKEN2 timing-override workaround and frame
 * start delay are programmed first; on IBX the BPC and frame start delay are
 * copied into the transcoder register (HDMI is forced to 8bpc, and SDVO
 * interlaced output uses the legacy interlace mode). Finally TRANS_ENABLE is
 * set and the function polls (100 ms) for TRANS_STATE_ENABLE.
 */
1640 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1642 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1643 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1644 enum pipe pipe = crtc->pipe;
1646 u32 val, pipeconf_val;
1648 /* Make sure PCH DPLL is enabled */
1649 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1651 /* FDI must be feeding us bits for PCH ports */
1652 assert_fdi_tx_enabled(dev_priv, pipe);
1653 assert_fdi_rx_enabled(dev_priv, pipe);
1655 if (HAS_PCH_CPT(dev_priv)) {
1656 reg = TRANS_CHICKEN2(pipe);
1657 val = I915_READ(reg);
1659 * Workaround: Set the timing override bit
1660 * before enabling the pch transcoder.
1662 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1663 /* Configure frame start delay to match the CPU */
1664 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
1665 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
1666 I915_WRITE(reg, val);
1669 reg = PCH_TRANSCONF(pipe);
1670 val = I915_READ(reg);
1671 pipeconf_val = I915_READ(PIPECONF(pipe));
1673 if (HAS_PCH_IBX(dev_priv)) {
1674 /* Configure frame start delay to match the CPU */
1675 val &= ~TRANS_FRAME_START_DELAY_MASK;
1676 val |= TRANS_FRAME_START_DELAY(0);
1679 * Make the BPC in transcoder be consistent with
1680 * that in pipeconf reg. For HDMI we must use 8bpc
1681 * here for both 8bpc and 12bpc.
1683 val &= ~PIPECONF_BPC_MASK;
1684 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1685 val |= PIPECONF_8BPC;
1687 val |= pipeconf_val & PIPECONF_BPC_MASK;
1690 val &= ~TRANS_INTERLACE_MASK;
1691 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1692 if (HAS_PCH_IBX(dev_priv) &&
1693 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1694 val |= TRANS_LEGACY_INTERLACED_ILK;
1696 val |= TRANS_INTERLACED;
1698 val |= TRANS_PROGRESSIVE;
1701 I915_WRITE(reg, val | TRANS_ENABLE);
1702 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
1703 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
/*
 * lpt_enable_pch_transcoder - enable the single LPT PCH transcoder, which is
 * always fed by FDI on pipe A. Applies the TRANS_CHICKEN2 timing-override
 * workaround and frame start delay, copies the interlace mode from the CPU
 * transcoder's PIPECONF, then sets LPT_TRANSCONF and polls (100 ms) for
 * TRANS_STATE_ENABLE.
 */
1706 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1707 enum transcoder cpu_transcoder)
1709 u32 val, pipeconf_val;
1711 /* FDI must be feeding us bits for PCH ports */
1712 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1713 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1715 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1716 /* Workaround: set timing override bit. */
1717 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1718 /* Configure frame start delay to match the CPU */
1719 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
1720 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
1721 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1724 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1726 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1727 PIPECONF_INTERLACED_ILK)
1728 val |= TRANS_INTERLACED;
1730 val |= TRANS_PROGRESSIVE;
1732 I915_WRITE(LPT_TRANSCONF, val);
1733 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
1734 TRANS_STATE_ENABLE, 100))
1735 DRM_ERROR("Failed to enable PCH transcoder\n");
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for a pipe.
 * Preconditions (asserted): FDI TX/RX and all PCH ports are already off.
 * Clears TRANS_ENABLE, polls (50 ms) for TRANS_STATE_ENABLE to drop, and on
 * CPT clears the timing-override chicken bit set at enable time.
 */
1738 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1744 /* FDI relies on the transcoder */
1745 assert_fdi_tx_disabled(dev_priv, pipe);
1746 assert_fdi_rx_disabled(dev_priv, pipe);
1748 /* Ports must be off as well */
1749 assert_pch_ports_disabled(dev_priv, pipe);
1751 reg = PCH_TRANSCONF(pipe);
1752 val = I915_READ(reg);
1753 val &= ~TRANS_ENABLE;
1754 I915_WRITE(reg, val);
1755 /* wait for PCH transcoder off, transcoder state */
1756 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
1757 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1759 if (HAS_PCH_CPT(dev_priv)) {
1760 /* Workaround: Clear the timing override chicken bit again. */
1761 reg = TRANS_CHICKEN2(pipe);
1762 val = I915_READ(reg);
1763 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1764 I915_WRITE(reg, val);
/*
 * lpt_disable_pch_transcoder - disable the LPT PCH transcoder and clear the
 * timing-override workaround bit afterwards.
 */
1768 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1772 val = I915_READ(LPT_TRANSCONF);
1773 val &= ~TRANS_ENABLE;
1774 I915_WRITE(LPT_TRANSCONF, val);
1775 /* wait for PCH transcoder off, transcoder state */
1776 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
1777 TRANS_STATE_ENABLE, 50))
1778 DRM_ERROR("Failed to disable PCH transcoder\n");
1780 /* Workaround: clear timing override bit. */
1781 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1782 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1783 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
/*
 * intel_crtc_pch_transcoder - which PCH transcoder serves this crtc?
 * LPT has a single PCH transcoder (the visible branch); the non-LPT return
 * is elided in this extraction — presumably the crtc's own pipe.
 */
1786 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1788 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1790 if (HAS_PCH_LPT(dev_priv))
/*
 * intel_crtc_max_vblank_count - maximum value of the hardware frame counter
 * for vblank timestamping: 0 on gen2 and on i965gm with the TV encoder
 * active (no usable counter), 24 bits on gen3/4, full 32 bits on g4x/gen5+.
 */
1796 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1798 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1801 * On i965gm the hardware frame counter reads
1802 * zero when the TV encoder is enabled :(
1804 if (IS_I965GM(dev_priv) &&
1805 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1808 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1809 return 0xffffffff; /* full 32 bit counter */
1810 else if (INTEL_GEN(dev_priv) >= 3)
1811 return 0xffffff; /* only 24 bits of frame count */
1813 return 0; /* Gen2 doesn't have a hardware frame counter */
/*
 * intel_crtc_vblank_on - enable vblank processing for a crtc, after setting
 * the max hardware frame counter value for this configuration. Asserts that
 * vblanks were off beforehand.
 */
1816 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1818 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1820 assert_vblank_disabled(&crtc->base);
1821 drm_crtc_set_max_vblank_count(&crtc->base,
1822 intel_crtc_max_vblank_count(crtc_state));
1823 drm_crtc_vblank_on(&crtc->base);
/* Disable vblank processing for a crtc and verify it actually went off. */
1826 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
1828 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1830 drm_crtc_vblank_off(&crtc->base);
1831 assert_vblank_disabled(&crtc->base);
/*
 * intel_enable_pipe - turn the pipe on for the new crtc state.
 * Asserts its preconditions first: all planes disabled, and the clock source
 * running — on GMCH the DSI or pipe PLL, and for PCH-encoder configs the FDI
 * RX/TX PLLs. Then sets PIPECONF_ENABLE (tolerating it already being set on
 * 830, where both pipes stay on), and if there is no hardware frame counter
 * waits for the scanline to start moving so PIPEDSL/vblank timestamps are
 * valid before drm_crtc_vblank_on().
 */
1834 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1836 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1837 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1838 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1839 enum pipe pipe = crtc->pipe;
1843 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1845 assert_planes_disabled(crtc);
1848 * A pipe without a PLL won't actually be able to drive bits from
1849 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1852 if (HAS_GMCH(dev_priv)) {
1853 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1854 assert_dsi_pll_enabled(dev_priv);
1856 assert_pll_enabled(dev_priv, pipe);
1858 if (new_crtc_state->has_pch_encoder) {
1859 /* if driving the PCH, we need FDI enabled */
1860 assert_fdi_rx_pll_enabled(dev_priv,
1861 intel_crtc_pch_transcoder(crtc));
1862 assert_fdi_tx_pll_enabled(dev_priv,
1863 (enum pipe) cpu_transcoder);
1865 /* FIXME: assert CPU port conditions for SNB+ */
1868 trace_intel_pipe_enable(crtc);
1870 reg = PIPECONF(cpu_transcoder);
1871 val = I915_READ(reg);
1872 if (val & PIPECONF_ENABLE) {
1873 /* we keep both pipes enabled on 830 */
1874 WARN_ON(!IS_I830(dev_priv));
1878 I915_WRITE(reg, val | PIPECONF_ENABLE);
1882 * Until the pipe starts PIPEDSL reads will return a stale value,
1883 * which causes an apparent vblank timestamp jump when PIPEDSL
1884 * resets to its proper value. That also messes up the frame count
1885 * when it's derived from the timestamps. So let's wait for the
1886 * pipe to start properly before we call drm_crtc_vblank_on()
1888 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1889 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * intel_disable_pipe - turn the pipe off for the old crtc state.
 * All planes must already be off (asserted) or the display may hang. Clears
 * double-wide mode when it was in use, clears PIPECONF_ENABLE except on 830
 * (pipes stay on there), and if the enable bit was actually cleared waits
 * for the hardware to report the pipe off.
 */
1892 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1894 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1895 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1896 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1897 enum pipe pipe = crtc->pipe;
1901 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1904 * Make sure planes won't keep trying to pump pixels to us,
1905 * or we might hang the display.
1907 assert_planes_disabled(crtc);
1909 trace_intel_pipe_disable(crtc);
1911 reg = PIPECONF(cpu_transcoder);
1912 val = I915_READ(reg);
1913 if ((val & PIPECONF_ENABLE) == 0)
1917 * Double wide has implications for planes
1918 * so best keep it disabled when not needed.
1920 if (old_crtc_state->double_wide)
1921 val &= ~PIPECONF_DOUBLE_WIDE;
1923 /* Don't disable pipe or pipe PLLs if needed */
1924 if (!IS_I830(dev_priv))
1925 val &= ~PIPECONF_ENABLE;
1927 I915_WRITE(reg, val);
1928 if ((val & PIPECONF_ENABLE) == 0)
1929 intel_wait_for_pipe_off(old_crtc_state);
/* GTT tile size in bytes: 2 KiB on gen2, 4 KiB everywhere else. */
1932 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1934 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
/*
 * is_ccs_plane - is fb plane @plane a CCS (compression control) plane?
 * For CCS modifiers, the second half of the format's planes are the CCS
 * planes for the corresponding main planes.
 */
1937 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1939 if (!is_ccs_modifier(fb->modifier))
1942 return plane >= fb->format->num_planes / 2;
/* Is @modifier one of the gen12 CCS render-compression modifiers? */
1945 static bool is_gen12_ccs_modifier(u64 modifier)
1947 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
/* Is @plane a CCS plane of a gen12 CCS framebuffer? */
1950 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1952 return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
/*
 * is_aux_plane - is @plane an auxiliary plane (CCS plane for CCS fbs; the
 * non-CCS case is elided in this extraction — presumably the UV plane test).
 */
1955 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1957 if (is_ccs_modifier(fb->modifier))
1958 return is_ccs_plane(fb, plane);
/* Map a main plane index to its CCS plane index (second half of planes). */
1963 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1965 WARN_ON(!is_ccs_modifier(fb->modifier) ||
1966 (main_plane && main_plane >= fb->format->num_planes / 2));
1968 return fb->format->num_planes / 2 + main_plane;
/* Map a CCS plane index back to its main plane index. */
1971 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1973 WARN_ON(!is_ccs_modifier(fb->modifier) ||
1974 ccs_plane < fb->format->num_planes / 2);
1976 return ccs_plane - fb->format->num_planes / 2;
1979 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1981 intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1983 if (is_ccs_modifier(fb->modifier))
1984 return main_to_ccs_plane(fb, main_plane);
/*
 * intel_tile_width_bytes - width in bytes of one tile row for the given fb
 * modifier and color plane. Linear fbs use the full GTT tile size; tiled
 * modifiers differ by generation and by whether @color_plane is a CCS plane.
 * NOTE(review): the per-case return values and break statements are elided
 * in this extraction; code left byte-identical.
 */
1990 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1992 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1993 unsigned int cpp = fb->format->cpp[color_plane];
1995 switch (fb->modifier) {
1996 case DRM_FORMAT_MOD_LINEAR:
1997 return intel_tile_size(dev_priv);
1998 case I915_FORMAT_MOD_X_TILED:
1999 if (IS_GEN(dev_priv, 2))
2003 case I915_FORMAT_MOD_Y_TILED_CCS:
2004 if (is_ccs_plane(fb, color_plane))
2007 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2008 if (is_ccs_plane(fb, color_plane))
2011 case I915_FORMAT_MOD_Y_TILED:
2012 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
2016 case I915_FORMAT_MOD_Yf_TILED_CCS:
2017 if (is_ccs_plane(fb, color_plane))
2020 case I915_FORMAT_MOD_Yf_TILED:
2036 MISSING_CASE(fb->modifier);
/*
 * intel_tile_height - tile height in rows for @color_plane, derived from
 * tile size / tile row width (gen12 CCS planes special-cased above).
 */
2042 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2044 if (is_gen12_ccs_plane(fb, color_plane))
2047 return intel_tile_size(to_i915(fb->dev)) /
2048 intel_tile_width_bytes(fb, color_plane);
2051 /* Return the tile dimensions in pixel units */
2052 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2053 unsigned int *tile_width,
2054 unsigned int *tile_height)
2056 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2057 unsigned int cpp = fb->format->cpp[color_plane];
2059 *tile_width = tile_width_bytes / cpp;
2060 *tile_height = intel_tile_height(fb, color_plane);
/* Round @height up to a whole number of tile rows for @color_plane. */
2064 intel_fb_align_height(const struct drm_framebuffer *fb,
2065 int color_plane, unsigned int height)
2067 unsigned int tile_height = intel_tile_height(fb, color_plane);
2069 return ALIGN(height, tile_height);
/* Total size (in tiles) of a rotated GGTT view: sum of all plane areas. */
2072 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2074 unsigned int size = 0;
2077 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2078 size += rot_info->plane[i].width * rot_info->plane[i].height;
/* Same as above, for a remapped GGTT view. */
2083 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2085 unsigned int size = 0;
2088 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2089 size += rem_info->plane[i].width * rem_info->plane[i].height;
/*
 * intel_fill_fb_ggtt_view - pick the GGTT view for scanning out @fb:
 * normal view by default, the fb's precomputed rotated view for 90/270.
 */
2095 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2096 const struct drm_framebuffer *fb,
2097 unsigned int rotation)
2099 view->type = I915_GGTT_VIEW_NORMAL;
2100 if (drm_rotation_90_or_270(rotation)) {
2101 view->type = I915_GGTT_VIEW_ROTATED;
2102 view->rotated = to_intel_framebuffer(fb)->rot_info;
/*
 * intel_cursor_alignment - required GGTT alignment for cursor surfaces,
 * varying by platform (830 / 85x / 845G+865G / default). The actual byte
 * values are elided in this extraction.
 */
2106 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2108 if (IS_I830(dev_priv))
2110 else if (IS_I85X(dev_priv))
2112 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
/*
 * intel_linear_alignment - required GGTT alignment for linear scanout
 * surfaces, by generation (gen9+ / 965+VLV+CHV / gen4+ / older).
 */
2118 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2120 if (INTEL_GEN(dev_priv) >= 9)
2122 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2123 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2125 else if (INTEL_GEN(dev_priv) >= 4)
/*
 * intel_surf_alignment - required GGTT alignment for a scanout surface plane,
 * selected by fb modifier: linear alignment for linear fbs, 1 MiB for the
 * tiled modifiers listed, and a smaller (4K, per the comment) alignment for
 * AUX planes. Some case bodies are elided in this extraction.
 */
2131 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2134 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2136 /* AUX_DIST needs only 4K alignment */
2137 if (is_aux_plane(fb, color_plane))
2140 switch (fb->modifier) {
2141 case DRM_FORMAT_MOD_LINEAR:
2142 return intel_linear_alignment(dev_priv);
2143 case I915_FORMAT_MOD_X_TILED:
2144 if (INTEL_GEN(dev_priv) >= 9)
2147 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2149 case I915_FORMAT_MOD_Y_TILED_CCS:
2150 case I915_FORMAT_MOD_Yf_TILED_CCS:
2151 case I915_FORMAT_MOD_Y_TILED:
2152 case I915_FORMAT_MOD_Yf_TILED:
2153 return 1 * 1024 * 1024;
2155 MISSING_CASE(fb->modifier);
/*
 * intel_plane_uses_fence - does this plane state need a GTT fence?
 * Pre-gen4 always does; otherwise only for the normal (unrotated) view,
 * per the visible condition.
 */
2160 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2162 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2163 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2165 return INTEL_GEN(dev_priv) < 4 ||
2167 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer's backing object into the
 * GGTT for scanout and, when needed, install a fence register.
 * Computes the surface alignment (bumped to 256 KiB for the VT-d scanout
 * workaround), takes a runtime-PM wakeref around the pin/fence (GGTT PTE
 * writes require the device awake), forces PIN_MAPPABLE on GMCH platforms,
 * and attempts a fence for map-and-fenceable VMAs — a fence failure is fatal
 * only pre-gen4 (where scanout requires it); otherwise the pin succeeds
 * without PLANE_HAS_FENCE. Returns the pinned vma or an ERR_PTR.
 * NOTE(review): error-path labels and the final return are elided in this
 * extraction; code left byte-identical.
 */
2171 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2172 const struct i915_ggtt_view *view,
2174 unsigned long *out_flags)
2176 struct drm_device *dev = fb->dev;
2177 struct drm_i915_private *dev_priv = to_i915(dev);
2178 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2179 intel_wakeref_t wakeref;
2180 struct i915_vma *vma;
2181 unsigned int pinctl;
2184 if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
2185 return ERR_PTR(-EINVAL);
2187 alignment = intel_surf_alignment(fb, 0);
2189 /* Note that the w/a also requires 64 PTE of padding following the
2190 * bo. We currently fill all unused PTE with the shadow page and so
2191 * we should always have valid PTE following the scanout preventing
2194 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2195 alignment = 256 * 1024;
2198 * Global gtt pte registers are special registers which actually forward
2199 * writes to a chunk of system memory. Which means that there is no risk
2200 * that the register values disappear as soon as we call
2201 * intel_runtime_pm_put(), so it is correct to wrap only the
2202 * pin/unpin/fence and not more.
2204 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2206 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2209 * Valleyview is definitely limited to scanning out the first
2210 * 512MiB. Lets presume this behaviour was inherited from the
2211 * g4x display engine and that all earlier gen are similarly
2212 * limited. Testing suggests that it is a little more
2213 * complicated than this. For example, Cherryview appears quite
2214 * happy to scanout from anywhere within its global aperture.
2217 if (HAS_GMCH(dev_priv))
2218 pinctl |= PIN_MAPPABLE;
2220 vma = i915_gem_object_pin_to_display_plane(obj,
2221 alignment, view, pinctl);
2225 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2229 * Install a fence for tiled scan-out. Pre-i965 always needs a
2230 * fence, whereas 965+ only requires a fence if using
2231 * framebuffer compression. For simplicity, we always, when
2232 * possible, install a fence as the cost is not that onerous.
2234 * If we fail to fence the tiled scanout, then either the
2235 * modeset will reject the change (which is highly unlikely as
2236 * the affected systems, all but one, do not have unmappable
2237 * space) or we will not be able to enable full powersaving
2238 * techniques (also likely not to apply due to various limits
2239 * FBC and the like impose on the size of the buffer, which
2240 * presumably we violated anyway with this unmappable buffer).
2241 * Anyway, it is presumably better to stumble onwards with
2242 * something and try to run the system in a "less than optimal"
2243 * mode that matches the user configuration.
2245 ret = i915_vma_pin_fence(vma);
2246 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2247 i915_gem_object_unpin_from_display_plane(vma);
2252 if (ret == 0 && vma->fence)
2253 *out_flags |= PLANE_HAS_FENCE;
2258 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2259 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * intel_unpin_fb_vma - undo intel_pin_and_fence_fb_obj(): release the fence
 * (if PLANE_HAS_FENCE was set) and unpin, under the object lock.
 */
2263 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2265 i915_gem_object_lock(vma->obj);
2266 if (flags & PLANE_HAS_FENCE)
2267 i915_vma_unpin_fence(vma);
2268 i915_gem_object_unpin_from_display_plane(vma);
2269 i915_gem_object_unlock(vma->obj);
/*
 * intel_fb_pitch - pitch of @color_plane for the given rotation: the
 * precomputed rotated pitch for 90/270, the fb's own pitch otherwise.
 */
2274 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2275 unsigned int rotation)
2277 if (drm_rotation_90_or_270(rotation))
2278 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2280 return fb->pitches[color_plane];
2284 * Convert the x/y offsets into a linear offset.
2285 * Only valid with 0/180 degree rotation, which is fine since linear
2286 * offset is only used with linear buffers on pre-hsw and tiled buffers
2287 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2289 u32 intel_fb_xy_to_linear(int x, int y,
2290 const struct intel_plane_state *state,
2293 const struct drm_framebuffer *fb = state->hw.fb;
2294 unsigned int cpp = fb->format->cpp[color_plane];
2295 unsigned int pitch = state->color_plane[color_plane].stride;
2297 return y * pitch + x * cpp;
2301 * Add the x/y offsets derived from fb->offsets[] to the user
2302 * specified plane src x/y offsets. The resulting x/y offsets
2303 * specify the start of scanout from the beginning of the gtt mapping.
2305 void intel_add_fb_offsets(int *x, int *y,
2306 const struct intel_plane_state *state,
2310 *x += state->color_plane[color_plane].x;
2311 *y += state->color_plane[color_plane].y;
/*
 * Re-express the difference between two tile-aligned offsets
 * (old_offset - new_offset, both multiples of tile_size) as x/y pixel
 * offsets, adding them to *x/*y.  The final step folds excess x back
 * into y so x stays minimal.
 * NOTE(review): trailing parameters (old_offset/new_offset) and the
 * return statement appear to be missing from this extract.
 */
2314 static u32 intel_adjust_tile_offset(int *x, int *y,
2315 unsigned int tile_width,
2316 unsigned int tile_height,
2317 unsigned int tile_size,
2318 unsigned int pitch_tiles,
2322 unsigned int pitch_pixels = pitch_tiles * tile_width;
2325 WARN_ON(old_offset & (tile_size - 1));
2326 WARN_ON(new_offset & (tile_size - 1));
2327 WARN_ON(new_offset > old_offset);
2329 tiles = (old_offset - new_offset) / tile_size;
2331 *y += tiles / pitch_tiles * tile_height;
2332 *x += tiles % pitch_tiles * tile_width;
2334 /* minimize x in case it got needlessly big */
2335 *y += *x / pitch_pixels * tile_height;
/*
 * A surface is laid out linearly if the fb modifier is linear, or if
 * this plane is a gen12 CCS (clear color / control surface) plane.
 */
2341 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2343 return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2344 is_gen12_ccs_plane(fb, color_plane);
/*
 * Adjust *x/*y to account for moving from old_offset to new_offset
 * (new_offset <= old_offset).  For tiled surfaces this is delegated to
 * intel_adjust_tile_offset() with the tile geometry (rotated 90/270 if
 * needed); for linear surfaces the byte difference is converted back
 * into row/column offsets directly.
 * NOTE(review): some parameter lines and the else/closing braces appear
 * to be missing from this extract; code kept byte-identical.
 */
2347 static u32 intel_adjust_aligned_offset(int *x, int *y,
2348 const struct drm_framebuffer *fb,
2350 unsigned int rotation,
2352 u32 old_offset, u32 new_offset)
2354 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2355 unsigned int cpp = fb->format->cpp[color_plane];
2357 WARN_ON(new_offset > old_offset);
2359 if (!is_surface_linear(fb, color_plane)) {
2360 unsigned int tile_size, tile_width, tile_height;
2361 unsigned int pitch_tiles;
2363 tile_size = intel_tile_size(dev_priv);
2364 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2366 if (drm_rotation_90_or_270(rotation)) {
2367 pitch_tiles = pitch / tile_height;
2368 swap(tile_width, tile_height);
2370 pitch_tiles = pitch / (tile_width * cpp);
2373 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2374 tile_size, pitch_tiles,
2375 old_offset, new_offset);
2377 old_offset += *y * pitch + *x * cpp;
2379 *y = (old_offset - new_offset) / pitch;
2380 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2387 * Adjust the tile offset by moving the difference into
/*
 * Convenience wrapper around intel_adjust_aligned_offset() that pulls
 * fb and stride from the plane state.
 * NOTE(review): a parameter line (rotation?) appears to be missing from
 * this extract.
 */
2390 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2391 const struct intel_plane_state *state,
2393 u32 old_offset, u32 new_offset)
2395 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2397 state->color_plane[color_plane].stride,
2398 old_offset, new_offset);
2402 * Computes the aligned offset to the base tile and adjusts
2403 * x, y. bytes per pixel is assumed to be a power-of-two.
2405 * In the 90/270 rotated case, x and y are assumed
2406 * to be already rotated to match the rotated GTT view, and
2407 * pitch is the tile_height aligned framebuffer height.
2409 * This function is used when computing the derived information
2410 * under intel_framebuffer, so using any of that information
2411 * here is not allowed. Anything under drm_framebuffer can be
2412 * used. This is why the user has to pass in the pitch since it
2413 * is specified in the rotated orientation.
/*
 * Compute the surface offset aligned down to @alignment (a power-of-two
 * minus one mask, per the & ~alignment usage below) for the given x/y,
 * and rewrite *x/*y to be relative to that aligned offset.  Handles both
 * tiled (tile-row arithmetic) and linear (byte arithmetic) layouts.
 * See the long comment above for the rotation/pitch conventions.
 * NOTE(review): some parameter lines and intermediate statements appear
 * to be missing from this extract; code kept byte-identical.
 */
2415 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2417 const struct drm_framebuffer *fb,
2420 unsigned int rotation,
2423 unsigned int cpp = fb->format->cpp[color_plane];
2424 u32 offset, offset_aligned;
2429 if (!is_surface_linear(fb, color_plane)) {
2430 unsigned int tile_size, tile_width, tile_height;
2431 unsigned int tile_rows, tiles, pitch_tiles;
2433 tile_size = intel_tile_size(dev_priv);
2434 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2436 if (drm_rotation_90_or_270(rotation)) {
2437 pitch_tiles = pitch / tile_height;
2438 swap(tile_width, tile_height);
2440 pitch_tiles = pitch / (tile_width * cpp);
2443 tile_rows = *y / tile_height;
2446 tiles = *x / tile_width;
2449 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2450 offset_aligned = offset & ~alignment;
2452 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2453 tile_size, pitch_tiles,
2454 offset, offset_aligned);
2456 offset = *y * pitch + *x * cpp;
2457 offset_aligned = offset & ~alignment;
2459 *y = (offset & alignment) / pitch;
2460 *x = ((offset & alignment) - *y * pitch) / cpp;
2463 return offset_aligned;
/*
 * Wrapper around intel_compute_aligned_offset() that derives the
 * alignment from the plane type: cursor planes use the cursor alignment,
 * everything else the surface alignment for the fb/plane.
 */
2466 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2467 const struct intel_plane_state *state,
2470 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2471 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2472 const struct drm_framebuffer *fb = state->hw.fb;
2473 unsigned int rotation = state->hw.rotation;
2474 int pitch = state->color_plane[color_plane].stride;
2477 if (intel_plane->id == PLANE_CURSOR)
2478 alignment = intel_cursor_alignment(dev_priv);
2480 alignment = intel_surf_alignment(fb, color_plane);
2482 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2483 pitch, rotation, alignment);
2486 /* Convert the fb->offset[] into x/y offsets */
/*
 * Convert fb->offsets[color_plane] into x/y pixel coordinates.
 * Rejects tiled fbs whose plane offset is not tile-size aligned, and
 * rejects fbs whose height * pitch + offset would overflow u32.
 * NOTE(review): error-return statements appear to be missing from this
 * extract; code kept byte-identical.
 */
2487 static int intel_fb_offset_to_xy(int *x, int *y,
2488 const struct drm_framebuffer *fb,
2491 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2492 unsigned int height;
2494 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2495 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2496 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2497 fb->offsets[color_plane], color_plane);
2501 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2502 height = ALIGN(height, intel_tile_height(fb, color_plane));
2504 /* Catch potential overflows early */
2505 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2506 fb->offsets[color_plane])) {
2507 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2508 fb->offsets[color_plane], fb->pitches[color_plane],
2516 intel_adjust_aligned_offset(x, y,
2517 fb, color_plane, DRM_MODE_ROTATE_0,
2518 fb->pitches[color_plane],
2519 fb->offsets[color_plane], 0);
/*
 * Map a DRM fb modifier onto the i915 GEM tiling mode used for fencing:
 * X-tile -> I915_TILING_X, the Y-tile family -> I915_TILING_Y, anything
 * else -> none.
 */
2524 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2526 switch (fb_modifier) {
2527 case I915_FORMAT_MOD_X_TILED:
2528 return I915_TILING_X;
2529 case I915_FORMAT_MOD_Y_TILED:
2530 case I915_FORMAT_MOD_Y_TILED_CCS:
2531 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2532 return I915_TILING_Y;
2534 return I915_TILING_NONE;
2539 * From the Sky Lake PRM:
2540 * "The Color Control Surface (CCS) contains the compression status of
2541 * the cache-line pairs. The compression state of the cache-line pair
2542 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2543 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2544 * cache-line-pairs. CCS is always Y tiled."
2546 * Since cache line pairs refers to horizontally adjacent cache lines,
2547 * each cache line in the CCS corresponds to an area of 32x16 cache
2548 * lines on the main surface. Since each pixel is 4 bytes, this gives
2549 * us a ratio of one byte in the CCS for each 8x16 pixels in the
/*
 * Format descriptions for SKL-style render-compressed (CCS) fbs:
 * 4 bpp main surface plus a 1-byte-per-8x16-pixel CCS plane
 * (hsub=8, vsub=16), per the PRM quote above.
 */
2552 static const struct drm_format_info skl_ccs_formats[] = {
2553 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2554 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2555 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2556 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2557 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2558 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2559 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2560 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2564 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
2565 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
2566 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
2567 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
/*
 * Format descriptions for gen12 render-compressed (CCS) fbs: the CCS
 * plane is expressed via char_per_block/block_w/block_h (1 byte per
 * 2x32-pixel block of the main surface, per the comment above) rather
 * than hsub/vsub subsampling.
 */
2570 static const struct drm_format_info gen12_ccs_formats[] = {
2571 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2572 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2573 .hsub = 1, .vsub = 1, },
2574 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2575 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2576 .hsub = 1, .vsub = 1, },
2577 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2578 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2579 .hsub = 1, .vsub = 1, .has_alpha = true },
2580 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2581 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2582 .hsub = 1, .vsub = 1, .has_alpha = true },
/*
 * Linear search of a drm_format_info table for @format.
 * NOTE(review): the return statements appear to be missing from this
 * extract; code kept byte-identical.
 */
2585 static const struct drm_format_info *
2586 lookup_format_info(const struct drm_format_info formats[],
2587 int num_formats, u32 format)
2591 for (i = 0; i < num_formats; i++) {
2592 if (formats[i].format == format)
/*
 * Pick the driver-private format info for CCS modifiers (which need the
 * extra CCS plane described in the tables above); other modifiers fall
 * through to the default (not visible in this extract).
 */
2599 static const struct drm_format_info *
2600 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2602 switch (cmd->modifier[0]) {
2603 case I915_FORMAT_MOD_Y_TILED_CCS:
2604 case I915_FORMAT_MOD_Yf_TILED_CCS:
2605 return lookup_format_info(skl_ccs_formats,
2606 ARRAY_SIZE(skl_ccs_formats),
2608 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2609 return lookup_format_info(gen12_ccs_formats,
2610 ARRAY_SIZE(gen12_ccs_formats),
/* True for any render-compression (CCS) fb modifier, gen9 or gen12. */
2617 bool is_ccs_modifier(u64 modifier)
2619 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2620 modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2621 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
/*
 * Maximum fb stride the hardware planes accept, queried from pipe A's
 * primary plane (assumed to have the highest limits of all planes).
 */
2624 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2625 u32 pixel_format, u64 modifier)
2627 struct intel_crtc *crtc;
2628 struct intel_plane *plane;
2631 * We assume the primary plane for pipe A has
2632 * the highest stride limits of them all.
2634 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2638 plane = to_intel_plane(crtc->base.primary);
2640 return plane->max_stride(plane, pixel_format, modifier,
/*
 * Maximum stride accepted for a user fb.  Non-CCS modifiers can be
 * remapped through the GTT, so gen-dependent arbitrary limits apply
 * (values not visible in this extract); CCS modifiers cannot be
 * remapped and fall back to the plane hardware limit.
 */
2645 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2646 u32 pixel_format, u64 modifier)
2649 * Arbitrary limit for gen4+ chosen to match the
2650 * render engine max stride.
2652 * The new CCS hash mode makes remapping impossible
2654 if (!is_ccs_modifier(modifier)) {
2655 if (INTEL_GEN(dev_priv) >= 7)
2657 else if (INTEL_GEN(dev_priv) >= 4)
2661 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
/*
 * Required stride alignment for @color_plane: page-aligned for linear
 * surfaces that may need remapping, otherwise tile-width based, with
 * extra multiple-of-4-tiles requirements for CCS main surfaces
 * (Display WA #0531 on gen9, and a gen12 padding rule).
 * NOTE(review): several return statements appear to be missing from
 * this extract; code kept byte-identical.
 */
2665 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2667 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2670 if (is_surface_linear(fb, color_plane)) {
2671 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2676 * To make remapping with linear generally feasible
2677 * we need the stride to be page aligned.
2679 if (fb->pitches[color_plane] > max_stride &&
2680 !is_ccs_modifier(fb->modifier))
2681 return intel_tile_size(dev_priv);
2686 tile_width = intel_tile_width_bytes(fb, color_plane);
2687 if (is_ccs_modifier(fb->modifier) && color_plane == 0) {
2689 * Display WA #0531: skl,bxt,kbl,glk
2691 * Render decompression and plane width > 3840
2692 * combined with horizontal panning requires the
2693 * plane stride to be a multiple of 4. We'll just
2694 * require the entire fb to accommodate that to avoid
2695 * potential runtime errors at plane configuration time.
2697 if (IS_GEN(dev_priv, 9) && fb->width > 3840)
2700 * The main surface pitch must be padded to a multiple of four
2703 else if (INTEL_GEN(dev_priv) >= 12)
/*
 * Whether this plane/fb combination may be remapped through a custom
 * GGTT view.  Excluded: cursors, pre-gen4 hardware, CCS modifiers
 * (virtual address affects compressed data), and linear fbs whose
 * pitch is not page aligned.
 * NOTE(review): the return statements appear to be missing from this
 * extract; code kept byte-identical.
 */
2711 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2712 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2713 const struct drm_framebuffer *fb = plane_state->hw.fb;
2716 /* We don't want to deal with remapping with cursors */
2717 if (plane->id == PLANE_CURSOR)
2721 * The display engine limits already match/exceed the
2722 * render engine limits, so not much point in remapping.
2723 * Would also need to deal with the fence POT alignment
2724 * and gen2 2KiB GTT tile size.
2726 if (INTEL_GEN(dev_priv) < 4)
2730 * The new CCS hash mode isn't compatible with remapping as
2731 * the virtual address of the pages affects the compressed data.
2733 if (is_ccs_modifier(fb->modifier))
2736 /* Linear needs a page aligned stride for remapping */
2737 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2738 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2740 for (i = 0; i < fb->format->num_planes; i++) {
2741 if (fb->pitches[i] & alignment)
/*
 * A plane needs GTT remapping when it is visible, remapping is allowed
 * (intel_plane_can_remap()), and its plane-0 stride exceeds the
 * hardware maximum for this format/modifier/rotation.
 */
2749 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2751 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2752 const struct drm_framebuffer *fb = plane_state->hw.fb;
2753 unsigned int rotation = plane_state->hw.rotation;
2754 u32 stride, max_stride;
2757 * No remapping for invisible planes since we don't have
2758 * an actual source viewport to remap.
2760 if (!plane_state->uapi.visible)
2763 if (!intel_plane_can_remap(plane_state))
2767 * FIXME: aux plane limits on gen9+ are
2768 * unclear in Bspec, for now no checking.
2770 stride = intel_fb_pitch(fb, 0, rotation);
2771 max_stride = plane->max_stride(plane, fb->format->format,
2772 fb->modifier, rotation);
2774 return stride > max_stride;
/*
 * Report the horizontal/vertical subsampling factors of @color_plane
 * relative to its main plane.  Non-gen12-CCS planes use the format's
 * hsub/vsub directly; gen12 CCS planes derive them from the block
 * dimensions, with a correction for the AUX plane of plane 0 (see the
 * inline comment about the core framebuffer_check() assumption).
 * NOTE(review): some lines (e.g. the color_plane == 0 early path)
 * appear to be missing from this extract; code kept byte-identical.
 */
2778 intel_fb_plane_get_subsampling(int *hsub, int *vsub,
2779 const struct drm_framebuffer *fb,
2784 if (color_plane == 0) {
2792 * TODO: Deduct the subsampling from the char block for all CCS
2793 * formats and planes.
2795 if (!is_gen12_ccs_plane(fb, color_plane)) {
2796 *hsub = fb->format->hsub;
2797 *vsub = fb->format->vsub;
2802 main_plane = ccs_to_main_plane(fb, color_plane);
2803 *hsub = drm_format_info_block_width(fb->format, color_plane) /
2804 drm_format_info_block_width(fb->format, main_plane);
2807 * The min stride check in the core framebuffer_check() function
2808 * assumes that format->hsub applies to every plane except for the
2809 * first plane. That's incorrect for the CCS AUX plane of the first
2810 * plane, but for the above check to pass we must define the block
2811 * width with that subsampling applied to it. Adjust the width here
2812 * accordingly, so we can calculate the actual subsampling factor.
2814 if (main_plane == 0)
2815 *hsub *= fb->format->hsub;
/*
 * Validate that the intra-tile x/y offsets of a CCS plane match those
 * of its main surface — required because the CCS plane has no x/y
 * offset register of its own.  Non-CCS planes pass trivially.
 * NOTE(review): the error return and closing lines appear to be missing
 * from this extract; code kept byte-identical.
 */
2820 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
2822 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2825 int tile_width, tile_height;
2829 if (!is_ccs_plane(fb, ccs_plane))
2832 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
2833 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
2836 tile_height *= vsub;
2838 ccs_x = (x * hsub) % tile_width;
2839 ccs_y = (y * vsub) % tile_height;
2841 main_plane = ccs_to_main_plane(fb, ccs_plane);
2842 main_x = intel_fb->normal[main_plane].x % tile_width;
2843 main_y = intel_fb->normal[main_plane].y % tile_height;
2846 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2847 * x/y offsets must match between CCS and the main surface.
2849 if (main_x != ccs_x || main_y != ccs_y) {
2850 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2853 intel_fb->normal[main_plane].x,
2854 intel_fb->normal[main_plane].y,
/*
 * Compute the width/height of @color_plane by dividing the fb
 * dimensions by the plane's subsampling factors.
 */
2863 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2867 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2868 *w = fb->width / hsub;
2869 *h = fb->height / vsub;
/*
 * Precompute the per-color-plane layout information cached in
 * intel_framebuffer: normal-view x/y offsets, rotated-view offsets and
 * pitches, and the rotation_info used to build the rotated GGTT view.
 * Also validates per-plane offsets (intel_fb_offset_to_xy), CCS x/y
 * consistency, the fence/tiling stride restriction, and finally that
 * the whole layout fits inside the backing object.
 * NOTE(review): numerous lines (error returns, else branches, closing
 * braces) appear to be missing from this extract; code kept
 * byte-identical.
 */
2873 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2874 struct drm_framebuffer *fb)
2876 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2877 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2878 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2879 u32 gtt_offset_rotated = 0;
2880 unsigned int max_size = 0;
2881 int i, num_planes = fb->format->num_planes;
2882 unsigned int tile_size = intel_tile_size(dev_priv);
2884 for (i = 0; i < num_planes; i++) {
2885 unsigned int width, height;
2886 unsigned int cpp, size;
2891 cpp = fb->format->cpp[i];
2892 intel_fb_plane_dims(&width, &height, fb, i);
2894 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2896 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2901 ret = intel_fb_check_ccs_xy(fb, i, x, y);
2906 * The fence (if used) is aligned to the start of the object
2907 * so having the framebuffer wrap around across the edge of the
2908 * fenced region doesn't really work. We have no API to configure
2909 * the fence start offset within the object (nor could we probably
2910 * on gen2/3). So it's just easier if we just require that the
2911 * fb layout agrees with the fence layout. We already check that the
2912 * fb stride matches the fence stride elsewhere.
2914 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2915 (x + width) * cpp > fb->pitches[i]) {
2916 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2922 * First pixel of the framebuffer from
2923 * the start of the normal gtt mapping.
2925 intel_fb->normal[i].x = x;
2926 intel_fb->normal[i].y = y;
2928 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2932 offset /= tile_size;
2934 if (!is_surface_linear(fb, i)) {
2935 unsigned int tile_width, tile_height;
2936 unsigned int pitch_tiles;
2939 intel_tile_dims(fb, i, &tile_width, &tile_height);
2941 rot_info->plane[i].offset = offset;
2942 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2943 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2944 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2946 intel_fb->rotated[i].pitch =
2947 rot_info->plane[i].height * tile_height;
2949 /* how many tiles does this plane need */
2950 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2952 * If the plane isn't horizontally tile aligned,
2953 * we need one more tile.
2958 /* rotate the x/y offsets to match the GTT view */
2959 drm_rect_init(&r, x, y, width, height);
2961 rot_info->plane[i].width * tile_width,
2962 rot_info->plane[i].height * tile_height,
2963 DRM_MODE_ROTATE_270);
2967 /* rotate the tile dimensions to match the GTT view */
2968 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2969 swap(tile_width, tile_height);
2972 * We only keep the x/y offsets, so push all of the
2973 * gtt offset into the x/y offsets.
2975 intel_adjust_tile_offset(&x, &y,
2976 tile_width, tile_height,
2977 tile_size, pitch_tiles,
2978 gtt_offset_rotated * tile_size, 0);
2980 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2983 * First pixel of the framebuffer from
2984 * the start of the rotated gtt mapping.
2986 intel_fb->rotated[i].x = x;
2987 intel_fb->rotated[i].y = y;
2989 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2990 x * cpp, tile_size);
2993 /* how many tiles in total needed in the bo */
2994 max_size = max(max_size, offset + size);
2997 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2998 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2999 mul_u32_u32(max_size, tile_size), obj->base.size);
/*
 * Build a remapped/rotated GGTT view covering only the plane's source
 * viewport, and rewrite plane_state->color_plane[] (stride/x/y/offset)
 * to address that view.  Src coordinates are made viewport-relative and
 * rotated to match the view; CCS modifiers are not expected here
 * (WARN_ON), consistent with intel_plane_can_remap().
 * NOTE(review): some lines (e.g. x/y initialization from src_x/src_y,
 * else branches, closing braces) appear to be missing from this
 * extract; code kept byte-identical.
 */
3007 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
3009 struct drm_i915_private *dev_priv =
3010 to_i915(plane_state->uapi.plane->dev);
3011 struct drm_framebuffer *fb = plane_state->hw.fb;
3012 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
3013 struct intel_rotation_info *info = &plane_state->view.rotated;
3014 unsigned int rotation = plane_state->hw.rotation;
3015 int i, num_planes = fb->format->num_planes;
3016 unsigned int tile_size = intel_tile_size(dev_priv);
3017 unsigned int src_x, src_y;
3018 unsigned int src_w, src_h;
3021 memset(&plane_state->view, 0, sizeof(plane_state->view));
3022 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
3023 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
3025 src_x = plane_state->uapi.src.x1 >> 16;
3026 src_y = plane_state->uapi.src.y1 >> 16;
3027 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3028 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
3030 WARN_ON(is_ccs_modifier(fb->modifier));
3032 /* Make src coordinates relative to the viewport */
3033 drm_rect_translate(&plane_state->uapi.src,
3034 -(src_x << 16), -(src_y << 16));
3036 /* Rotate src coordinates to match rotated GTT view */
3037 if (drm_rotation_90_or_270(rotation))
3038 drm_rect_rotate(&plane_state->uapi.src,
3039 src_w << 16, src_h << 16,
3040 DRM_MODE_ROTATE_270);
3042 for (i = 0; i < num_planes; i++) {
3043 unsigned int hsub = i ? fb->format->hsub : 1;
3044 unsigned int vsub = i ? fb->format->vsub : 1;
3045 unsigned int cpp = fb->format->cpp[i];
3046 unsigned int tile_width, tile_height;
3047 unsigned int width, height;
3048 unsigned int pitch_tiles;
3052 intel_tile_dims(fb, i, &tile_width, &tile_height);
3056 width = src_w / hsub;
3057 height = src_h / vsub;
3060 * First pixel of the src viewport from the
3061 * start of the normal gtt mapping.
3063 x += intel_fb->normal[i].x;
3064 y += intel_fb->normal[i].y;
3066 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
3067 fb, i, fb->pitches[i],
3068 DRM_MODE_ROTATE_0, tile_size);
3069 offset /= tile_size;
3071 info->plane[i].offset = offset;
3072 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
3074 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
3075 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
3077 if (drm_rotation_90_or_270(rotation)) {
3080 /* rotate the x/y offsets to match the GTT view */
3081 drm_rect_init(&r, x, y, width, height);
3083 info->plane[i].width * tile_width,
3084 info->plane[i].height * tile_height,
3085 DRM_MODE_ROTATE_270);
3089 pitch_tiles = info->plane[i].height;
3090 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
3092 /* rotate the tile dimensions to match the GTT view */
3093 swap(tile_width, tile_height);
3095 pitch_tiles = info->plane[i].width;
3096 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
3100 * We only keep the x/y offsets, so push all of the
3101 * gtt offset into the x/y offsets.
3103 intel_adjust_tile_offset(&x, &y,
3104 tile_width, tile_height,
3105 tile_size, pitch_tiles,
3106 gtt_offset * tile_size, 0);
3108 gtt_offset += info->plane[i].width * info->plane[i].height;
3110 plane_state->color_plane[i].offset = 0;
3111 plane_state->color_plane[i].x = x;
3112 plane_state->color_plane[i].y = y;
/*
 * Fill in plane_state->view and per-plane stride/offset/x/y, either via
 * the remapping path (intel_plane_remap_gtt) when required, or from the
 * precomputed normal/rotated fb layout.  Ends with a stride check in
 * both paths since even remapping may not satisfy the limits.
 */
3117 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
3119 const struct intel_framebuffer *fb =
3120 to_intel_framebuffer(plane_state->hw.fb);
3121 unsigned int rotation = plane_state->hw.rotation;
3127 num_planes = fb->base.format->num_planes;
3129 if (intel_plane_needs_remap(plane_state)) {
3130 intel_plane_remap_gtt(plane_state);
3133 * Sometimes even remapping can't overcome
3134 * the stride limitations :( Can happen with
3135 * big plane sizes and suitably misaligned
3138 return intel_plane_check_stride(plane_state);
3141 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
3143 for (i = 0; i < num_planes; i++) {
3144 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
3145 plane_state->color_plane[i].offset = 0;
3147 if (drm_rotation_90_or_270(rotation)) {
3148 plane_state->color_plane[i].x = fb->rotated[i].x;
3149 plane_state->color_plane[i].y = fb->rotated[i].y;
3151 plane_state->color_plane[i].x = fb->normal[i].x;
3152 plane_state->color_plane[i].y = fb->normal[i].y;
3156 /* Rotate src coordinates to match rotated GTT view */
3157 if (drm_rotation_90_or_270(rotation))
3158 drm_rect_rotate(&plane_state->uapi.src,
3159 fb->base.width << 16, fb->base.height << 16,
3160 DRM_MODE_ROTATE_270);
3162 return intel_plane_check_stride(plane_state);
/*
 * Translate a pre-SKL DISPPLANE_* hardware pixel-format field into the
 * corresponding DRM fourcc, for readout of the BIOS-programmed plane.
 */
3165 static int i9xx_format_to_fourcc(int format)
3168 case DISPPLANE_8BPP:
3169 return DRM_FORMAT_C8;
3170 case DISPPLANE_BGRA555:
3171 return DRM_FORMAT_ARGB1555;
3172 case DISPPLANE_BGRX555:
3173 return DRM_FORMAT_XRGB1555;
3174 case DISPPLANE_BGRX565:
3175 return DRM_FORMAT_RGB565;
3177 case DISPPLANE_BGRX888:
3178 return DRM_FORMAT_XRGB8888;
3179 case DISPPLANE_RGBX888:
3180 return DRM_FORMAT_XBGR8888;
3181 case DISPPLANE_BGRA888:
3182 return DRM_FORMAT_ARGB8888;
3183 case DISPPLANE_RGBA888:
3184 return DRM_FORMAT_ABGR8888;
3185 case DISPPLANE_BGRX101010:
3186 return DRM_FORMAT_XRGB2101010;
3187 case DISPPLANE_RGBX101010:
3188 return DRM_FORMAT_XBGR2101010;
3189 case DISPPLANE_BGRA101010:
3190 return DRM_FORMAT_ARGB2101010;
3191 case DISPPLANE_RGBA101010:
3192 return DRM_FORMAT_ABGR2101010;
3193 case DISPPLANE_RGBX161616:
3194 return DRM_FORMAT_XBGR16161616F;
/*
 * Translate a SKL+ PLANE_CTL_FORMAT_* field into a DRM fourcc.
 * For the RGB families the rgb_order and alpha flags select among the
 * four channel-order/alpha variants.
 * NOTE(review): the nested if/else lines for rgb_order/alpha appear to
 * be missing from this extract; code kept byte-identical.
 */
3198 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3201 case PLANE_CTL_FORMAT_RGB_565:
3202 return DRM_FORMAT_RGB565;
3203 case PLANE_CTL_FORMAT_NV12:
3204 return DRM_FORMAT_NV12;
3205 case PLANE_CTL_FORMAT_P010:
3206 return DRM_FORMAT_P010;
3207 case PLANE_CTL_FORMAT_P012:
3208 return DRM_FORMAT_P012;
3209 case PLANE_CTL_FORMAT_P016:
3210 return DRM_FORMAT_P016;
3211 case PLANE_CTL_FORMAT_Y210:
3212 return DRM_FORMAT_Y210;
3213 case PLANE_CTL_FORMAT_Y212:
3214 return DRM_FORMAT_Y212;
3215 case PLANE_CTL_FORMAT_Y216:
3216 return DRM_FORMAT_Y216;
3217 case PLANE_CTL_FORMAT_Y410:
3218 return DRM_FORMAT_XVYU2101010;
3219 case PLANE_CTL_FORMAT_Y412:
3220 return DRM_FORMAT_XVYU12_16161616;
3221 case PLANE_CTL_FORMAT_Y416:
3222 return DRM_FORMAT_XVYU16161616;
3224 case PLANE_CTL_FORMAT_XRGB_8888:
3227 return DRM_FORMAT_ABGR8888;
3229 return DRM_FORMAT_XBGR8888;
3232 return DRM_FORMAT_ARGB8888;
3234 return DRM_FORMAT_XRGB8888;
3236 case PLANE_CTL_FORMAT_XRGB_2101010:
3239 return DRM_FORMAT_ABGR2101010;
3241 return DRM_FORMAT_XBGR2101010;
3244 return DRM_FORMAT_ARGB2101010;
3246 return DRM_FORMAT_XRGB2101010;
3248 case PLANE_CTL_FORMAT_XRGB_16161616F:
3251 return DRM_FORMAT_ABGR16161616F;
3253 return DRM_FORMAT_XBGR16161616F;
3256 return DRM_FORMAT_ARGB16161616F;
3258 return DRM_FORMAT_XRGB16161616F;
/*
 * Try to wrap the BIOS-programmed (pre-allocated stolen-memory)
 * scanout buffer in a GEM object and initialize the initial fb around
 * it.  Bails out for zero-size configs, fbs too large relative to
 * usable stolen memory, and unsupported modifiers.  On failure the
 * object reference (if taken) is dropped at the error label.
 * NOTE(review): some return statements and the error-label line appear
 * to be missing from this extract; code kept byte-identical.
 */
3264 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3265 struct intel_initial_plane_config *plane_config)
3267 struct drm_device *dev = crtc->base.dev;
3268 struct drm_i915_private *dev_priv = to_i915(dev);
3269 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3270 struct drm_framebuffer *fb = &plane_config->fb->base;
3271 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3272 u32 size_aligned = round_up(plane_config->base + plane_config->size,
3274 struct drm_i915_gem_object *obj;
3277 size_aligned -= base_aligned;
3279 if (plane_config->size == 0)
3282 /* If the FB is too big, just don't use it since fbdev is not very
3283 * important and we should probably use that space with FBC or other
3285 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3288 switch (fb->modifier) {
3289 case DRM_FORMAT_MOD_LINEAR:
3290 case I915_FORMAT_MOD_X_TILED:
3291 case I915_FORMAT_MOD_Y_TILED:
3294 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3299 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3306 switch (plane_config->tiling) {
3307 case I915_TILING_NONE:
3311 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3314 MISSING_CASE(plane_config->tiling);
3318 mode_cmd.pixel_format = fb->format->format;
3319 mode_cmd.width = fb->width;
3320 mode_cmd.height = fb->height;
3321 mode_cmd.pitches[0] = fb->pitches[0];
3322 mode_cmd.modifier[0] = fb->modifier;
3323 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3325 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3326 DRM_DEBUG_KMS("intel fb init failed\n");
3331 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3334 i915_gem_object_put(obj);
/*
 * Set a plane's uapi visibility and keep the crtc state's plane_mask in
 * sync (set the bit when visible, clear it otherwise).
 */
3339 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3340 struct intel_plane_state *plane_state,
3343 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3345 plane_state->uapi.visible = visible;
3348 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3350 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
/*
 * Rebuild crtc_state->active_planes from the uapi plane_mask, since
 * active_planes bits can alias when multiple "primary"/cursor planes
 * were (mis)assigned to the same pipe by the BIOS.
 */
3353 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3355 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3356 struct drm_plane *plane;
3359 * Active_planes aliases if multiple "primary" or cursor planes
3360 * have been used on the same (or wrong) pipe. plane_mask uses
3361 * unique ids, hence we can use that to reconstruct active_planes.
3363 crtc_state->active_planes = 0;
3365 drm_for_each_plane_mask(plane, &dev_priv->drm,
3366 crtc_state->uapi.plane_mask)
3367 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
/*
 * Disable a plane outside the atomic framework (used during initial HW
 * readout fixup): update visibility/active_planes bookkeeping, zero its
 * data rate and min cdclk contributions, disable IPS for the primary
 * plane, disable self-refresh (with a vblank wait) on GMCH platforms so
 * the disable actually lands, suppress gen2 all-planes-off underrun
 * noise, then disable the plane itself.
 */
3370 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3371 struct intel_plane *plane)
3373 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3374 struct intel_crtc_state *crtc_state =
3375 to_intel_crtc_state(crtc->base.state);
3376 struct intel_plane_state *plane_state =
3377 to_intel_plane_state(plane->base.state);
3379 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3380 plane->base.base.id, plane->base.name,
3381 crtc->base.base.id, crtc->base.name);
3383 intel_set_plane_visible(crtc_state, plane_state, false);
3384 fixup_active_planes(crtc_state);
3385 crtc_state->data_rate[plane->id] = 0;
3386 crtc_state->min_cdclk[plane->id] = 0;
3388 if (plane->id == PLANE_PRIMARY)
3389 hsw_disable_ips(crtc_state);
3392 * Vblank time updates from the shadow to live plane control register
3393 * are blocked if the memory self-refresh mode is active at that
3394 * moment. So to make sure the plane gets truly disabled, disable
3395 * first the self-refresh mode. The self-refresh enable bit in turn
3396 * will be checked/applied by the HW only at the next frame start
3397 * event which is after the vblank start event, so we need to have a
3398 * wait-for-vblank between disabling the plane and the pipe.
3400 if (HAS_GMCH(dev_priv) &&
3401 intel_set_memory_cxsr(dev_priv, false))
3402 intel_wait_for_vblank(dev_priv, crtc->pipe);
3405 * Gen2 reports pipe underruns whenever all planes are disabled.
3406 * So disable underrun reporting before all the planes get disabled.
3408 if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
3409 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
3411 intel_disable_plane(plane, crtc_state);
/* NULL-safe accessor for the intel_frontbuffer behind a drm fb. */
3414 static struct intel_frontbuffer *
3415 to_intel_frontbuffer(struct drm_framebuffer *fb)
3417 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
/*
 * Take over the BIOS framebuffer for the given crtc: either wrap the
 * preallocated stolen memory (intel_alloc_initial_plane_obj) or share
 * an fb already reconstructed for another active CRTC at the same GGTT
 * base.  If neither works, disable the primary plane so we never scan
 * out a NULL fb.  On success the plane state (view, stride, vma, fence,
 * src/dst rects, frontbuffer bits) is populated to match the BIOS
 * configuration.
 * NOTE(review): some lines (gotos, labels, closing braces) appear to be
 * missing from this extract; code kept byte-identical.
 */
3421 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3422 struct intel_initial_plane_config *plane_config)
3424 struct drm_device *dev = intel_crtc->base.dev;
3425 struct drm_i915_private *dev_priv = to_i915(dev);
3427 struct drm_plane *primary = intel_crtc->base.primary;
3428 struct drm_plane_state *plane_state = primary->state;
3429 struct intel_plane *intel_plane = to_intel_plane(primary);
3430 struct intel_plane_state *intel_state =
3431 to_intel_plane_state(plane_state);
3432 struct drm_framebuffer *fb;
3434 if (!plane_config->fb)
3437 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3438 fb = &plane_config->fb->base;
3442 kfree(plane_config->fb);
3445 * Failed to alloc the obj, check to see if we should share
3446 * an fb with another CRTC instead
3448 for_each_crtc(dev, c) {
3449 struct intel_plane_state *state;
3451 if (c == &intel_crtc->base)
3454 if (!to_intel_crtc(c)->active)
3457 state = to_intel_plane_state(c->primary->state);
3461 if (intel_plane_ggtt_offset(state) == plane_config->base) {
3463 drm_framebuffer_get(fb);
3469 * We've failed to reconstruct the BIOS FB. Current display state
3470 * indicates that the primary plane is visible, but has a NULL FB,
3471 * which will lead to problems later if we don't fix it up. The
3472 * simplest solution is to just disable the primary plane now and
3473 * pretend the BIOS never had it enabled.
3475 intel_plane_disable_noatomic(intel_crtc, intel_plane)
3480 intel_state->hw.rotation = plane_config->rotation;
3481 intel_fill_fb_ggtt_view(&intel_state->view, fb,
3482 intel_state->hw.rotation);
3483 intel_state->color_plane[0].stride =
3484 intel_fb_pitch(fb, 0, intel_state->hw.rotation);
3487 intel_pin_and_fence_fb_obj(fb,
3489 intel_plane_uses_fence(intel_state),
3490 &intel_state->flags);
3491 if (IS_ERR(intel_state->vma)) {
3492 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3493 intel_crtc->pipe, PTR_ERR(intel_state->vma));
3495 intel_state->vma = NULL;
3496 drm_framebuffer_put(fb);
3500 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
3502 plane_state->src_x = 0;
3503 plane_state->src_y = 0;
3504 plane_state->src_w = fb->width << 16;
3505 plane_state->src_h = fb->height << 16;
3507 plane_state->crtc_x = 0;
3508 plane_state->crtc_y = 0;
3509 plane_state->crtc_w = fb->width;
3510 plane_state->crtc_h = fb->height;
3512 intel_state->uapi.src = drm_plane_state_src(plane_state);
3513 intel_state->uapi.dst = drm_plane_state_dest(plane_state);
3515 if (plane_config->tiling)
3516 dev_priv->preserve_bios_swizzle = true;
3518 plane_state->fb = fb;
3519 plane_state->crtc = &intel_crtc->base;
3520 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
3522 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3523 &to_intel_frontbuffer(fb)->bits);
/*
 * Max plane source width on SKL/KBL-class hardware, keyed on the fb tiling
 * modifier (and cpp for tiled layouts). NOTE(review): sampled extract — the
 * per-case return values and some braces are not visible in this listing.
 */
3526 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3528 unsigned int rotation)
3530 int cpp = fb->format->cpp[color_plane];
3532 switch (fb->modifier) {
3533 case DRM_FORMAT_MOD_LINEAR:
3534 case I915_FORMAT_MOD_X_TILED:
3536 * Validated limit is 4k, but has 5k should
3537 * work apart from the following features:
3538 * - Ytile (already limited to 4k)
3539 * - FP16 (already limited to 4k)
3540 * - render compression (already limited to 4k)
3541 * - KVMR sprite and cursor (don't care)
3542 * - horizontal panning (TODO verify this)
3543 * - pipe and plane scaling (TODO verify this)
3549 case I915_FORMAT_MOD_Y_TILED_CCS:
3550 case I915_FORMAT_MOD_Yf_TILED_CCS:
3551 /* FIXME AUX plane? */
3552 case I915_FORMAT_MOD_Y_TILED:
3553 case I915_FORMAT_MOD_Yf_TILED:
/* Unknown modifier: warn once, fall through to the default limit. */
3559 MISSING_CASE(fb->modifier);
/*
 * Max plane source width on GLK/CNL (gen10) — same structure as the SKL
 * variant above but with different (not visible here) per-modifier limits.
 * NOTE(review): sampled extract — case return values are missing.
 */
3564 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3566 unsigned int rotation)
3568 int cpp = fb->format->cpp[color_plane];
3570 switch (fb->modifier) {
3571 case DRM_FORMAT_MOD_LINEAR:
3572 case I915_FORMAT_MOD_X_TILED:
3577 case I915_FORMAT_MOD_Y_TILED_CCS:
3578 case I915_FORMAT_MOD_Yf_TILED_CCS:
3579 /* FIXME AUX plane? */
3580 case I915_FORMAT_MOD_Y_TILED:
3581 case I915_FORMAT_MOD_Yf_TILED:
/* Unknown modifier: warn once, fall through to the default limit. */
3587 MISSING_CASE(fb->modifier);
/*
 * Max plane source width on ICL (gen11).
 * NOTE(review): function body is not visible in this extract.
 */
3592 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3594 unsigned int rotation)
/* Max plane source height pre-ICL. NOTE(review): body not visible here. */
3599 static int skl_max_plane_height(void)
/* Max plane source height on ICL+. NOTE(review): body not visible here. */
3604 static int icl_max_plane_height(void)
/*
 * Try to line up the CCS AUX plane's x/y with the given main-surface x/y by
 * walking the AUX offset backwards one alignment step at a time. On success
 * the matching AUX offset/x/y are written back into plane_state and the
 * function reports success; otherwise failure (return statements not visible
 * in this extract). hsub/vsub are the AUX plane's subsampling factors.
 */
3609 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3610 int main_x, int main_y, u32 main_offset)
3612 const struct drm_framebuffer *fb = plane_state->hw.fb;
3613 int ccs_plane = main_to_ccs_plane(fb, 0);
3614 int aux_x = plane_state->color_plane[ccs_plane].x;
3615 int aux_y = plane_state->color_plane[ccs_plane].y;
3616 u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
3617 u32 alignment = intel_surf_alignment(fb, ccs_plane);
3621 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
/* Step the AUX offset down until the coordinates match or we run out. */
3622 while (aux_offset >= main_offset && aux_y <= main_y) {
3625 if (aux_x == main_x && aux_y == main_y)
3628 if (aux_offset == 0)
3633 aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
/* Scale the adjusted x/y back up by the subsampling factors. */
3639 aux_x = x * hsub + aux_x % hsub;
3640 aux_y = y * vsub + aux_y % vsub;
3643 if (aux_x != main_x || aux_y != main_y)
3646 plane_state->color_plane[ccs_plane].offset = aux_offset;
3647 plane_state->color_plane[ccs_plane].x = aux_x;
3648 plane_state->color_plane[ccs_plane].y = aux_y;
/*
 * Validate and finalize the main (color plane 0) surface layout for a
 * SKL+ plane: enforce per-gen max source width/height, compute the aligned
 * surface offset, and massage the offset so the AUX (CCS) surface offset
 * stays non-negative relative to it. Writes the result into
 * plane_state->color_plane[0] and translates uapi.src to the final x/y.
 * NOTE(review): sampled extract — braces, declarations (max_width/max_height/
 * offset/alignment) and several return paths are not visible.
 */
3653 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3655 struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
3656 const struct drm_framebuffer *fb = plane_state->hw.fb;
3657 unsigned int rotation = plane_state->hw.rotation;
/* src coordinates are 16.16 fixed point; convert to integer pixels. */
3658 int x = plane_state->uapi.src.x1 >> 16;
3659 int y = plane_state->uapi.src.y1 >> 16;
3660 int w = drm_rect_width(&plane_state->uapi.src) >> 16;
3661 int h = drm_rect_height(&plane_state->uapi.src) >> 16;
3666 int aux_plane = intel_main_to_aux_plane(fb, 0);
3667 u32 aux_offset = plane_state->color_plane[aux_plane].offset;
/* Pick the per-generation source size limits. */
3669 if (INTEL_GEN(dev_priv) >= 11)
3670 max_width = icl_max_plane_width(fb, 0, rotation);
3671 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3672 max_width = glk_max_plane_width(fb, 0, rotation);
3674 max_width = skl_max_plane_width(fb, 0, rotation);
3676 if (INTEL_GEN(dev_priv) >= 11)
3677 max_height = icl_max_plane_height();
3679 max_height = skl_max_plane_height();
3681 if (w > max_width || h > max_height) {
3682 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3683 w, h, max_width, max_height);
3687 intel_add_fb_offsets(&x, &y, plane_state, 0);
3688 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3689 alignment = intel_surf_alignment(fb, 0);
3692 * AUX surface offset is specified as the distance from the
3693 * main surface offset, and it must be non-negative. Make
3694 * sure that is what we will get.
3696 if (offset > aux_offset)
3697 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3698 offset, aux_offset & ~(alignment - 1));
3701 * When using an X-tiled surface, the plane blows up
3702 * if the x offset + width exceed the stride.
3704 * TODO: linear and Y-tiled seem fine, Yf untested,
3706 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3707 int cpp = fb->format->cpp[0];
/* Walk the offset back until x+w fits within the stride. */
3709 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3711 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3715 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3716 offset, offset - alignment);
3721 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3722 * they match with the main surface x/y offsets.
3724 if (is_ccs_modifier(fb->modifier)) {
3725 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3729 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3730 offset, offset - alignment);
3733 if (x != plane_state->color_plane[aux_plane].x ||
3734 y != plane_state->color_plane[aux_plane].y) {
3735 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3740 plane_state->color_plane[0].offset = offset;
3741 plane_state->color_plane[0].x = x;
3742 plane_state->color_plane[0].y = y;
3745 * Put the final coordinates back so that the src
3746 * coordinate checks will see the right values.
3748 drm_rect_translate_to(&plane_state->uapi.src,
/*
 * Validate/position the chroma (UV, color plane 1) surface of a semiplanar
 * YUV fb. Chroma is subsampled 2x in both directions, hence the >> 17
 * (16.16 fixed point plus the /2 subsampling). Results land in
 * plane_state->color_plane[1]. NOTE(review): sampled extract — braces and
 * the error/success return statements are not visible.
 */
3754 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3756 const struct drm_framebuffer *fb = plane_state->hw.fb;
3757 unsigned int rotation = plane_state->hw.rotation;
3758 int max_width = skl_max_plane_width(fb, 1, rotation);
3759 int max_height = 4096;
3760 int x = plane_state->uapi.src.x1 >> 17;
3761 int y = plane_state->uapi.src.y1 >> 17;
3762 int w = drm_rect_width(&plane_state->uapi.src) >> 17;
3763 int h = drm_rect_height(&plane_state->uapi.src) >> 17;
3766 intel_add_fb_offsets(&x, &y, plane_state, 1);
3767 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3769 /* FIXME not quite sure how/if these apply to the chroma plane */
3770 if (w > max_width || h > max_height) {
3771 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3772 w, h, max_width, max_height);
3776 plane_state->color_plane[1].offset = offset;
3777 plane_state->color_plane[1].x = x;
3778 plane_state->color_plane[1].y = y;
/*
 * Compute the CCS AUX (color plane 1) surface offset/x/y for a render-
 * compressed fb, honoring the AUX plane's subsampling relative to the
 * main surface. NOTE(review): sampled extract — local declarations of
 * x/y/offset and the return are not visible.
 */
3783 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3785 const struct drm_framebuffer *fb = plane_state->hw.fb;
3786 int src_x = plane_state->uapi.src.x1 >> 16;
3787 int src_y = plane_state->uapi.src.y1 >> 16;
3794 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1);
3797 intel_add_fb_offsets(&x, &y, plane_state, 1);
3798 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
/* Re-apply the sub-sample remainder the subsampled x/y lost. */
3800 plane_state->color_plane[1].offset = offset;
3801 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3802 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
/*
 * Top-level SKL+ plane surface check: compute the GTT mapping, then set up
 * the AUX surface (NV12 chroma or CCS) before the main surface, since the
 * main-surface layout depends on the AUX offset. Invisible planes skip the
 * surface setup. NOTE(review): sampled extract — early-return plumbing and
 * braces are not visible.
 */
3807 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3809 const struct drm_framebuffer *fb = plane_state->hw.fb;
3812 ret = intel_plane_compute_gtt(plane_state);
3816 if (!plane_state->uapi.visible)
3820 * Handle the AUX surface first since
3821 * the main surface setup depends on it.
3823 if (drm_format_info_is_yuv_semiplanar(fb->format)) {
3824 ret = skl_check_nv12_aux_surface(plane_state);
3827 } else if (is_ccs_modifier(fb->modifier)) {
3828 ret = skl_check_ccs_aux_surface(plane_state);
/* No AUX surface: park plane 1 at an out-of-the-way offset. */
3832 plane_state->color_plane[1].offset = ~0xfff;
3833 plane_state->color_plane[1].x = 0;
3834 plane_state->color_plane[1].y = 0;
3837 ret = skl_check_main_surface(plane_state);
/*
 * Report the pixel-rate / cdclk ratio (as num/den) required by an i9xx
 * primary plane, based on the framebuffer cpp. NOTE(review): sampled
 * extract — the assignments to *num and *den are not visible.
 */
3844 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3845 const struct intel_plane_state *plane_state,
3846 unsigned int *num, unsigned int *den)
3848 const struct drm_framebuffer *fb = plane_state->hw.fb;
3849 unsigned int cpp = fb->format->cpp[0];
3852 * g4x bspec says 64bpp pixel rate can't exceed 80%
3853 * of cdclk when the sprite plane is enabled on the
3854 * same pipe. ilk/snb bspec says 64bpp pixel rate is
3855 * never allowed to exceed 80% of cdclk. Let's just go
3856 * with the ilk/snb limit always.
/*
 * Minimum cdclk needed to drive this plane at the crtc's pixel rate,
 * scaled by the plane's num/den ratio. Double-wide pipes push two pixels
 * per clock (the halving statement itself is not visible in this extract).
 */
3867 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3868 const struct intel_plane_state *plane_state)
3870 unsigned int pixel_rate;
3871 unsigned int num, den;
3874 * Note that crtc_state->pixel_rate accounts for both
3875 * horizontal and vertical panel fitter downscaling factors.
3876 * Pre-HSW bspec tells us to only consider the horizontal
3877 * downscaling factor here. We ignore that and just consider
3878 * both for simplicity.
3880 pixel_rate = crtc_state->pixel_rate;
3882 i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3884 /* two pixels per clock with double wide pipe */
3885 if (crtc_state->double_wide)
/* Round up: a fractional clock still requires the next full step. */
3888 return DIV_ROUND_UP(pixel_rate * num, den);
/*
 * Max supported fb stride for an i9xx primary plane, by platform
 * generation and tiling. NOTE(review): sampled extract — the per-branch
 * return values are not visible; the structure distinguishes non-GMCH,
 * gen4+, gen3, and the PLANE_C special case.
 */
3892 i9xx_plane_max_stride(struct intel_plane *plane,
3893 u32 pixel_format, u64 modifier,
3894 unsigned int rotation)
3896 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3898 if (!HAS_GMCH(dev_priv)) {
3900 } else if (INTEL_GEN(dev_priv) >= 4) {
3901 if (modifier == I915_FORMAT_MOD_X_TILED)
3905 } else if (INTEL_GEN(dev_priv) >= 3) {
3906 if (modifier == I915_FORMAT_MOD_X_TILED)
3911 if (plane->i9xx_plane == PLANE_C)
/*
 * Pipe-level DSPCNTR bits shared by all planes on this crtc: gamma, pipe
 * CSC, and (pre-ILK) the pipe-select field. NOTE(review): the dspcntr
 * declaration and final return are not visible in this extract.
 */
3918 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3920 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3921 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3924 if (crtc_state->gamma_enable)
3925 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3927 if (crtc_state->csc_enable)
3928 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
/* Pre-ILK planes can be routed to either pipe; select ours. */
3930 if (INTEL_GEN(dev_priv) < 5)
3931 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
/*
 * Build the per-plane DSPCNTR value: enable bit, trickle-feed disable on
 * affected platforms, pixel-format bits, X-tiling, and 180°/mirror flags.
 * NOTE(review): sampled extract — `case DRM_FORMAT_C8:`-style labels for
 * the first case, `break;`s, and the final return are not visible.
 */
3936 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3937 const struct intel_plane_state *plane_state)
3939 struct drm_i915_private *dev_priv =
3940 to_i915(plane_state->uapi.plane->dev);
3941 const struct drm_framebuffer *fb = plane_state->hw.fb;
3942 unsigned int rotation = plane_state->hw.rotation;
3945 dspcntr = DISPLAY_PLANE_ENABLE;
3947 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3948 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3949 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
/* Translate the DRM fourcc into the hardware format field. */
3951 switch (fb->format->format) {
3953 dspcntr |= DISPPLANE_8BPP;
3955 case DRM_FORMAT_XRGB1555:
3956 dspcntr |= DISPPLANE_BGRX555;
3958 case DRM_FORMAT_ARGB1555:
3959 dspcntr |= DISPPLANE_BGRA555;
3961 case DRM_FORMAT_RGB565:
3962 dspcntr |= DISPPLANE_BGRX565;
3964 case DRM_FORMAT_XRGB8888:
3965 dspcntr |= DISPPLANE_BGRX888;
3967 case DRM_FORMAT_XBGR8888:
3968 dspcntr |= DISPPLANE_RGBX888;
3970 case DRM_FORMAT_ARGB8888:
3971 dspcntr |= DISPPLANE_BGRA888;
3973 case DRM_FORMAT_ABGR8888:
3974 dspcntr |= DISPPLANE_RGBA888;
3976 case DRM_FORMAT_XRGB2101010:
3977 dspcntr |= DISPPLANE_BGRX101010;
3979 case DRM_FORMAT_XBGR2101010:
3980 dspcntr |= DISPPLANE_RGBX101010;
3982 case DRM_FORMAT_ARGB2101010:
3983 dspcntr |= DISPPLANE_BGRA101010;
3985 case DRM_FORMAT_ABGR2101010:
3986 dspcntr |= DISPPLANE_RGBA101010;
3988 case DRM_FORMAT_XBGR16161616F:
3989 dspcntr |= DISPPLANE_RGBX161616;
/* Unknown format: warn once (plane_check should have filtered it). */
3992 MISSING_CASE(fb->format->format);
3996 if (INTEL_GEN(dev_priv) >= 4 &&
3997 fb->modifier == I915_FORMAT_MOD_X_TILED)
3998 dspcntr |= DISPPLANE_TILED;
4000 if (rotation & DRM_MODE_ROTATE_180)
4001 dspcntr |= DISPPLANE_ROTATE_180;
4003 if (rotation & DRM_MODE_REFLECT_X)
4004 dspcntr |= DISPPLANE_MIRROR;
/*
 * Pre-SKL equivalent of skl_check_plane_surface(): compute the GTT
 * mapping, derive the aligned surface offset and x/y for color plane 0,
 * and pre-apply 180°/mirror coordinate adjustments on platforms where the
 * hardware doesn't do it (everything except HSW/BDW). NOTE(review):
 * sampled extract — offset declaration, early returns, and the rotated-
 * coordinate assignments inside the rotation branches are not visible.
 */
4009 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
4011 struct drm_i915_private *dev_priv =
4012 to_i915(plane_state->uapi.plane->dev);
4013 const struct drm_framebuffer *fb = plane_state->hw.fb;
4014 int src_x, src_y, src_w;
4018 ret = intel_plane_compute_gtt(plane_state);
4022 if (!plane_state->uapi.visible)
4025 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4026 src_x = plane_state->uapi.src.x1 >> 16;
4027 src_y = plane_state->uapi.src.y1 >> 16;
4029 /* Undocumented hardware limit on i965/g4x/vlv/chv */
4030 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
4033 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
4035 if (INTEL_GEN(dev_priv) >= 4)
4036 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
4042 * Put the final coordinates back so that the src
4043 * coordinate checks will see the right values.
4045 drm_rect_translate_to(&plane_state->uapi.src,
4046 src_x << 16, src_y << 16);
4048 /* HSW/BDW do this automagically in hardware */
4049 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
4050 unsigned int rotation = plane_state->hw.rotation;
4051 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4052 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4054 if (rotation & DRM_MODE_ROTATE_180) {
4057 } else if (rotation & DRM_MODE_REFLECT_X) {
4062 plane_state->color_plane[0].offset = offset;
4063 plane_state->color_plane[0].x = src_x;
4064 plane_state->color_plane[0].y = src_y;
/*
 * Whether this i9xx plane supports windowing (position/size within the
 * pipe) rather than always being full-screen: CHV PLANE_B, gen4 PLANE_C,
 * and gen2/3 PLANE_B/C. NOTE(review): the gen5+/G4X branch's return value
 * line is not visible in this extract.
 */
4069 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4071 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4072 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4074 if (IS_CHERRYVIEW(dev_priv))
4075 return i9xx_plane == PLANE_B;
4076 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4078 else if (IS_GEN(dev_priv, 4))
4079 return i9xx_plane == PLANE_C;
4081 return i9xx_plane == PLANE_B ||
4082 i9xx_plane == PLANE_C;
/*
 * Atomic-check callback for i9xx primary planes: rotation check, generic
 * plane-state check (no scaling; clipping only when the plane supports
 * windowing), surface layout, src coordinate validation, and finally the
 * DSPCNTR value. NOTE(review): early `return ret` lines between steps are
 * not visible in this extract.
 */
4086 i9xx_plane_check(struct intel_crtc_state *crtc_state,
4087 struct intel_plane_state *plane_state)
4089 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4092 ret = chv_plane_check_rotation(plane_state);
4096 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
4098 DRM_PLANE_HELPER_NO_SCALING,
4099 DRM_PLANE_HELPER_NO_SCALING,
4100 i9xx_plane_has_windowing(plane),
4105 ret = i9xx_check_plane_surface(plane_state);
4109 if (!plane_state->uapi.visible)
4112 ret = intel_plane_check_src_coordinates(plane_state);
4116 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
/*
 * Program an i9xx primary plane from precomputed state: stride, window
 * position/size (pre-gen4 or CHV PLANE_B), tiled/linear offsets, then
 * DSPCNTR followed by the arming surface-address write. All register
 * writes happen under the uncore lock with _FW accessors.
 * NOTE(review): sampled extract — declarations of dspcntr/linear_offset/
 * dspaddr_offset and some braces are not visible.
 */
4121 static void i9xx_update_plane(struct intel_plane *plane,
4122 const struct intel_crtc_state *crtc_state,
4123 const struct intel_plane_state *plane_state)
4125 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4126 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4128 int x = plane_state->color_plane[0].x;
4129 int y = plane_state->color_plane[0].y;
4130 int crtc_x = plane_state->uapi.dst.x1;
4131 int crtc_y = plane_state->uapi.dst.y1;
4132 int crtc_w = drm_rect_width(&plane_state->uapi.dst);
4133 int crtc_h = drm_rect_height(&plane_state->uapi.dst);
4134 unsigned long irqflags;
4138 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
4140 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
/* gen4+ uses the aligned tile offset; older parts address linearly. */
4142 if (INTEL_GEN(dev_priv) >= 4)
4143 dspaddr_offset = plane_state->color_plane[0].offset;
4145 dspaddr_offset = linear_offset;
4147 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4149 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
4151 if (INTEL_GEN(dev_priv) < 4) {
4153 * PLANE_A doesn't actually have a full window
4154 * generator but let's assume we still need to
4155 * program whatever is there.
4157 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
4158 I915_WRITE_FW(DSPSIZE(i9xx_plane),
4159 ((crtc_h - 1) << 16) | (crtc_w - 1));
4160 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
4161 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
4162 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
4163 ((crtc_h - 1) << 16) | (crtc_w - 1));
4164 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
4167 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
4168 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
4169 } else if (INTEL_GEN(dev_priv) >= 4) {
4170 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
4171 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
4175 * The control register self-arms if the plane was previously
4176 * disabled. Try to make the plane enable atomic by writing
4177 * the control register just before the surface register.
4179 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
4180 if (INTEL_GEN(dev_priv) >= 4)
4181 I915_WRITE_FW(DSPSURF(i9xx_plane),
4182 intel_plane_ggtt_offset(plane_state) +
4185 I915_WRITE_FW(DSPADDR(i9xx_plane),
4186 intel_plane_ggtt_offset(plane_state) +
4189 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/*
 * Disable an i9xx primary plane. DSPCNTR is still written with the
 * crtc-level bits (gamma/CSC) because they affect the pipe bottom color;
 * the surface address write of 0 arms the disable. NOTE(review): the
 * dspcntr declaration is not visible in this extract.
 */
4192 static void i9xx_disable_plane(struct intel_plane *plane,
4193 const struct intel_crtc_state *crtc_state)
4195 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4196 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4197 unsigned long irqflags;
4201 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
4202 * enable on ilk+ affect the pipe bottom color as
4203 * well, so we must configure them even if the plane
4206 * On pre-g4x there is no way to gamma correct the
4207 * pipe bottom color but we'll keep on doing this
4208 * anyway so that the crtc state readout works correctly.
4210 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
4212 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4214 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
4215 if (INTEL_GEN(dev_priv) >= 4)
4216 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
4218 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
4220 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/*
 * Read back whether the plane is enabled in hardware and which pipe it is
 * attached to, under a display power reference. On gen5+ the plane is
 * fixed to its pipe; older parts read the pipe-select field from DSPCNTR.
 * NOTE(review): the *pipe out-parameter in the signature and the val/ret
 * declarations are not visible in this extract.
 */
4223 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
4226 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4227 enum intel_display_power_domain power_domain;
4228 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4229 intel_wakeref_t wakeref;
4234 * Not 100% correct for planes that can move between pipes,
4235 * but that's only the case for gen2-4 which don't have any
4236 * display power wells.
4238 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
4239 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4243 val = I915_READ(DSPCNTR(i9xx_plane));
4245 ret = val & DISPLAY_PLANE_ENABLE;
4247 if (INTEL_GEN(dev_priv) >= 5)
4248 *pipe = plane->pipe;
4250 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
4251 DISPPLANE_SEL_PIPE_SHIFT;
4253 intel_display_power_put(dev_priv, power_domain, wakeref);
/* Unbind one pipe scaler by zeroing its control, position and size regs. */
4258 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4260 struct drm_device *dev = intel_crtc->base.dev;
4261 struct drm_i915_private *dev_priv = to_i915(dev);
4263 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4264 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4265 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4269 * This function detaches (aka. unbinds) unused scalers in hardware
4271 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4273 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4274 const struct intel_crtc_scaler_state *scaler_state =
4275 &crtc_state->scaler_state;
4278 /* loop through and disable scalers that aren't in use */
4279 for (i = 0; i < intel_crtc->num_scalers; i++) {
4280 if (!scaler_state->scalers[i].in_use)
4281 skl_detach_scaler(intel_crtc, i)
/* NOTE(review): trailing ';' and closing braces dropped by the extract. */
/*
 * Unit size for the hardware stride field: 64-byte chunks for linear
 * surfaces, or one tile (height for 90/270 rotation, width in bytes
 * otherwise) for tiled ones. The linear `return 64;` line is not visible
 * in this extract.
 */
4285 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4286 int color_plane, unsigned int rotation)
4289 * The stride is either expressed as a multiple of 64 bytes chunks for
4290 * linear buffers or in number of tiles for tiled buffers.
4292 if (is_surface_linear(fb, color_plane))
4294 else if (drm_rotation_90_or_270(rotation))
4295 return intel_tile_height(fb, color_plane);
4297 return intel_tile_width_bytes(fb, color_plane);
/*
 * Convert the byte stride cached in plane_state into the hardware's
 * stride units for the given color plane; planes beyond the format's
 * plane count get a trivial value (that return line is not visible here).
 */
4300 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4303 const struct drm_framebuffer *fb = plane_state->hw.fb;
4304 unsigned int rotation = plane_state->hw.rotation;
4305 u32 stride = plane_state->color_plane[color_plane].stride;
4307 if (color_plane >= fb->format->num_planes)
4310 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
/*
 * Map a DRM fourcc to the SKL+ PLANE_CTL format (and byte-order) bits.
 * NOTE(review): sampled extract — the first case label (presumably
 * DRM_FORMAT_C8) and the default's return are not visible.
 */
4313 static u32 skl_plane_ctl_format(u32 pixel_format)
4315 switch (pixel_format) {
4317 return PLANE_CTL_FORMAT_INDEXED;
4318 case DRM_FORMAT_RGB565:
4319 return PLANE_CTL_FORMAT_RGB_565;
4320 case DRM_FORMAT_XBGR8888:
4321 case DRM_FORMAT_ABGR8888:
4322 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
4323 case DRM_FORMAT_XRGB8888:
4324 case DRM_FORMAT_ARGB8888:
4325 return PLANE_CTL_FORMAT_XRGB_8888;
4326 case DRM_FORMAT_XBGR2101010:
4327 case DRM_FORMAT_ABGR2101010:
4328 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
4329 case DRM_FORMAT_XRGB2101010:
4330 case DRM_FORMAT_ARGB2101010:
4331 return PLANE_CTL_FORMAT_XRGB_2101010;
4332 case DRM_FORMAT_XBGR16161616F:
4333 case DRM_FORMAT_ABGR16161616F:
4334 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
4335 case DRM_FORMAT_XRGB16161616F:
4336 case DRM_FORMAT_ARGB16161616F:
4337 return PLANE_CTL_FORMAT_XRGB_16161616F;
4338 case DRM_FORMAT_YUYV:
4339 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
4340 case DRM_FORMAT_YVYU:
4341 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
4342 case DRM_FORMAT_UYVY:
4343 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
4344 case DRM_FORMAT_VYUY:
4345 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
4346 case DRM_FORMAT_NV12:
4347 return PLANE_CTL_FORMAT_NV12;
4348 case DRM_FORMAT_P010:
4349 return PLANE_CTL_FORMAT_P010;
4350 case DRM_FORMAT_P012:
4351 return PLANE_CTL_FORMAT_P012;
4352 case DRM_FORMAT_P016:
4353 return PLANE_CTL_FORMAT_P016;
4354 case DRM_FORMAT_Y210:
4355 return PLANE_CTL_FORMAT_Y210;
4356 case DRM_FORMAT_Y212:
4357 return PLANE_CTL_FORMAT_Y212;
4358 case DRM_FORMAT_Y216:
4359 return PLANE_CTL_FORMAT_Y216;
4360 case DRM_FORMAT_XVYU2101010:
4361 return PLANE_CTL_FORMAT_Y410;
4362 case DRM_FORMAT_XVYU12_16161616:
4363 return PLANE_CTL_FORMAT_Y412;
4364 case DRM_FORMAT_XVYU16161616:
4365 return PLANE_CTL_FORMAT_Y416;
/* Unknown fourcc: warn once; caller should have filtered it. */
4367 MISSING_CASE(pixel_format);
/*
 * PLANE_CTL alpha-blend bits (pre-GLK) from the plane's pixel blend mode;
 * formats without alpha always disable blending.
 */
4373 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4375 if (!plane_state->hw.fb->format->has_alpha)
4376 return PLANE_CTL_ALPHA_DISABLE;
4378 switch (plane_state->hw.pixel_blend_mode) {
4379 case DRM_MODE_BLEND_PIXEL_NONE:
4380 return PLANE_CTL_ALPHA_DISABLE;
4381 case DRM_MODE_BLEND_PREMULTI:
4382 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4383 case DRM_MODE_BLEND_COVERAGE:
4384 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
/* default: unexpected blend mode — warn and fall back to disabled. */
4386 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4387 return PLANE_CTL_ALPHA_DISABLE;
/*
 * PLANE_COLOR_CTL alpha-blend bits (GLK+) — same mapping as the SKL
 * variant above, just targeting the color-control register fields.
 */
4391 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4393 if (!plane_state->hw.fb->format->has_alpha)
4394 return PLANE_COLOR_ALPHA_DISABLE;
4396 switch (plane_state->hw.pixel_blend_mode) {
4397 case DRM_MODE_BLEND_PIXEL_NONE:
4398 return PLANE_COLOR_ALPHA_DISABLE;
4399 case DRM_MODE_BLEND_PREMULTI:
4400 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4401 case DRM_MODE_BLEND_COVERAGE:
4402 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
/* default: unexpected blend mode — warn and fall back to disabled. */
4404 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4405 return PLANE_COLOR_ALPHA_DISABLE;
/*
 * Map an fb modifier to PLANE_CTL tiling (and render-decompression) bits.
 * The linear and default-case return values are not visible in this
 * extract.
 */
4409 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4411 switch (fb_modifier) {
4412 case DRM_FORMAT_MOD_LINEAR:
4414 case I915_FORMAT_MOD_X_TILED:
4415 return PLANE_CTL_TILED_X;
4416 case I915_FORMAT_MOD_Y_TILED:
4417 return PLANE_CTL_TILED_Y;
4418 case I915_FORMAT_MOD_Y_TILED_CCS:
4419 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4420 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
4421 return PLANE_CTL_TILED_Y |
4422 PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
4423 PLANE_CTL_CLEAR_COLOR_DISABLE;
4424 case I915_FORMAT_MOD_Yf_TILED:
4425 return PLANE_CTL_TILED_YF;
4426 case I915_FORMAT_MOD_Yf_TILED_CCS:
4427 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
/* Unknown modifier: warn once. */
4429 MISSING_CASE(fb_modifier);
/*
 * Convert DRM (counter-clockwise) rotation to PLANE_CTL (clockwise)
 * rotation bits — hence 90 and 270 are swapped. The ROTATE_0 and default
 * return lines are not visible in this extract.
 */
4435 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4438 case DRM_MODE_ROTATE_0:
4441 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4442 * while i915 HW rotation is clockwise, thats why this swapping.
4444 case DRM_MODE_ROTATE_90:
4445 return PLANE_CTL_ROTATE_270;
4446 case DRM_MODE_ROTATE_180:
4447 return PLANE_CTL_ROTATE_180;
4448 case DRM_MODE_ROTATE_270:
4449 return PLANE_CTL_ROTATE_90;
4451 MISSING_CASE(rotate);
/*
 * CNL+ PLANE_CTL reflection bit: only horizontal flip is supported;
 * REFLECT_Y falls through to the MISSING_CASE warning. The no-reflection
 * and default return lines are not visible in this extract.
 */
4457 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4462 case DRM_MODE_REFLECT_X:
4463 return PLANE_CTL_FLIP_HORIZONTAL;
4464 case DRM_MODE_REFLECT_Y:
4466 MISSING_CASE(reflect);
/*
 * Crtc-level PLANE_CTL bits (pipe gamma/CSC). On GLK/CNL+ these moved to
 * PLANE_COLOR_CTL, so this returns early there (the early-return value and
 * the plane_ctl declaration are not visible in this extract).
 */
4472 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4474 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4477 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4480 if (crtc_state->gamma_enable)
4481 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4483 if (crtc_state->csc_enable)
4484 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
/*
 * Build the per-plane PLANE_CTL value: enable, alpha/gamma/YUV-CSC bits
 * (pre-GLK only — GLK+ moved them to PLANE_COLOR_CTL), format, tiling,
 * rotation, CNL+ reflection, and colorkey mode. The final return is not
 * visible in this extract.
 */
4489 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4490 const struct intel_plane_state *plane_state)
4492 struct drm_i915_private *dev_priv =
4493 to_i915(plane_state->uapi.plane->dev);
4494 const struct drm_framebuffer *fb = plane_state->hw.fb;
4495 unsigned int rotation = plane_state->hw.rotation;
4496 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4499 plane_ctl = PLANE_CTL_ENABLE;
4501 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4502 plane_ctl |= skl_plane_ctl_alpha(plane_state);
4503 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4505 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4506 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4508 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4509 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4512 plane_ctl |= skl_plane_ctl_format(fb->format->format);
4513 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4514 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4516 if (INTEL_GEN(dev_priv) >= 10)
4517 plane_ctl |= cnl_plane_ctl_flip(rotation &
4518 DRM_MODE_REFLECT_MASK);
4520 if (key->flags & I915_SET_COLORKEY_DESTINATION)
4521 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4522 else if (key->flags & I915_SET_COLORKEY_SOURCE)
4523 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
/*
 * Crtc-level PLANE_COLOR_CTL bits (GLK/CNL): pipe gamma and CSC. Gen11+
 * handles these elsewhere, hence the early return of 0.
 */
4528 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4530 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4531 u32 plane_color_ctl = 0;
4533 if (INTEL_GEN(dev_priv) >= 11)
4534 return plane_color_ctl;
4536 if (crtc_state->gamma_enable)
4537 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4539 if (crtc_state->csc_enable)
4540 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4542 return plane_color_ctl;
/*
 * Per-plane PLANE_COLOR_CTL (GLK+): plane gamma disable, alpha mode, and
 * YUV->RGB CSC selection. HDR-capable ICL planes use the input CSC path
 * instead of the fixed-function YUV CSC modes.
 */
4545 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4546 const struct intel_plane_state *plane_state)
4548 struct drm_i915_private *dev_priv =
4549 to_i915(plane_state->uapi.plane->dev);
4550 const struct drm_framebuffer *fb = plane_state->hw.fb;
4551 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4552 u32 plane_color_ctl = 0;
4554 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4555 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4557 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4558 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4559 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4561 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4563 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4564 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4565 } else if (fb->format->is_yuv) {
4566 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4569 return plane_color_ctl;
/*
 * Re-commit a previously duplicated atomic state after HW state was lost
 * (GPU reset / resume). Forces mode_changed on every crtc so fastset can't
 * skip reprogramming, and skips intermediate watermarks on non-GMCH parts.
 * NOTE(review): return type and the state==NULL early path are not visible
 * in this extract; returns the commit result.
 */
4573 __intel_display_resume(struct drm_device *dev,
4574 struct drm_atomic_state *state,
4575 struct drm_modeset_acquire_ctx *ctx)
4577 struct drm_crtc_state *crtc_state;
4578 struct drm_crtc *crtc;
4581 intel_modeset_setup_hw_state(dev, ctx);
4582 intel_vga_redisable(to_i915(dev));
4588 * We've duplicated the state, pointers to the old state are invalid.
4590 * Don't attempt to use the old state until we commit the duplicated state.
4592 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4594 * Force recalculation even if we restore
4595 * current state. With fast modeset this may not result
4596 * in a modeset when the state is compatible.
4598 crtc_state->mode_changed = true;
4601 /* ignore any reset values/BIOS leftovers in the WM registers */
4602 if (!HAS_GMCH(to_i915(dev)))
4603 to_intel_atomic_state(state)->skip_intermediate_wm = true;
4605 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
/* EDEADLK must not escape here: the caller holds all modeset locks. */
4607 WARN_ON(ret == -EDEADLK);
/* True when a full GPU reset would also wipe the display hardware state. */
4611 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4613 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4614 intel_has_gpu_reset(&dev_priv->gt));
/*
 * Quiesce the display before a GPU reset that clobbers it: set the
 * I915_RESET_MODESET bit to break modeset-vs-reset deadlocks, take all
 * modeset locks, duplicate the current atomic state for later restore,
 * and disable all crtcs. The saved state and held ctx are picked up by
 * intel_finish_reset(). NOTE(review): sampled extract — some gotos/labels
 * and the retry loop brace structure are not visible.
 */
4617 void intel_prepare_reset(struct drm_i915_private *dev_priv)
4619 struct drm_device *dev = &dev_priv->drm;
4620 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4621 struct drm_atomic_state *state;
4624 /* reset doesn't touch the display */
4625 if (!i915_modparams.force_reset_modeset_test &&
4626 !gpu_reset_clobbers_display(dev_priv))
4629 /* We have a modeset vs reset deadlock, defensively unbreak it. */
4630 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
4631 smp_mb__after_atomic();
4632 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
4634 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4635 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4636 intel_gt_set_wedged(&dev_priv->gt);
4640 * Need mode_config.mutex so that we don't
4641 * trample ongoing ->detect() and whatnot.
4643 mutex_lock(&dev->mode_config.mutex);
4644 drm_modeset_acquire_init(ctx, 0);
/* Retry lock acquisition on deadlock via the backoff dance. */
4646 ret = drm_modeset_lock_all_ctx(dev, ctx);
4647 if (ret != -EDEADLK)
4650 drm_modeset_backoff(ctx);
4653 * Disabling the crtcs gracefully seems nicer. Also the
4654 * g33 docs say we should at least disable all the planes.
4656 state = drm_atomic_helper_duplicate_state(dev, ctx);
4657 if (IS_ERR(state)) {
4658 ret = PTR_ERR(state);
4659 DRM_ERROR("Duplicating state failed with %i\n", ret);
4663 ret = drm_atomic_helper_disable_all(dev, ctx);
4665 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
4666 drm_atomic_state_put(state);
/* Stash the duplicated state for intel_finish_reset() to restore. */
4670 dev_priv->modeset_restore_state = state;
4671 state->acquire_ctx = ctx;
/*
 * Counterpart of intel_prepare_reset(): restore the saved display state
 * after the GPU reset. If the reset really clobbered the display, fully
 * re-init display HW (PPS workaround, modeset HW, clock gating, HPD)
 * before committing the saved state; finally drop the locks and clear
 * I915_RESET_MODESET. NOTE(review): some early-out gotos and labels are
 * not visible in this extract.
 */
4674 void intel_finish_reset(struct drm_i915_private *dev_priv)
4676 struct drm_device *dev = &dev_priv->drm;
4677 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4678 struct drm_atomic_state *state;
4681 /* reset doesn't touch the display */
4682 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
4685 state = fetch_and_zero(&dev_priv->modeset_restore_state);
4689 /* reset doesn't touch the display */
4690 if (!gpu_reset_clobbers_display(dev_priv)) {
4691 /* for testing only restore the display */
4692 ret = __intel_display_resume(dev, state, ctx);
4694 DRM_ERROR("Restoring old state failed with %i\n", ret);
4697 * The display has been reset as well,
4698 * so need a full re-initialization.
4700 intel_pps_unlock_regs_wa(dev_priv);
4701 intel_modeset_init_hw(dev_priv);
4702 intel_init_clock_gating(dev_priv);
4704 spin_lock_irq(&dev_priv->irq_lock);
4705 if (dev_priv->display.hpd_irq_setup)
4706 dev_priv->display.hpd_irq_setup(dev_priv);
4707 spin_unlock_irq(&dev_priv->irq_lock);
4709 ret = __intel_display_resume(dev, state, ctx);
4711 DRM_ERROR("Restoring old state failed with %i\n", ret);
4713 intel_hpd_init(dev_priv);
4716 drm_atomic_state_put(state);
4718 drm_modeset_drop_locks(ctx);
4719 drm_modeset_acquire_fini(ctx);
4720 mutex_unlock(&dev->mode_config.mutex);
4722 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
/*
 * Apply ICL PIPE_CHICKEN workarounds: WA #1153 (alpha math bypass for
 * 0x00/0xff) and WA #1605353570 (pixel rounding passthrough).
 */
4725 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4727 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4728 enum pipe pipe = crtc->pipe;
4731 tmp = I915_READ(PIPE_CHICKEN(pipe));
4734 * Display WA #1153: icl
4735 * enable hardware to bypass the alpha math
4736 * and rounding for per-pixel values 00 and 0xff
4738 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4740 * Display WA # 1605353570: icl
4741 * Set the pixel rounding bit to 1 for allowing
4742 * passthrough of Frame buffer pixels unmodified
4745 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4746 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
/*
 * icl_enable_trans_port_sync - enable Transcoder Port Sync on a slave CRTC.
 *
 * Programs TRANS_DDI_FUNC_CTL2 with the master-transcoder select and the
 * enable bit. Returns early (line elided in this extract) when the crtc has
 * no master transcoder, i.e. it is not a port-sync slave.
 */
4749 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
4751 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4752 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4753 u32 trans_ddi_func_ctl2_val;
4757 * Configure the master select and enable Transcoder Port Sync for
4758 * Slave CRTCs transcoder.
4760 if (crtc_state->master_transcoder == INVALID_TRANSCODER)
/* EDP uses a distinct master-select encoding; others are transcoder + 1. */
4763 if (crtc_state->master_transcoder == TRANSCODER_EDP)
4766 master_select = crtc_state->master_transcoder + 1;
4768 /* Set the master select bits for Tranascoder Port Sync */
4769 trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
4770 PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
4771 PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
4772 /* Enable Transcoder Port Sync */
4773 trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
4775 I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
4776 trans_ddi_func_ctl2_val);
/*
 * intel_fdi_normal_train - switch the FDI link from training patterns to
 * normal pixel transmission.
 *
 * Clears the training-pattern bits in FDI_TX_CTL/FDI_RX_CTL and enables
 * enhanced framing; IVB and CPT PCHs use different bit encodings, hence
 * the platform branches. On IVB error correction is also enabled.
 */
4779 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4781 struct drm_device *dev = crtc->base.dev;
4782 struct drm_i915_private *dev_priv = to_i915(dev);
4783 enum pipe pipe = crtc->pipe;
4787 /* enable normal train */
4788 reg = FDI_TX_CTL(pipe);
4789 temp = I915_READ(reg);
4790 if (IS_IVYBRIDGE(dev_priv)) {
4791 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4792 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4794 temp &= ~FDI_LINK_TRAIN_NONE;
4795 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4797 I915_WRITE(reg, temp);
4799 reg = FDI_RX_CTL(pipe);
4800 temp = I915_READ(reg);
4801 if (HAS_PCH_CPT(dev_priv)) {
4802 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4803 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4805 temp &= ~FDI_LINK_TRAIN_NONE;
4806 temp |= FDI_LINK_TRAIN_NONE;
4808 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4810 /* wait one idle pattern time */
4814 /* IVB wants error correction enabled */
4815 if (IS_IVYBRIDGE(dev_priv))
4816 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4817 FDI_FE_ERRC_ENABLE);
4820 /* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Trains the CPU-to-PCH FDI link: enable TX/RX with training pattern 1,
 * poll FDI_RX_IIR for bit lock, switch both sides to pattern 2, then poll
 * for symbol lock. Failures are logged but not fatal.
 * NOTE(review): loop-exit 'break's and udelay() lines appear elided in
 * this extract — confirm against the full file.
 */
4821 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4822 const struct intel_crtc_state *crtc_state)
4824 struct drm_device *dev = crtc->base.dev;
4825 struct drm_i915_private *dev_priv = to_i915(dev);
4826 enum pipe pipe = crtc->pipe;
4830 /* FDI needs bits from pipe first */
4831 assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
4833 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4835 reg = FDI_RX_IMR(pipe);
4836 temp = I915_READ(reg);
4837 temp &= ~FDI_RX_SYMBOL_LOCK;
4838 temp &= ~FDI_RX_BIT_LOCK;
4839 I915_WRITE(reg, temp);
4843 /* enable CPU FDI TX and PCH FDI RX */
4844 reg = FDI_TX_CTL(pipe);
4845 temp = I915_READ(reg);
4846 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4847 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4848 temp &= ~FDI_LINK_TRAIN_NONE;
4849 temp |= FDI_LINK_TRAIN_PATTERN_1;
4850 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4852 reg = FDI_RX_CTL(pipe);
4853 temp = I915_READ(reg);
4854 temp &= ~FDI_LINK_TRAIN_NONE;
4855 temp |= FDI_LINK_TRAIN_PATTERN_1;
4856 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4861 /* Ironlake workaround, enable clock pointer after FDI enable*/
4862 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4863 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4864 FDI_RX_PHASE_SYNC_POINTER_EN);
/* Poll up to 5 times for bit lock on the RX side. */
4866 reg = FDI_RX_IIR(pipe);
4867 for (tries = 0; tries < 5; tries++) {
4868 temp = I915_READ(reg);
4869 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4871 if ((temp & FDI_RX_BIT_LOCK)) {
4872 DRM_DEBUG_KMS("FDI train 1 done.\n");
4873 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4878 DRM_ERROR("FDI train 1 fail!\n");
/* Train 2: switch both TX and RX to pattern 2. */
4881 reg = FDI_TX_CTL(pipe);
4882 temp = I915_READ(reg);
4883 temp &= ~FDI_LINK_TRAIN_NONE;
4884 temp |= FDI_LINK_TRAIN_PATTERN_2;
4885 I915_WRITE(reg, temp);
4887 reg = FDI_RX_CTL(pipe);
4888 temp = I915_READ(reg);
4889 temp &= ~FDI_LINK_TRAIN_NONE;
4890 temp |= FDI_LINK_TRAIN_PATTERN_2;
4891 I915_WRITE(reg, temp);
/* Poll up to 5 times for symbol lock. */
4896 reg = FDI_RX_IIR(pipe);
4897 for (tries = 0; tries < 5; tries++) {
4898 temp = I915_READ(reg);
4899 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4901 if (temp & FDI_RX_SYMBOL_LOCK) {
4902 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4903 DRM_DEBUG_KMS("FDI train 2 done.\n");
4908 DRM_ERROR("FDI train 2 fail!\n");
4910 DRM_DEBUG_KMS("FDI train done\n");
/*
 * Voltage-swing / pre-emphasis candidates tried in order by the SNB and
 * IVB FDI link-training loops below.
 */
4914 static const int snb_b_fdi_train_param[] = {
4915 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4916 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4917 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4918 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4921 /* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Same two-phase (pattern 1 -> bit lock, pattern 2 -> symbol lock) training
 * as the ILK variant, but iterating over the snb_b_fdi_train_param
 * voltage-swing/pre-emphasis table with a retry loop per setting.
 * NOTE(review): 'break'/'goto' exits and udelay() lines appear elided in
 * this extract — confirm loop termination in the full file.
 */
4922 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4923 const struct intel_crtc_state *crtc_state)
4925 struct drm_device *dev = crtc->base.dev;
4926 struct drm_i915_private *dev_priv = to_i915(dev);
4927 enum pipe pipe = crtc->pipe;
4931 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4933 reg = FDI_RX_IMR(pipe);
4934 temp = I915_READ(reg);
4935 temp &= ~FDI_RX_SYMBOL_LOCK;
4936 temp &= ~FDI_RX_BIT_LOCK;
4937 I915_WRITE(reg, temp);
4942 /* enable CPU FDI TX and PCH FDI RX */
4943 reg = FDI_TX_CTL(pipe);
4944 temp = I915_READ(reg);
4945 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4946 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4947 temp &= ~FDI_LINK_TRAIN_NONE;
4948 temp |= FDI_LINK_TRAIN_PATTERN_1;
4949 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4951 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4952 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4954 I915_WRITE(FDI_RX_MISC(pipe),
4955 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4957 reg = FDI_RX_CTL(pipe);
4958 temp = I915_READ(reg);
4959 if (HAS_PCH_CPT(dev_priv)) {
4960 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4961 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4963 temp &= ~FDI_LINK_TRAIN_NONE;
4964 temp |= FDI_LINK_TRAIN_PATTERN_1;
4966 I915_WRITE(reg, temp | FDI_RX_ENABLE);
/* Try each vswing/pre-emphasis level until bit lock is achieved. */
4971 for (i = 0; i < 4; i++) {
4972 reg = FDI_TX_CTL(pipe);
4973 temp = I915_READ(reg);
4974 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4975 temp |= snb_b_fdi_train_param[i];
4976 I915_WRITE(reg, temp);
4981 for (retry = 0; retry < 5; retry++) {
4982 reg = FDI_RX_IIR(pipe);
4983 temp = I915_READ(reg);
4984 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4985 if (temp & FDI_RX_BIT_LOCK) {
4986 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4987 DRM_DEBUG_KMS("FDI train 1 done.\n");
4996 DRM_ERROR("FDI train 1 fail!\n");
/* Train 2: switch to pattern 2 (SNB also resets vswing to the base level). */
4999 reg = FDI_TX_CTL(pipe);
5000 temp = I915_READ(reg);
5001 temp &= ~FDI_LINK_TRAIN_NONE;
5002 temp |= FDI_LINK_TRAIN_PATTERN_2;
5003 if (IS_GEN(dev_priv, 6)) {
5004 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5006 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5008 I915_WRITE(reg, temp);
5010 reg = FDI_RX_CTL(pipe);
5011 temp = I915_READ(reg);
5012 if (HAS_PCH_CPT(dev_priv)) {
5013 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5014 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5016 temp &= ~FDI_LINK_TRAIN_NONE;
5017 temp |= FDI_LINK_TRAIN_PATTERN_2;
5019 I915_WRITE(reg, temp);
/* Second vswing sweep, now waiting for symbol lock. */
5024 for (i = 0; i < 4; i++) {
5025 reg = FDI_TX_CTL(pipe);
5026 temp = I915_READ(reg);
5027 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5028 temp |= snb_b_fdi_train_param[i];
5029 I915_WRITE(reg, temp);
5034 for (retry = 0; retry < 5; retry++) {
5035 reg = FDI_RX_IIR(pipe);
5036 temp = I915_READ(reg);
5037 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
5038 if (temp & FDI_RX_SYMBOL_LOCK) {
5039 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
5040 DRM_DEBUG_KMS("FDI train 2 done.\n");
5049 DRM_ERROR("FDI train 2 fail!\n");
5051 DRM_DEBUG_KMS("FDI train done.\n");
5054 /* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 lacks working hardware auto-training, so each vswing/pre-emphasis
 * level is tried twice (j/2 indexes the table): disable TX/RX, re-enable
 * with pattern 1 at the candidate level, poll for bit lock, then switch to
 * pattern 2 and poll for symbol lock. Success paths and loop 'break's are
 * elided in this extract — confirm against the full file.
 */
5055 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
5056 const struct intel_crtc_state *crtc_state)
5058 struct drm_device *dev = crtc->base.dev;
5059 struct drm_i915_private *dev_priv = to_i915(dev);
5060 enum pipe pipe = crtc->pipe;
5064 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
5066 reg = FDI_RX_IMR(pipe);
5067 temp = I915_READ(reg);
5068 temp &= ~FDI_RX_SYMBOL_LOCK;
5069 temp &= ~FDI_RX_BIT_LOCK;
5070 I915_WRITE(reg, temp);
5075 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
5076 I915_READ(FDI_RX_IIR(pipe)));
5078 /* Try each vswing and preemphasis setting twice before moving on */
5079 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
5080 /* disable first in case we need to retry */
5081 reg = FDI_TX_CTL(pipe);
5082 temp = I915_READ(reg);
5083 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
5084 temp &= ~FDI_TX_ENABLE;
5085 I915_WRITE(reg, temp);
5087 reg = FDI_RX_CTL(pipe);
5088 temp = I915_READ(reg);
5089 temp &= ~FDI_LINK_TRAIN_AUTO;
5090 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5091 temp &= ~FDI_RX_ENABLE;
5092 I915_WRITE(reg, temp);
5094 /* enable CPU FDI TX and PCH FDI RX */
5095 reg = FDI_TX_CTL(pipe);
5096 temp = I915_READ(reg);
5097 temp &= ~FDI_DP_PORT_WIDTH_MASK;
5098 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5099 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
5100 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5101 temp |= snb_b_fdi_train_param[j/2];
5102 temp |= FDI_COMPOSITE_SYNC;
5103 I915_WRITE(reg, temp | FDI_TX_ENABLE);
5105 I915_WRITE(FDI_RX_MISC(pipe),
5106 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
5108 reg = FDI_RX_CTL(pipe);
5109 temp = I915_READ(reg);
5110 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5111 temp |= FDI_COMPOSITE_SYNC;
5112 I915_WRITE(reg, temp | FDI_RX_ENABLE);
5115 udelay(1); /* should be 0.5us */
/* Bit-lock poll; the register is read twice per iteration to avoid
 * missing a transient lock indication. */
5117 for (i = 0; i < 4; i++) {
5118 reg = FDI_RX_IIR(pipe);
5119 temp = I915_READ(reg);
5120 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
5122 if (temp & FDI_RX_BIT_LOCK ||
5123 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
5124 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
5125 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
5129 udelay(1); /* should be 0.5us */
5132 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
/* Train 2: switch both ends to pattern 2. */
5137 reg = FDI_TX_CTL(pipe);
5138 temp = I915_READ(reg);
5139 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
5140 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
5141 I915_WRITE(reg, temp);
5143 reg = FDI_RX_CTL(pipe);
5144 temp = I915_READ(reg);
5145 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5146 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5147 I915_WRITE(reg, temp);
5150 udelay(2); /* should be 1.5us */
5152 for (i = 0; i < 4; i++) {
5153 reg = FDI_RX_IIR(pipe);
5154 temp = I915_READ(reg);
5155 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
5157 if (temp & FDI_RX_SYMBOL_LOCK ||
5158 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
5159 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
5160 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
5164 udelay(2); /* should be 1.5us */
5167 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
5171 DRM_DEBUG_KMS("FDI train done.\n");
/*
 * ironlake_fdi_pll_enable - power up the FDI PLLs for this pipe.
 *
 * Enables the PCH FDI RX PLL with the configured lane count and the BPC
 * taken from PIPECONF, switches the RX clock from Rawclk to PCDclk, and
 * ensures the CPU FDI TX PLL is on. Warm-up udelay()s are elided in this
 * extract.
 */
5174 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
5176 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
5177 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5178 enum pipe pipe = intel_crtc->pipe;
5182 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5183 reg = FDI_RX_CTL(pipe);
5184 temp = I915_READ(reg);
5185 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
5186 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
/* Mirror the pipe's BPC field into the FDI RX control bits 13:11. */
5187 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5188 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
5193 /* Switch from Rawclk to PCDclk */
5194 temp = I915_READ(reg);
5195 I915_WRITE(reg, temp | FDI_PCDCLK);
5200 /* Enable CPU FDI TX PLL, always on for Ironlake */
5201 reg = FDI_TX_CTL(pipe);
5202 temp = I915_READ(reg);
5203 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
5204 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
/*
 * ironlake_fdi_pll_disable - power down the FDI PLLs for this pipe.
 *
 * Reverse of ironlake_fdi_pll_enable: switch RX back to Rawclk, then
 * disable the TX and RX PLLs. Settle udelay()s are elided in this extract.
 */
5211 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
5213 struct drm_device *dev = intel_crtc->base.dev;
5214 struct drm_i915_private *dev_priv = to_i915(dev);
5215 enum pipe pipe = intel_crtc->pipe;
5219 /* Switch from PCDclk to Rawclk */
5220 reg = FDI_RX_CTL(pipe);
5221 temp = I915_READ(reg);
5222 I915_WRITE(reg, temp & ~FDI_PCDCLK);
5224 /* Disable CPU FDI TX PLL */
5225 reg = FDI_TX_CTL(pipe);
5226 temp = I915_READ(reg);
5227 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
/* Finally take down the PCH FDI RX PLL. */
5232 reg = FDI_RX_CTL(pipe);
5233 temp = I915_READ(reg);
5234 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
5236 /* Wait for the clocks to turn off. */
/*
 * ironlake_fdi_disable - turn off the FDI link for this pipe.
 *
 * Disables CPU FDI TX and PCH FDI RX, applies the IBX clock-pointer
 * workaround, and leaves both sides parked in training pattern 1 with the
 * BPC field kept consistent with PIPECONF.
 */
5241 static void ironlake_fdi_disable(struct intel_crtc *crtc)
5243 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5244 enum pipe pipe = crtc->pipe;
5248 /* disable CPU FDI tx and PCH FDI rx */
5249 reg = FDI_TX_CTL(pipe);
5250 temp = I915_READ(reg);
5251 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
5254 reg = FDI_RX_CTL(pipe);
5255 temp = I915_READ(reg);
5256 temp &= ~(0x7 << 16);
5257 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5258 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
5263 /* Ironlake workaround, disable clock pointer after downing FDI */
5264 if (HAS_PCH_IBX(dev_priv))
5265 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
5267 /* still set train pattern 1 */
5268 reg = FDI_TX_CTL(pipe);
5269 temp = I915_READ(reg);
5270 temp &= ~FDI_LINK_TRAIN_NONE;
5271 temp |= FDI_LINK_TRAIN_PATTERN_1;
5272 I915_WRITE(reg, temp);
5274 reg = FDI_RX_CTL(pipe);
5275 temp = I915_READ(reg);
5276 if (HAS_PCH_CPT(dev_priv)) {
5277 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5278 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5280 temp &= ~FDI_LINK_TRAIN_NONE;
5281 temp |= FDI_LINK_TRAIN_PATTERN_1;
5283 /* BPC in FDI rx is consistent with that in PIPECONF */
5284 temp &= ~(0x07 << 16);
5285 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5286 I915_WRITE(reg, temp);
/*
 * intel_has_pending_fb_unpin - check whether any CRTC still has a commit
 * whose cleanup (fb unpin) has not completed.
 *
 * Peeks at the head of each crtc's commit list under commit_lock and
 * tests its cleanup_done completion without blocking. The return
 * statements and part of the per-crtc flow are elided in this extract —
 * confirm the exact return semantics in the full file.
 */
5292 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5294 struct drm_crtc *crtc;
5297 drm_for_each_crtc(crtc, &dev_priv->drm) {
5298 struct drm_crtc_commit *commit;
5299 spin_lock(&crtc->commit_lock);
5300 commit = list_first_entry_or_null(&crtc->commit_list,
5301 struct drm_crtc_commit, commit_entry);
/* No commit pending counts as "cleanup done". */
5302 cleanup_done = commit ?
5303 try_wait_for_completion(&commit->cleanup_done) : true;
5304 spin_unlock(&crtc->commit_lock);
5309 drm_crtc_wait_one_vblank(crtc);
/*
 * lpt_disable_iclkip - gate the iCLKIP clock.
 *
 * Gates the pixel clock, then sets SBI_SSCCTL_DISABLE in SSCCTL6 via the
 * sideband interface under sb_lock.
 */
5317 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5321 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
5323 mutex_lock(&dev_priv->sb_lock);
5325 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5326 temp |= SBI_SSCCTL_DISABLE;
5327 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5329 mutex_unlock(&dev_priv->sb_lock);
5332 /* Program iCLKIP clock to the desired frequency */
/*
 * Derives the integer divisor (divsel), phase increment (phaseinc) and
 * auxiliary divider (auxdiv) from the crtc pixel clock against the 172.8MHz
 * virtual root, then programs them through the sideband (SBI) registers and
 * un-gates the pixel clock. The auxdiv loop-exit condition and part of the
 * 20MHz corner-case handling are elided in this extract.
 */
5333 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
5335 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5336 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5337 int clock = crtc_state->hw.adjusted_mode.crtc_clock;
5338 u32 divsel, phaseinc, auxdiv, phasedir = 0;
/* Start from a known-disabled state before reprogramming. */
5341 lpt_disable_iclkip(dev_priv);
5343 /* The iCLK virtual clock root frequency is in MHz,
5344 * but the adjusted_mode->crtc_clock in in KHz. To get the
5345 * divisors, it is necessary to divide one by another, so we
5346 * convert the virtual clock precision to KHz here for higher
5349 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
5350 u32 iclk_virtual_root_freq = 172800 * 1000;
5351 u32 iclk_pi_range = 64;
5352 u32 desired_divisor;
5354 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5356 divsel = (desired_divisor / iclk_pi_range) - 2;
5357 phaseinc = desired_divisor % iclk_pi_range;
5360 * Near 20MHz is a corner case which is
5361 * out of range for the 7-bit divisor
5367 /* This should not happen with any sane values */
5368 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
5369 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5370 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
5371 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5373 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
/* All SBI writes below are serialized by sb_lock. */
5380 mutex_lock(&dev_priv->sb_lock);
5382 /* Program SSCDIVINTPHASE6 */
5383 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5384 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5385 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5386 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5387 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5388 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5389 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5390 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5392 /* Program SSCAUXDIV */
5393 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5394 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5395 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5396 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5398 /* Enable modulator and associated divider */
5399 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5400 temp &= ~SBI_SSCCTL_DISABLE;
5401 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5403 mutex_unlock(&dev_priv->sb_lock);
5405 /* Wait for initialization time */
5408 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
/*
 * lpt_get_iclkip - read back the currently programmed iCLKIP frequency.
 *
 * Inverse of lpt_program_iclkip(): reads divsel/phaseinc/auxdiv from the
 * SBI registers and recomputes the clock against the 172.8MHz virtual
 * root. Early-return values for the gated/disabled cases are elided in
 * this extract.
 */
5411 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5413 u32 divsel, phaseinc, auxdiv;
5414 u32 iclk_virtual_root_freq = 172800 * 1000;
5415 u32 iclk_pi_range = 64;
5416 u32 desired_divisor;
/* Pixel clock gated -> iCLKIP not running. */
5419 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5422 mutex_lock(&dev_priv->sb_lock);
5424 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5425 if (temp & SBI_SSCCTL_DISABLE) {
5426 mutex_unlock(&dev_priv->sb_lock);
5430 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5431 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5432 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5433 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5434 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5436 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5437 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5438 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5440 mutex_unlock(&dev_priv->sb_lock);
/* Reverse the divisor decomposition done when programming. */
5442 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5444 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5445 desired_divisor << auxdiv);
/*
 * ironlake_pch_transcoder_set_timings - copy the CPU transcoder's timing
 * registers into the PCH transcoder.
 *
 * Straight register-to-register copy of H/V total, blank, sync and
 * vsyncshift so the PCH transcoder mirrors the CPU transcoder's mode.
 */
5448 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5449 enum pipe pch_transcoder)
5451 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5452 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5453 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5455 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5456 I915_READ(HTOTAL(cpu_transcoder)));
5457 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5458 I915_READ(HBLANK(cpu_transcoder)));
5459 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5460 I915_READ(HSYNC(cpu_transcoder)));
5462 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5463 I915_READ(VTOTAL(cpu_transcoder)));
5464 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5465 I915_READ(VBLANK(cpu_transcoder)));
5466 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5467 I915_READ(VSYNC(cpu_transcoder)));
5468 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5469 I915_READ(VSYNCSHIFT(cpu_transcoder)));
/*
 * cpt_set_fdi_bc_bifurcation - set or clear FDI B/C lane bifurcation in
 * SOUTH_CHICKEN1.
 *
 * No-ops when the bit already matches 'enable'. The WARN_ONs document the
 * requirement that FDI RX on pipes B and C must be off while flipping the
 * bifurcation select.
 */
5472 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5476 temp = I915_READ(SOUTH_CHICKEN1);
5477 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5480 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5481 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5483 temp &= ~FDI_BC_BIFURCATION_SELECT;
5485 temp |= FDI_BC_BIFURCATION_SELECT;
5487 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5488 I915_WRITE(SOUTH_CHICKEN1, temp);
/* Posting read flushes the write before callers proceed. */
5489 POSTING_READ(SOUTH_CHICKEN1);
/*
 * ivybridge_update_fdi_bc_bifurcation - choose FDI B/C bifurcation based
 * on which pipe is being driven and how many FDI lanes it needs.
 *
 * The switch's case labels and default branch are elided in this extract;
 * visible logic: one pipe disables bifurcation when it needs more than 2
 * lanes (and enables it otherwise), another always enables it.
 */
5492 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5494 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5495 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5497 switch (crtc->pipe) {
5501 if (crtc_state->fdi_lanes > 2)
5502 cpt_set_fdi_bc_bifurcation(dev_priv, false);
5504 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5508 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5517 * Finds the encoder associated with the given CRTC. This can only be
5518 * used when we know that the CRTC isn't feeding multiple encoders!
/*
 * Scans the new connector states in the atomic state for connectors bound
 * to this crtc and returns the (single) matching encoder; warns if the
 * count is not exactly one.
 */
5520 static struct intel_encoder *
5521 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5522 const struct intel_crtc_state *crtc_state)
5524 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5525 const struct drm_connector_state *connector_state;
5526 const struct drm_connector *connector;
5527 struct intel_encoder *encoder = NULL;
5528 int num_encoders = 0;
5531 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5532 if (connector_state->crtc != &crtc->base)
5535 encoder = to_intel_encoder(connector_state->best_encoder);
5539 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5540 num_encoders, pipe_name(crtc->pipe));
5546 * Enable PCH resources required for PCH ports:
5548 * - FDI training & RX/TX
5549 * - update transcoder timings
5550 * - DP transcoding bits
/*
 * Full PCH bring-up sequence for ILK-class hardware: FDI bifurcation (IVB),
 * TU size programming, FDI link training, PCH DPLL clock selection and
 * enable, transcoder timing copy, switch FDI to normal train, optional
 * TRANS_DP_CTL setup for CPT DP ports, and finally the PCH transcoder
 * enable. Statement order matters throughout — see inline comments.
 */
5553 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5554 const struct intel_crtc_state *crtc_state)
5556 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5557 struct drm_device *dev = crtc->base.dev;
5558 struct drm_i915_private *dev_priv = to_i915(dev);
5559 enum pipe pipe = crtc->pipe;
5562 assert_pch_transcoder_disabled(dev_priv, pipe);
5564 if (IS_IVYBRIDGE(dev_priv))
5565 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5567 /* Write the TU size bits before fdi link training, so that error
5568 * detection works. */
5569 I915_WRITE(FDI_RX_TUSIZE1(pipe),
5570 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5572 /* For PCH output, training FDI link */
5573 dev_priv->display.fdi_link_train(crtc, crtc_state);
5575 /* We need to program the right clock selection before writing the pixel
5576 * mutliplier into the DPLL. */
5577 if (HAS_PCH_CPT(dev_priv)) {
5580 temp = I915_READ(PCH_DPLL_SEL);
5581 temp |= TRANS_DPLL_ENABLE(pipe);
5582 sel = TRANS_DPLLB_SEL(pipe);
/* Pick DPLL B's select bit only when this crtc is on PCH PLL B. */
5583 if (crtc_state->shared_dpll ==
5584 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5588 I915_WRITE(PCH_DPLL_SEL, temp);
5591 /* XXX: pch pll's can be enabled any time before we enable the PCH
5592 * transcoder, and we actually should do this to not upset any PCH
5593 * transcoder that already use the clock when we share it.
5595 * Note that enable_shared_dpll tries to do the right thing, but
5596 * get_shared_dpll unconditionally resets the pll - we need that to have
5597 * the right LVDS enable sequence. */
5598 intel_enable_shared_dpll(crtc_state);
5600 /* set transcoder timing, panel must allow it */
5601 assert_panel_unlocked(dev_priv, pipe);
5602 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5604 intel_fdi_normal_train(crtc);
5606 /* For PCH DP, enable TRANS_DP_CTL */
5607 if (HAS_PCH_CPT(dev_priv) &&
5608 intel_crtc_has_dp_encoder(crtc_state)) {
5609 const struct drm_display_mode *adjusted_mode =
5610 &crtc_state->hw.adjusted_mode;
5611 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5612 i915_reg_t reg = TRANS_DP_CTL(pipe);
5615 temp = I915_READ(reg);
5616 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5617 TRANS_DP_SYNC_MASK |
5619 temp |= TRANS_DP_OUTPUT_ENABLE;
5620 temp |= bpc << 9; /* same format but at 11:9 */
5622 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5623 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5624 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5625 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
/* Route the transcoder to the DP port driving this crtc (B..D only). */
5627 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5628 WARN_ON(port < PORT_B || port > PORT_D);
5629 temp |= TRANS_DP_PORT_SEL(port);
5631 I915_WRITE(reg, temp);
5634 ironlake_enable_pch_transcoder(crtc_state);
/*
 * lpt_pch_enable - LPT variant of the PCH enable sequence.
 *
 * LPT only has transcoder A on the PCH side: program iCLKIP, copy the
 * timings to PIPE_A's PCH transcoder, then enable it.
 */
5637 static void lpt_pch_enable(const struct intel_atomic_state *state,
5638 const struct intel_crtc_state *crtc_state)
5640 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5641 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5642 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5644 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5646 lpt_program_iclkip(crtc_state);
5648 /* Set transcoder timing. */
5649 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5651 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
/*
 * cpt_verify_modeset - sanity-check that the pipe is actually running
 * after a modeset by watching the PIPEDSL scanline counter advance.
 *
 * Waits up to 5ms (twice) for the counter to change; logs an error if the
 * pipe appears stuck.
 */
5654 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
5657 i915_reg_t dslreg = PIPEDSL(pipe);
5660 temp = I915_READ(dslreg);
5662 if (wait_for(I915_READ(dslreg) != temp, 5)) {
5663 if (wait_for(I915_READ(dslreg) != temp, 5))
5664 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5669 * The hardware phase 0.0 refers to the center of the pixel.
5670 * We want to start from the top/left edge which is phase
5671 * -0.5. That matches how the hardware calculates the scaling
5672 * factors (from top-left of the first pixel to bottom-right
5673 * of the last pixel, as opposed to the pixel centers).
5675 * For 4:2:0 subsampled chroma planes we obviously have to
5676 * adjust that so that the chroma sample position lands in
5679 * Note that for packed YCbCr 4:2:2 formats there is no way to
5680 * control chroma siting. The hardware simply replicates the
5681 * chroma samples for both of the luma samples, and thus we don't
5682 * actually get the expected MPEG2 chroma siting convention :(
5683 * The same behaviour is observed on pre-SKL platforms as well.
5685 * Theory behind the formula (note that we ignore sub-pixel
5686 * source coordinates):
5687 * s = source sample position
5688 * d = destination sample position
5693 * | | 1.5 (initial phase)
5701 * | -0.375 (initial phase)
/*
 * Computes the PS_*_PHASE register value in the hardware's s.16 fixed-point
 * format (0x8000 == 0.5). The chroma-cosited and phase<0 guard conditions
 * appear elided in this extract — confirm against the full file.
 */
5708 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5710 int phase = -0x8000;
5714 phase += (sub - 1) * 0x8000 / sub;
5716 phase += scale / (2 * sub);
5719 * Hardware initial phase limited to [-0.5:1.5].
5720 * Since the max hardware scale factor is 3.0, we
5721 * should never actually excdeed 1.0 here.
5723 WARN_ON(phase < -0x8000 || phase > 0x18000);
5726 phase = 0x10000 + phase;
5728 trip = PS_PHASE_TRIP;
/* Hardware takes a 13-bit phase field; low two bits are dropped. */
5730 return ((phase >> 2) & PS_PHASE_MASK) | trip;
/*
 * Pipe/plane scaler source and destination size limits, in pixels.
 * SKL-class limits apply below gen11; ICL raises the max width to 5120.
 * Planar (4:2:0) YUV sources additionally have a larger minimum size.
 */
5733 #define SKL_MIN_SRC_W 8
5734 #define SKL_MAX_SRC_W 4096
5735 #define SKL_MIN_SRC_H 8
5736 #define SKL_MAX_SRC_H 4096
5737 #define SKL_MIN_DST_W 8
5738 #define SKL_MAX_DST_W 4096
5739 #define SKL_MIN_DST_H 8
5740 #define SKL_MAX_DST_H 4096
5741 #define ICL_MAX_SRC_W 5120
5742 #define ICL_MAX_SRC_H 4096
5743 #define ICL_MAX_DST_W 5120
5744 #define ICL_MAX_DST_H 4096
5745 #define SKL_MIN_YUV_420_SRC_W 16
5746 #define SKL_MIN_YUV_420_SRC_H 16
/*
 * skl_update_scaler - stage (or release) a scaler claim in crtc_state.
 *
 * Shared by the crtc and plane paths: validates the requested src/dst
 * sizes against the platform limits above, then sets or clears this
 * user's bit in scaler_state->scaler_users. Actual scaler register
 * programming happens later; return statements are elided in this extract.
 */
5749 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5750 unsigned int scaler_user, int *scaler_id,
5751 int src_w, int src_h, int dst_w, int dst_h,
5752 const struct drm_format_info *format, bool need_scaler)
5754 struct intel_crtc_scaler_state *scaler_state =
5755 &crtc_state->scaler_state;
5756 struct intel_crtc *intel_crtc =
5757 to_intel_crtc(crtc_state->uapi.crtc);
5758 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5759 const struct drm_display_mode *adjusted_mode =
5760 &crtc_state->hw.adjusted_mode;
5763 * Src coordinates are already rotated by 270 degrees for
5764 * the 90/270 degree plane rotation cases (to match the
5765 * GTT mapping), hence no need to account for rotation here.
5767 if (src_w != dst_w || src_h != dst_h)
5771 * Scaling/fitting not supported in IF-ID mode in GEN9+
5772 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5773 * Once NV12 is enabled, handle it here while allocating scaler
5776 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
5777 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5778 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5783 * if plane is being disabled or scaler is no more required or force detach
5784 * - free scaler binded to this plane/crtc
5785 * - in order to do this, update crtc->scaler_usage
5787 * Here scaler state in crtc_state is set free so that
5788 * scaler can be assigned to other user. Actual register
5789 * update to free the scaler is done in plane/panel-fit programming.
5790 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5792 if (force_detach || !need_scaler) {
5793 if (*scaler_id >= 0) {
5794 scaler_state->scaler_users &= ~(1 << scaler_user);
5795 scaler_state->scalers[*scaler_id].in_use = 0;
5797 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5798 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5799 intel_crtc->pipe, scaler_user, *scaler_id,
5800 scaler_state->scaler_users);
/* Planar (semi-planar) YUV sources have a larger minimum size. */
5806 if (format && drm_format_info_is_yuv_semiplanar(format) &&
5807 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5808 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
/* Range check against the gen-specific limits defined above. */
5813 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5814 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5815 (INTEL_GEN(dev_priv) >= 11 &&
5816 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5817 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5818 (INTEL_GEN(dev_priv) < 11 &&
5819 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5820 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5821 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5822 "size is out of scaler range\n",
5823 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5827 /* mark this plane as a scaler user in crtc_state */
5828 scaler_state->scaler_users |= (1 << scaler_user);
5829 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5830 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5831 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5832 scaler_state->scaler_users);
5838 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5840 * @state: crtc's scaler state
5843 * 0 - scaler_usage updated successfully
5844 * error - requested scaling cannot be supported or other error condition
/* Thin wrapper over skl_update_scaler() for pipe (panel-fitter) scaling:
 * source is the pipe src size, destination is the active display size.
 * YCbCr 4:2:0 output forces need_scaler (assignment elided in extract). */
5846 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5848 const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
5849 bool need_scaler = false;
5851 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5854 return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
5855 &state->scaler_state.scaler_id,
5856 state->pipe_src_w, state->pipe_src_h,
5857 adjusted_mode->crtc_hdisplay,
5858 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5862 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5863 * @crtc_state: crtc's scaler state
5864 * @plane_state: atomic plane state to update
5867 * 0 - scaler_usage updated successfully
5868 * error - requested scaling cannot be supported or other error condition
/* Plane-scaling variant: stages a scaler claim via skl_update_scaler(),
 * then rejects configurations the hardware cannot scale (color keying,
 * unsupported pixel formats). Return statements are elided in this
 * extract. */
5870 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5871 struct intel_plane_state *plane_state)
5873 struct intel_plane *intel_plane =
5874 to_intel_plane(plane_state->uapi.plane);
5875 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5876 struct drm_framebuffer *fb = plane_state->hw.fb;
5878 bool force_detach = !fb || !plane_state->uapi.visible;
5879 bool need_scaler = false;
5881 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5882 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5883 fb && drm_format_info_is_yuv_semiplanar(fb->format))
/* src/dst rects are 16.16 fixed point; shift to integer pixels. */
5886 ret = skl_update_scaler(crtc_state, force_detach,
5887 drm_plane_index(&intel_plane->base),
5888 &plane_state->scaler_id,
5889 drm_rect_width(&plane_state->uapi.src) >> 16,
5890 drm_rect_height(&plane_state->uapi.src) >> 16,
5891 drm_rect_width(&plane_state->uapi.dst),
5892 drm_rect_height(&plane_state->uapi.dst),
5893 fb ? fb->format : NULL, need_scaler);
5895 if (ret || plane_state->scaler_id < 0)
5898 /* check colorkey */
5899 if (plane_state->ckey.flags) {
5900 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5901 intel_plane->base.base.id,
5902 intel_plane->base.name);
5906 /* Check src format */
5907 switch (fb->format->format) {
5908 case DRM_FORMAT_RGB565:
5909 case DRM_FORMAT_XBGR8888:
5910 case DRM_FORMAT_XRGB8888:
5911 case DRM_FORMAT_ABGR8888:
5912 case DRM_FORMAT_ARGB8888:
5913 case DRM_FORMAT_XRGB2101010:
5914 case DRM_FORMAT_XBGR2101010:
5915 case DRM_FORMAT_ARGB2101010:
5916 case DRM_FORMAT_ABGR2101010:
5917 case DRM_FORMAT_YUYV:
5918 case DRM_FORMAT_YVYU:
5919 case DRM_FORMAT_UYVY:
5920 case DRM_FORMAT_VYUY:
5921 case DRM_FORMAT_NV12:
5922 case DRM_FORMAT_P010:
5923 case DRM_FORMAT_P012:
5924 case DRM_FORMAT_P016:
5925 case DRM_FORMAT_Y210:
5926 case DRM_FORMAT_Y212:
5927 case DRM_FORMAT_Y216:
5928 case DRM_FORMAT_XVYU2101010:
5929 case DRM_FORMAT_XVYU12_16161616:
5930 case DRM_FORMAT_XVYU16161616:
/* FP16 formats are only scalable on gen11+. */
5932 case DRM_FORMAT_XBGR16161616F:
5933 case DRM_FORMAT_ABGR16161616F:
5934 case DRM_FORMAT_XRGB16161616F:
5935 case DRM_FORMAT_ARGB16161616F:
5936 if (INTEL_GEN(dev_priv) >= 11)
5940 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5941 intel_plane->base.base.id, intel_plane->base.name,
5942 fb->base.id, fb->format->format);
/* Detach every scaler on this crtc's pipe. */
5949 void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state)
5951 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
5954 for (i = 0; i < crtc->num_scalers; i++)
5955 skl_detach_scaler(crtc, i);
/*
 * Program the pipe scaler as a panel fitter (SKL+): derive the scale
 * factors from pipe_src vs. the pfit window size, compute the UV/RGB
 * phases and write the PS_* registers for the assigned scaler.
 */
5958 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5960 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5961 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5962 enum pipe pipe = crtc->pipe;
5963 const struct intel_crtc_scaler_state *scaler_state =
5964 &crtc_state->scaler_state;
5966 if (crtc_state->pch_pfit.enabled) {
5967 u16 uv_rgb_hphase, uv_rgb_vphase;
5968 int pfit_w, pfit_h, hscale, vscale;
/* A scaler must have been reserved by the atomic check phase. */
5971 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
/* pch_pfit.size packs width in the high 16 bits, height in the low. */
5974 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5975 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
/* 16.16 fixed-point scale factors. */
5977 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5978 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5980 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5981 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5983 id = scaler_state->scaler_id;
5984 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5985 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5986 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5987 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5988 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5989 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5990 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5991 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
/*
 * Enable the ILK-style panel fitter (PF_* registers) with the window
 * position/size computed at atomic check time.
 */
5995 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5997 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5998 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5999 enum pipe pipe = crtc->pipe;
6001 if (crtc_state->pch_pfit.enabled) {
6002 /* Force use of hard-coded filter coefficients
6003 * as some pre-programmed values are broken,
/* IVB/HSW additionally need the pipe selected in PF_CTL. */
6006 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
6007 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
6008 PF_PIPE_SEL_IVB(pipe));
6010 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
6011 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
6012 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
/*
 * Enable Intermediate Pixel Storage. On BDW this goes through the pcode
 * mailbox; on HSW the IPS_CTL register is written directly and we wait
 * for the enable bit to latch at the next vblank.
 */
6016 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
6018 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6019 struct drm_device *dev = crtc->base.dev;
6020 struct drm_i915_private *dev_priv = to_i915(dev);
6022 if (!crtc_state->ips_enabled)
6026 * We can only enable IPS after we enable a plane and wait for a vblank
6027 * This function is called from post_plane_update, which is run after
/* Sanity: at least one non-cursor plane must be active. */
6030 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
6032 if (IS_BROADWELL(dev_priv)) {
6033 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
6034 IPS_ENABLE | IPS_PCODE_CONTROL));
6035 /* Quoting Art Runyan: "its not safe to expect any particular
6036 * value in IPS_CTL bit 31 after enabling IPS through the
6037 * mailbox." Moreover, the mailbox may return a bogus state,
6038 * so we need to just enable it and continue on.
6041 I915_WRITE(IPS_CTL, IPS_ENABLE)
6042 /* The bit only becomes 1 in the next vblank, so this wait here
6043 * is essentially intel_wait_for_vblank. If we don't have this
6044 * and don't wait for vblanks until the end of crtc_enable, then
6045 * the HW state readout code will complain that the expected
6046 * IPS_CTL value is not the one we read. */
6047 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
6048 DRM_ERROR("Timed out waiting for IPS enable\n");
/*
 * Disable IPS (pcode mailbox on BDW, direct IPS_CTL write on HSW) and
 * wait a vblank afterwards so it is safe to disable the planes.
 */
6052 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
6054 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6055 struct drm_device *dev = crtc->base.dev;
6056 struct drm_i915_private *dev_priv = to_i915(dev);
6058 if (!crtc_state->ips_enabled)
6061 if (IS_BROADWELL(dev_priv)) {
6062 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
6064 * Wait for PCODE to finish disabling IPS. The BSpec specified
6065 * 42ms timeout value leads to occasional timeouts so use 100ms
6068 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
6069 DRM_ERROR("Timed out waiting for IPS disable\n");
6071 I915_WRITE(IPS_CTL, 0);
/* POSTING_READ flushes the write before we wait for vblank. */
6072 POSTING_READ(IPS_CTL);
6075 /* We need to wait for a vblank before we can disable the plane. */
6076 intel_wait_for_vblank(dev_priv, crtc->pipe);
/* Turn off the legacy video overlay, if this crtc has one. */
6079 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6081 if (intel_crtc->overlay)
6082 (void) intel_overlay_switch_off(intel_crtc->overlay);
6084 /* Let userspace switch the overlay on again. In most cases userspace
6085 * has to recompute where to put it anyway.
/*
 * Decide whether IPS must be turned off before the plane update:
 * on a modeset, before reprogramming a split-gamma LUT on HSW, or when
 * the new state no longer has IPS enabled.
 */
6089 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6090 const struct intel_crtc_state *new_crtc_state)
6092 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6093 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6095 if (!old_crtc_state->ips_enabled)
6098 if (needs_modeset(new_crtc_state))
6102 * Workaround : Do not read or write the pipe palette/gamma data while
6103 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6105 * Disable IPS before we program the LUT.
6107 if (IS_HASWELL(dev_priv) &&
6108 (new_crtc_state->uapi.color_mgmt_changed ||
6109 new_crtc_state->update_pipe) &&
6110 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6113 return !new_crtc_state->ips_enabled;
/*
 * Decide whether IPS should be (re-)enabled after the plane update —
 * the mirror of hsw_pre_update_disable_ips(), plus a forced enable on
 * the first fastset after boot state takeover (BDW can't read IPS out).
 */
6116 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6117 const struct intel_crtc_state *new_crtc_state)
6119 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6120 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6122 if (!new_crtc_state->ips_enabled)
6125 if (needs_modeset(new_crtc_state))
6129 * Workaround : Do not read or write the pipe palette/gamma data while
6130 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6132 * Re-enable IPS after the LUT has been programmed.
6134 if (IS_HASWELL(dev_priv) &&
6135 (new_crtc_state->uapi.color_mgmt_changed ||
6136 new_crtc_state->update_pipe) &&
6137 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6141 * We can't read out IPS on broadwell, assume the worst and
6142 * forcibly enable IPS on the first fastset.
6144 if (new_crtc_state->update_pipe &&
6145 old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6148 return !old_crtc_state->ips_enabled;
/* Display WA #0827 applies on gen9 (except GLK) when NV12 planes are active. */
6151 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6153 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6155 if (!crtc_state->nv12_planes)
6158 /* WA Display #0827: Gen9:all */
6159 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
/* Wa_2006604312 applies on ICL whenever any scaler is in use. */
6165 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6167 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6169 /* Wa_2006604312:icl */
6170 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
/* True when planes go from none-active (or a modeset) to some active. */
6176 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6177 const struct intel_crtc_state *new_crtc_state)
6179 return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6180 new_crtc_state->active_planes;
/* True when planes go from some active to none (or a modeset). */
6183 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6184 const struct intel_crtc_state *new_crtc_state)
6186 return old_crtc_state->active_planes &&
6187 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
/*
 * Per-crtc work after the plane update has committed: frontbuffer flip
 * notification, post-update watermarks, IPS re-enable, FBC post-update,
 * and tearing down the NV12/scaler-clock workarounds once unneeded.
 */
6190 static void intel_post_plane_update(struct intel_atomic_state *state,
6191 struct intel_crtc *crtc)
6193 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6194 struct intel_plane *primary = to_intel_plane(crtc->base.primary);
6195 const struct intel_crtc_state *old_crtc_state =
6196 intel_atomic_get_old_crtc_state(state, crtc);
6197 const struct intel_crtc_state *new_crtc_state =
6198 intel_atomic_get_new_crtc_state(state, crtc);
6199 const struct intel_plane_state *new_primary_state =
6200 intel_atomic_get_new_plane_state(state, primary);
6201 enum pipe pipe = crtc->pipe;
6203 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
6205 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
6206 intel_update_watermarks(crtc);
6208 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
6209 hsw_enable_ips(new_crtc_state);
/* FBC post-update only when the primary plane was part of this commit. */
6211 if (new_primary_state)
6212 intel_fbc_post_update(crtc);
/* Drop WA #0827 once no NV12 planes remain. */
6214 if (needs_nv12_wa(old_crtc_state) &&
6215 !needs_nv12_wa(new_crtc_state))
6216 skl_wa_827(dev_priv, pipe, false);
/* Drop Wa_2006604312 once no scalers remain in use. */
6218 if (needs_scalerclk_wa(old_crtc_state) &&
6219 !needs_scalerclk_wa(new_crtc_state))
6220 icl_wa_scalerclkgating(dev_priv, pipe, false);
/*
 * Per-crtc work before the plane update is committed: IPS disable, FBC
 * pre-update (possibly requiring a vblank wait), arming hardware
 * workarounds, CxSR/LP-WM disables that need a vblank to settle, and
 * the intermediate watermark programming for non-modeset updates.
 */
6223 static void intel_pre_plane_update(struct intel_atomic_state *state,
6224 struct intel_crtc *crtc)
6226 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6227 struct intel_plane *primary = to_intel_plane(crtc->base.primary);
6228 const struct intel_crtc_state *old_crtc_state =
6229 intel_atomic_get_old_crtc_state(state, crtc);
6230 const struct intel_crtc_state *new_crtc_state =
6231 intel_atomic_get_new_crtc_state(state, crtc);
6232 const struct intel_plane_state *new_primary_state =
6233 intel_atomic_get_new_plane_state(state, primary);
6234 enum pipe pipe = crtc->pipe;
6236 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
6237 hsw_disable_ips(old_crtc_state);
/* FBC pre-update may ask us to wait a vblank before touching planes. */
6239 if (new_primary_state &&
6240 intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
6241 intel_wait_for_vblank(dev_priv, pipe);
6243 /* Display WA 827 */
6244 if (!needs_nv12_wa(old_crtc_state) &&
6245 needs_nv12_wa(new_crtc_state))
6246 skl_wa_827(dev_priv, pipe, true);
6248 /* Wa_2006604312:icl */
6249 if (!needs_scalerclk_wa(old_crtc_state) &&
6250 needs_scalerclk_wa(new_crtc_state))
6251 icl_wa_scalerclkgating(dev_priv, pipe, true);
6254 * Vblank time updates from the shadow to live plane control register
6255 * are blocked if the memory self-refresh mode is active at that
6256 * moment. So to make sure the plane gets truly disabled, disable
6257 * first the self-refresh mode. The self-refresh enable bit in turn
6258 * will be checked/applied by the HW only at the next frame start
6259 * event which is after the vblank start event, so we need to have a
6260 * wait-for-vblank between disabling the plane and the pipe.
6262 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
6263 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
6264 intel_wait_for_vblank(dev_priv, pipe);
6267 * IVB workaround: must disable low power watermarks for at least
6268 * one frame before enabling scaling. LP watermarks can be re-enabled
6269 * when scaling is disabled.
6271 * WaCxSRDisabledForSpriteScaling:ivb
6273 if (old_crtc_state->hw.active &&
6274 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
6275 intel_wait_for_vblank(dev_priv, pipe);
6278 * If we're doing a modeset we don't need to do any
6279 * pre-vblank watermark programming here.
6281 if (!needs_modeset(new_crtc_state)) {
6283 * For platforms that support atomic watermarks, program the
6284 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
6285 * will be the intermediate values that are safe for both pre- and
6286 * post- vblank; when vblank happens, the 'active' values will be set
6287 * to the final 'target' values and we'll do this again to get the
6288 * optimal watermarks. For gen9+ platforms, the values we program here
6289 * will be the final target values which will get automatically latched
6290 * at vblank time; no further programming will be necessary.
6292 * If a platform hasn't been transitioned to atomic watermarks yet,
6293 * we'll continue to update watermarks the old way, if flags tell
6296 if (dev_priv->display.initial_watermarks)
6297 dev_priv->display.initial_watermarks(state, crtc)
6298 else if (new_crtc_state->update_wm_pre)
6299 intel_update_watermarks(crtc);
6303 * Gen2 reports pipe underruns whenever all planes are disabled.
6304 * So disable underrun reporting before all the planes get disabled.
6306 * We do this after .initial_watermarks() so that we have a
6307 * chance of catching underruns with the intermediate watermarks
6308 * vs. the old plane configuration.
6310 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
6311 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
 * Disable every plane of this crtc selected by update_planes, plus the
 * legacy overlay, then send one frontbuffer flip for the previously
 * visible planes.
 */
6314 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6315 struct intel_crtc *crtc)
6317 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6318 const struct intel_crtc_state *new_crtc_state =
6319 intel_atomic_get_new_crtc_state(state, crtc);
6320 unsigned int update_mask = new_crtc_state->update_planes;
6321 const struct intel_plane_state *old_plane_state;
6322 struct intel_plane *plane;
6323 unsigned fb_bits = 0;
6326 intel_crtc_dpms_overlay_disable(crtc);
6328 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
/* Skip planes on other pipes or not selected for update. */
6329 if (crtc->pipe != plane->pipe ||
6330 !(update_mask & BIT(plane->id)))
6333 intel_disable_plane(plane, new_crtc_state);
6335 if (old_plane_state->uapi.visible)
6336 fb_bits |= plane->frontbuffer_bit;
6339 intel_frontbuffer_flip(dev_priv, fb_bits);
6343 * intel_connector_primary_encoder - get the primary encoder for a connector
6344 * @connector: connector for which to return the encoder
6346 * Returns the primary encoder for a connector. There is a 1:1 mapping from
6347 * all connectors to their encoder, except for DP-MST connectors which have
6348 * both a virtual and a primary encoder. These DP-MST primary encoders can be
6349 * pointed to by as many DP-MST connectors as there are pipes.
/* For DP-MST connectors return the digital port's primary encoder;
 * otherwise return the connector's attached encoder. */
6351 static struct intel_encoder *
6352 intel_connector_primary_encoder(struct intel_connector *connector)
6354 struct intel_encoder *encoder;
6356 if (connector->mst_port)
6357 return &dp_to_dig_port(connector->mst_port)->base;
6359 encoder = intel_attached_encoder(&connector->base);
/*
 * A connector needs a modeset when it moves between crtcs, or when the
 * crtc it stays on is itself undergoing a modeset.
 */
6366 intel_connector_needs_modeset(struct intel_atomic_state *state,
6367 const struct drm_connector_state *old_conn_state,
6368 const struct drm_connector_state *new_conn_state)
6370 struct intel_crtc *old_crtc = old_conn_state->crtc ?
6371 to_intel_crtc(old_conn_state->crtc) : NULL;
6372 struct intel_crtc *new_crtc = new_conn_state->crtc ?
6373 to_intel_crtc(new_conn_state->crtc) : NULL;
6375 return new_crtc != old_crtc ||
6377 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
/*
 * Call ->update_prepare on the primary encoder of every connector that
 * needs a modeset in this atomic state.
 */
6380 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6382 struct drm_connector_state *old_conn_state;
6383 struct drm_connector_state *new_conn_state;
6384 struct drm_connector *conn;
6387 for_each_oldnew_connector_in_state(&state->base, conn,
6388 old_conn_state, new_conn_state, i) {
6389 struct intel_encoder *encoder;
6390 struct intel_crtc *crtc;
6392 if (!intel_connector_needs_modeset(state,
6397 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6398 if (!encoder->update_prepare)
6401 crtc = new_conn_state->crtc ?
6402 to_intel_crtc(new_conn_state->crtc) : NULL;
6403 encoder->update_prepare(state, encoder, crtc);
/*
 * Call ->update_complete on the primary encoder of every connector that
 * needed a modeset — the counterpart of intel_encoders_update_prepare().
 */
6407 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6409 struct drm_connector_state *old_conn_state;
6410 struct drm_connector_state *new_conn_state;
6411 struct drm_connector *conn;
6414 for_each_oldnew_connector_in_state(&state->base, conn,
6415 old_conn_state, new_conn_state, i) {
6416 struct intel_encoder *encoder;
6417 struct intel_crtc *crtc;
6419 if (!intel_connector_needs_modeset(state,
6424 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6425 if (!encoder->update_complete)
6428 crtc = new_conn_state->crtc ?
6429 to_intel_crtc(new_conn_state->crtc) : NULL;
6430 encoder->update_complete(state, encoder, crtc);
/* Invoke ->pre_pll_enable on every encoder feeding this crtc. */
6434 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6435 struct intel_crtc *crtc)
6437 const struct intel_crtc_state *crtc_state =
6438 intel_atomic_get_new_crtc_state(state, crtc);
6439 const struct drm_connector_state *conn_state;
6440 struct drm_connector *conn;
6443 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6444 struct intel_encoder *encoder =
6445 to_intel_encoder(conn_state->best_encoder);
6447 if (conn_state->crtc != &crtc->base)
6450 if (encoder->pre_pll_enable)
6451 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
/* Invoke ->pre_enable on every encoder feeding this crtc. */
6455 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6456 struct intel_crtc *crtc)
6458 const struct intel_crtc_state *crtc_state =
6459 intel_atomic_get_new_crtc_state(state, crtc);
6460 const struct drm_connector_state *conn_state;
6461 struct drm_connector *conn;
6464 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6465 struct intel_encoder *encoder =
6466 to_intel_encoder(conn_state->best_encoder);
6468 if (conn_state->crtc != &crtc->base)
6471 if (encoder->pre_enable)
6472 encoder->pre_enable(encoder, crtc_state, conn_state);
/* Invoke ->enable on every encoder feeding this crtc and notify opregion. */
6476 static void intel_encoders_enable(struct intel_atomic_state *state,
6477 struct intel_crtc *crtc)
6479 const struct intel_crtc_state *crtc_state =
6480 intel_atomic_get_new_crtc_state(state, crtc);
6481 const struct drm_connector_state *conn_state;
6482 struct drm_connector *conn;
6485 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6486 struct intel_encoder *encoder =
6487 to_intel_encoder(conn_state->best_encoder);
6489 if (conn_state->crtc != &crtc->base)
6492 if (encoder->enable)
6493 encoder->enable(encoder, crtc_state, conn_state);
6494 intel_opregion_notify_encoder(encoder, true);
/* Notify opregion and invoke ->disable on every encoder of this crtc,
 * using the old (pre-commit) connector/crtc states. */
6498 static void intel_encoders_disable(struct intel_atomic_state *state,
6499 struct intel_crtc *crtc)
6501 const struct intel_crtc_state *old_crtc_state =
6502 intel_atomic_get_old_crtc_state(state, crtc);
6503 const struct drm_connector_state *old_conn_state;
6504 struct drm_connector *conn;
6507 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6508 struct intel_encoder *encoder =
6509 to_intel_encoder(old_conn_state->best_encoder);
6511 if (old_conn_state->crtc != &crtc->base)
6514 intel_opregion_notify_encoder(encoder, false);
6515 if (encoder->disable)
6516 encoder->disable(encoder, old_crtc_state, old_conn_state);
/* Invoke ->post_disable on every encoder of this crtc (old states). */
6520 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6521 struct intel_crtc *crtc)
6523 const struct intel_crtc_state *old_crtc_state =
6524 intel_atomic_get_old_crtc_state(state, crtc);
6525 const struct drm_connector_state *old_conn_state;
6526 struct drm_connector *conn;
6529 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6530 struct intel_encoder *encoder =
6531 to_intel_encoder(old_conn_state->best_encoder);
6533 if (old_conn_state->crtc != &crtc->base)
6536 if (encoder->post_disable)
6537 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
/* Invoke ->post_pll_disable on every encoder of this crtc (old states). */
6541 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6542 struct intel_crtc *crtc)
6544 const struct intel_crtc_state *old_crtc_state =
6545 intel_atomic_get_old_crtc_state(state, crtc);
6546 const struct drm_connector_state *old_conn_state;
6547 struct drm_connector *conn;
6550 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6551 struct intel_encoder *encoder =
6552 to_intel_encoder(old_conn_state->best_encoder);
6554 if (old_conn_state->crtc != &crtc->base)
6557 if (encoder->post_pll_disable)
6558 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
/* Invoke ->update_pipe on every encoder of this crtc (fastset path). */
6562 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6563 struct intel_crtc *crtc)
6565 const struct intel_crtc_state *crtc_state =
6566 intel_atomic_get_new_crtc_state(state, crtc);
6567 const struct drm_connector_state *conn_state;
6568 struct drm_connector *conn;
6571 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6572 struct intel_encoder *encoder =
6573 to_intel_encoder(conn_state->best_encoder);
6575 if (conn_state->crtc != &crtc->base)
6578 if (encoder->update_pipe)
6579 encoder->update_pipe(encoder, crtc_state, conn_state);
/* Disable the crtc's primary plane via the plane vfunc — used to make
 * DSPCNTR gamma/csc settings for the pipe bottom color take effect. */
6583 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6585 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6586 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6588 plane->disable_plane(plane, crtc_state);
/*
 * Full modeset enable sequence for an ILK-style crtc: suppress spurious
 * underruns, program timings/m_n/pipeconf, run encoder pre_enable hooks,
 * bring up FDI/pfit/LUTs, enable the pipe and the PCH path, then the
 * encoders, and finally re-enable underrun reporting after two vblanks.
 */
6591 static void ironlake_crtc_enable(struct intel_atomic_state *state,
6592 struct intel_crtc *crtc)
6594 const struct intel_crtc_state *new_crtc_state =
6595 intel_atomic_get_new_crtc_state(state, crtc);
6596 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6597 enum pipe pipe = crtc->pipe;
6599 if (WARN_ON(crtc->active))
6603 * Sometimes spurious CPU pipe underruns happen during FDI
6604 * training, at least with VGA+HDMI cloning. Suppress them.
6606 * On ILK we get an occasional spurious CPU pipe underruns
6607 * between eDP port A enable and vdd enable. Also PCH port
6608 * enable seems to result in the occasional CPU pipe underrun.
6610 * Spurious PCH underruns also occur during PCH enabling.
6612 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6613 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6615 if (new_crtc_state->has_pch_encoder)
6616 intel_prepare_shared_dpll(new_crtc_state);
6618 if (intel_crtc_has_dp_encoder(new_crtc_state))
6619 intel_dp_set_m_n(new_crtc_state, M1_N1);
6621 intel_set_pipe_timings(new_crtc_state);
6622 intel_set_pipe_src_size(new_crtc_state);
6624 if (new_crtc_state->has_pch_encoder)
6625 intel_cpu_transcoder_set_m_n(new_crtc_state,
6626 &new_crtc_state->fdi_m_n, NULL);
6628 ironlake_set_pipeconf(new_crtc_state);
6630 crtc->active = true;
6632 intel_encoders_pre_enable(state, crtc);
6634 if (new_crtc_state->has_pch_encoder) {
6635 /* Note: FDI PLL enabling _must_ be done before we enable the
6636 * cpu pipes, hence this is separate from all the other fdi/pch
6638 ironlake_fdi_pll_enable(new_crtc_state);
/* No PCH encoder: FDI must be fully off. */
6640 assert_fdi_tx_disabled(dev_priv, pipe);
6641 assert_fdi_rx_disabled(dev_priv, pipe);
6644 ironlake_pfit_enable(new_crtc_state);
6647 * On ILK+ LUT must be loaded before the pipe is running but with
6650 intel_color_load_luts(new_crtc_state);
6651 intel_color_commit(new_crtc_state);
6652 /* update DSPCNTR to configure gamma for pipe bottom color */
6653 intel_disable_primary_plane(new_crtc_state);
6655 if (dev_priv->display.initial_watermarks)
6656 dev_priv->display.initial_watermarks(state, crtc);
6657 intel_enable_pipe(new_crtc_state);
6659 if (new_crtc_state->has_pch_encoder)
6660 ironlake_pch_enable(state, new_crtc_state);
6662 intel_crtc_vblank_on(new_crtc_state);
6664 intel_encoders_enable(state, crtc);
6666 if (HAS_PCH_CPT(dev_priv))
6667 cpt_verify_modeset(dev_priv, pipe);
6670 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6671 * And a second vblank wait is needed at least on ILK with
6672 * some interlaced HDMI modes. Let's do the double wait always
6673 * in case there are more corner cases we don't know about.
6675 if (new_crtc_state->has_pch_encoder) {
6676 intel_wait_for_vblank(dev_priv, pipe);
6677 intel_wait_for_vblank(dev_priv, pipe);
6679 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6680 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6683 /* IPS only exists on ULT machines and is tied to pipe A. */
6684 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6686 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
/*
 * Display WA #1180 (GLK/CNL): toggle the DPF/DPFR clock-gating disable
 * bits in CLKGATE_DIS_PSL around pipe scaler usage.
 * NOTE(review): the apply/clear branch bodies are elided here — presumably
 * they set/clear `mask` in `val`; confirm against full source.
 */
6689 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6690 enum pipe pipe, bool apply)
6692 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6693 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6700 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
/*
 * Program the per-pipe MBus data-box credits: gen12 gets larger B/BW
 * credits than gen11.
 */
6703 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6705 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6706 enum pipe pipe = crtc->pipe;
6709 val = MBUS_DBOX_A_CREDIT(2);
6711 if (INTEL_GEN(dev_priv) >= 12) {
6712 val |= MBUS_DBOX_BW_CREDIT(2);
6713 val |= MBUS_DBOX_B_CREDIT(12);
6715 val |= MBUS_DBOX_BW_CREDIT(1);
6716 val |= MBUS_DBOX_B_CREDIT(8);
6719 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
/* Set the transcoder frame start delay to 0 in CHICKEN_TRANS. */
6722 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6724 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6725 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6726 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6729 val = I915_READ(reg);
6730 val &= ~HSW_FRAME_START_DELAY_MASK;
6731 val |= HSW_FRAME_START_DELAY(0);
6732 I915_WRITE(reg, val);
/*
 * Full modeset enable sequence for HSW+ (DDI) crtcs: PLL and encoder
 * pre-enable hooks, transcoder timings/m_n/pipeconf, pfit, LUTs, DDI
 * transcoder function, watermarks, MBus credits, pipe enable, PCH (LPT),
 * encoders, and the GLK/CNL scaler clock-gating + HSW workaround waits.
 */
6735 static void haswell_crtc_enable(struct intel_atomic_state *state,
6736 struct intel_crtc *crtc)
6738 const struct intel_crtc_state *new_crtc_state =
6739 intel_atomic_get_new_crtc_state(state, crtc)
6740 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6741 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
6742 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
6743 bool psl_clkgate_wa;
6745 if (WARN_ON(crtc->active))
6748 intel_encoders_pre_pll_enable(state, crtc);
6750 if (new_crtc_state->shared_dpll)
6751 intel_enable_shared_dpll(new_crtc_state);
6753 intel_encoders_pre_enable(state, crtc);
6755 if (intel_crtc_has_dp_encoder(new_crtc_state))
6756 intel_dp_set_m_n(new_crtc_state, M1_N1);
/* DSI transcoders program their own timings. */
6758 if (!transcoder_is_dsi(cpu_transcoder))
6759 intel_set_pipe_timings(new_crtc_state);
6761 if (INTEL_GEN(dev_priv) >= 11)
6762 icl_enable_trans_port_sync(new_crtc_state);
6764 intel_set_pipe_src_size(new_crtc_state);
6766 if (cpu_transcoder != TRANSCODER_EDP &&
6767 !transcoder_is_dsi(cpu_transcoder))
6768 I915_WRITE(PIPE_MULT(cpu_transcoder),
6769 new_crtc_state->pixel_multiplier - 1);
6771 if (new_crtc_state->has_pch_encoder)
6772 intel_cpu_transcoder_set_m_n(new_crtc_state,
6773 &new_crtc_state->fdi_m_n, NULL);
6775 if (!transcoder_is_dsi(cpu_transcoder)) {
6776 hsw_set_frame_start_delay(new_crtc_state);
6777 haswell_set_pipeconf(new_crtc_state);
6780 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6781 bdw_set_pipemisc(new_crtc_state);
6783 crtc->active = true;
6785 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6786 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6787 new_crtc_state->pch_pfit.enabled;
6789 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
/* gen9+ uses the pipe scaler as panel fitter, HSW/BDW the ILK pfit. */
6791 if (INTEL_GEN(dev_priv) >= 9)
6792 skylake_pfit_enable(new_crtc_state);
6794 ironlake_pfit_enable(new_crtc_state);
6797 * On ILK+ LUT must be loaded before the pipe is running but with
6800 intel_color_load_luts(new_crtc_state);
6801 intel_color_commit(new_crtc_state);
6802 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6803 if (INTEL_GEN(dev_priv) < 9)
6804 intel_disable_primary_plane(new_crtc_state);
6806 if (INTEL_GEN(dev_priv) >= 11)
6807 icl_set_pipe_chicken(crtc);
6809 if (!transcoder_is_dsi(cpu_transcoder))
6810 intel_ddi_enable_transcoder_func(new_crtc_state);
6812 if (dev_priv->display.initial_watermarks)
6813 dev_priv->display.initial_watermarks(state, crtc);
6815 if (INTEL_GEN(dev_priv) >= 11)
6816 icl_pipe_mbus_enable(crtc);
6818 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6819 if (!transcoder_is_dsi(cpu_transcoder))
6820 intel_enable_pipe(new_crtc_state);
6822 if (new_crtc_state->has_pch_encoder)
6823 lpt_pch_enable(state, new_crtc_state);
6825 intel_crtc_vblank_on(new_crtc_state);
6827 intel_encoders_enable(state, crtc);
6829 if (psl_clkgate_wa) {
6830 intel_wait_for_vblank(dev_priv, pipe);
6831 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6834 /* If we change the relative order between pipe/planes enabling, we need
6835 * to change the workaround. */
6836 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
6837 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6838 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6839 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
/* Disable the ILK panel fitter, but only if it was actually in use. */
6843 void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6845 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6846 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6847 enum pipe pipe = crtc->pipe;
6849 /* To avoid upsetting the power well on haswell only disable the pfit if
6850 * it's in use. The hw state code will make sure we get this right. */
6851 if (old_crtc_state->pch_pfit.enabled) {
6852 I915_WRITE(PF_CTL(pipe), 0);
6853 I915_WRITE(PF_WIN_POS(pipe), 0);
6854 I915_WRITE(PF_WIN_SZ(pipe), 0);
/*
 * Full disable sequence for an ILK-style crtc: encoders, pipe, pfit,
 * FDI, then the PCH transcoder and (on CPT) TRANS_DP_CTL / DPLL_SEL
 * teardown, with underrun reporting suppressed throughout.
 */
6858 static void ironlake_crtc_disable(struct intel_atomic_state *state,
6859 struct intel_crtc *crtc)
6861 const struct intel_crtc_state *old_crtc_state =
6862 intel_atomic_get_old_crtc_state(state, crtc);
6863 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6864 enum pipe pipe = crtc->pipe;
6867 * Sometimes spurious CPU pipe underruns happen when the
6868 * pipe is already disabled, but FDI RX/TX is still enabled.
6869 * Happens at least with VGA+HDMI cloning. Suppress them.
6871 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6872 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6874 intel_encoders_disable(state, crtc);
6876 intel_crtc_vblank_off(old_crtc_state);
6878 intel_disable_pipe(old_crtc_state);
6880 ironlake_pfit_disable(old_crtc_state);
6882 if (old_crtc_state->has_pch_encoder)
6883 ironlake_fdi_disable(crtc);
6885 intel_encoders_post_disable(state, crtc);
6887 if (old_crtc_state->has_pch_encoder) {
6888 ironlake_disable_pch_transcoder(dev_priv, pipe);
6890 if (HAS_PCH_CPT(dev_priv)) {
6894 /* disable TRANS_DP_CTL */
6895 reg = TRANS_DP_CTL(pipe);
6896 temp = I915_READ(reg);
6897 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6898 TRANS_DP_PORT_SEL_MASK);
6899 temp |= TRANS_DP_PORT_SEL_NONE;
6900 I915_WRITE(reg, temp);
6902 /* disable DPLL_SEL */
6903 temp = I915_READ(PCH_DPLL_SEL);
6904 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6905 I915_WRITE(PCH_DPLL_SEL, temp);
6908 ironlake_fdi_pll_disable(crtc);
6911 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6912 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/* HSW+ crtc disable: the heavy lifting lives in the encoder hooks. */
6915 static void haswell_crtc_disable(struct intel_atomic_state *state,
6916 struct intel_crtc *crtc)
6919 * FIXME collapse everything to one hook.
6920 * Need care with mst->ddi interactions.
6922 intel_encoders_disable(state, crtc);
6923 intel_encoders_post_disable(state, crtc);
/*
 * Program the GMCH panel fitter (PFIT_*). Must be done while the pipe
 * is disabled, which is asserted below.
 */
6926 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6928 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6929 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6931 if (!crtc_state->gmch_pfit.control)
6935 * The panel fitter should only be adjusted whilst the pipe is disabled,
6936 * according to register description and PRM.
6938 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6939 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
6941 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6942 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6944 /* Border color in case we don't scale up to the full screen. Black by
6945 * default, change to something else for debugging. */
6946 I915_WRITE(BCLRPAT(crtc->pipe), 0);
/*
 * Return true if @phy is a combo PHY on this platform:
 * EHL exposes combo PHYs A..C, other gen11+ A..B.
 * NOTE(review): the early-return body for PHY_NONE and the final return
 * are missing from this extract — verify against the full source.
 */
6949 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6951 if (phy == PHY_NONE)
6954 if (IS_ELKHARTLAKE(dev_priv))
6955 return phy <= PHY_C;
6957 if (INTEL_GEN(dev_priv) >= 11)
6958 return phy <= PHY_B;
/*
 * Return true if @phy is a Type-C PHY: gen12 uses PHY_D..PHY_I,
 * gen11 (except EHL) uses PHY_C..PHY_F.
 * NOTE(review): the fallback return is missing from this extract.
 */
6963 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6965 if (INTEL_GEN(dev_priv) >= 12)
6966 return phy >= PHY_D && phy <= PHY_I;
6968 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6969 return phy >= PHY_C && phy <= PHY_F;
/*
 * Map a DDI @port to its PHY.  Ports and PHYs are numbered identically
 * except on EHL where port D is special-cased (returned value lost in
 * this extract — presumably PHY_A; verify against the full source).
 */
6974 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6976 if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6979 return (enum phy)port;
/*
 * Map a DDI @port to its Type-C port index, or PORT_TC_NONE when the
 * port's PHY is not a TC PHY.  The first TC port is D on gen12 and C
 * on gen11, hence the different subtrahends.
 */
6982 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6984 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6985 return PORT_TC_NONE;
6987 if (INTEL_GEN(dev_priv) >= 12)
6988 return port - PORT_D;
6990 return port - PORT_C;
/*
 * Map a DDI port to its *_LANES display power domain; unknown ports
 * fall back to POWER_DOMAIN_PORT_OTHER.
 * NOTE(review): this is a switch on @port, but the `case PORT_x:` labels
 * are missing from this extract — returns below correspond to
 * PORT_A..PORT_G in order; verify against the full source.
 */
6993 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6997 return POWER_DOMAIN_PORT_DDI_A_LANES;
6999 return POWER_DOMAIN_PORT_DDI_B_LANES;
7001 return POWER_DOMAIN_PORT_DDI_C_LANES;
7003 return POWER_DOMAIN_PORT_DDI_D_LANES;
7005 return POWER_DOMAIN_PORT_DDI_E_LANES;
7007 return POWER_DOMAIN_PORT_DDI_F_LANES;
7009 return POWER_DOMAIN_PORT_DDI_G_LANES;
7012 return POWER_DOMAIN_PORT_OTHER;
/*
 * Pick the AUX power domain for @dig_port.  Type-C ports operating in
 * Thunderbolt-alt mode use the *_TBT variants; everything else uses the
 * plain AUX domains.  Both switches fall back via MISSING_CASE().
 * NOTE(review): `case AUX_CH_x:` labels are missing from this extract.
 */
7016 enum intel_display_power_domain
7017 intel_aux_power_domain(struct intel_digital_port *dig_port)
7019 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
7020 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
7022 if (intel_phy_is_tc(dev_priv, phy) &&
7023 dig_port->tc_mode == TC_PORT_TBT_ALT) {
7024 switch (dig_port->aux_ch) {
7026 return POWER_DOMAIN_AUX_C_TBT;
7028 return POWER_DOMAIN_AUX_D_TBT;
7030 return POWER_DOMAIN_AUX_E_TBT;
7032 return POWER_DOMAIN_AUX_F_TBT;
7034 return POWER_DOMAIN_AUX_G_TBT;
7036 MISSING_CASE(dig_port->aux_ch);
7037 return POWER_DOMAIN_AUX_C_TBT;
7041 switch (dig_port->aux_ch) {
7043 return POWER_DOMAIN_AUX_A;
7045 return POWER_DOMAIN_AUX_B;
7047 return POWER_DOMAIN_AUX_C;
7049 return POWER_DOMAIN_AUX_D;
7051 return POWER_DOMAIN_AUX_E;
7053 return POWER_DOMAIN_AUX_F;
7055 return POWER_DOMAIN_AUX_G;
7057 MISSING_CASE(dig_port->aux_ch);
7058 return POWER_DOMAIN_AUX_A;
/*
 * Build the bitmask of display power domains needed by @crtc_state:
 * pipe + transcoder always; panel fitter when enabled/forced; each
 * attached encoder's domain; audio on DDI platforms; and the display
 * core domain when a shared DPLL is in use.  Returns 0 (presumably —
 * the early-return body is missing from this extract) when inactive.
 */
7062 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7064 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7065 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7066 struct drm_encoder *encoder;
7067 enum pipe pipe = crtc->pipe;
7069 enum transcoder transcoder = crtc_state->cpu_transcoder;
7071 if (!crtc_state->hw.active)
7074 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7075 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7076 if (crtc_state->pch_pfit.enabled ||
7077 crtc_state->pch_pfit.force_thru)
7078 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7080 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7081 crtc_state->uapi.encoder_mask) {
7082 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7084 mask |= BIT_ULL(intel_encoder->power_domain);
7087 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7088 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7090 if (crtc_state->shared_dpll)
7091 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
/*
 * Acquire the power domains newly required by @crtc_state and record
 * them in crtc->enabled_power_domains.  Returns the mask of domains
 * that were held before but are no longer needed, for the caller to
 * release later (see modeset_put_power_domains()).
 */
7097 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7099 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7100 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7101 enum intel_display_power_domain domain;
7102 u64 domains, new_domains, old_domains;
7104 old_domains = crtc->enabled_power_domains;
7105 crtc->enabled_power_domains = new_domains =
7106 get_crtc_power_domains(crtc_state);
7108 domains = new_domains & ~old_domains;
7110 for_each_power_domain(domain, domains)
7111 intel_display_power_get(dev_priv, domain);
7113 return old_domains & ~new_domains;
/*
 * Release every power domain set in @domains (the mask previously
 * returned by modeset_get_crtc_power_domains()).
 */
7116 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
7119 enum intel_display_power_domain domain;
7121 for_each_power_domain(domain, domains)
7122 intel_display_power_put_unchecked(dev_priv, domain);
/*
 * VLV/CHV crtc enable sequence.  The ordering below (timings, pipeconf,
 * PLL prepare/enable, encoder pre_enable, pfit, LUTs, watermarks, pipe
 * enable, vblank on, encoder enable) is hardware-mandated — do not
 * reorder.  CHV pipe B additionally needs its blender/canvas set up.
 */
7125 static void valleyview_crtc_enable(struct intel_atomic_state *state,
7126 struct intel_crtc *crtc)
7128 const struct intel_crtc_state *new_crtc_state =
7129 intel_atomic_get_new_crtc_state(state, crtc);
7130 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7131 enum pipe pipe = crtc->pipe;
7133 if (WARN_ON(crtc->active))
7136 if (intel_crtc_has_dp_encoder(new_crtc_state))
7137 intel_dp_set_m_n(new_crtc_state, M1_N1);
7139 intel_set_pipe_timings(new_crtc_state);
7140 intel_set_pipe_src_size(new_crtc_state);
7142 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
7143 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
7144 I915_WRITE(CHV_CANVAS(pipe), 0);
7147 i9xx_set_pipeconf(new_crtc_state);
7149 crtc->active = true;
7151 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7153 intel_encoders_pre_pll_enable(state, crtc);
7155 if (IS_CHERRYVIEW(dev_priv)) {
7156 chv_prepare_pll(crtc, new_crtc_state);
7157 chv_enable_pll(crtc, new_crtc_state);
7159 vlv_prepare_pll(crtc, new_crtc_state);
7160 vlv_enable_pll(crtc, new_crtc_state);
7163 intel_encoders_pre_enable(state, crtc);
7165 i9xx_pfit_enable(new_crtc_state);
7167 intel_color_load_luts(new_crtc_state);
7168 intel_color_commit(new_crtc_state);
7169 /* update DSPCNTR to configure gamma for pipe bottom color */
7170 intel_disable_primary_plane(new_crtc_state);
7172 dev_priv->display.initial_watermarks(state, crtc);
7173 intel_enable_pipe(new_crtc_state);
7175 intel_crtc_vblank_on(new_crtc_state);
7177 intel_encoders_enable(state, crtc);
/*
 * Write the precomputed DPLL FP0/FP1 divider values for this pipe from
 * the crtc state's dpll_hw_state.
 */
7180 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7182 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7183 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7185 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
7186 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
/*
 * Gen2-4 (non-VLV/CHV GMCH) crtc enable sequence.  Mirrors
 * valleyview_crtc_enable() but uses i9xx_enable_pll() and skips FIFO
 * underrun reporting on gen2 (not supported there).  Platforms without
 * an initial_watermarks hook fall back to intel_update_watermarks().
 */
7189 static void i9xx_crtc_enable(struct intel_atomic_state *state,
7190 struct intel_crtc *crtc)
7192 const struct intel_crtc_state *new_crtc_state =
7193 intel_atomic_get_new_crtc_state(state, crtc);
7194 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7195 enum pipe pipe = crtc->pipe;
7197 if (WARN_ON(crtc->active))
7200 i9xx_set_pll_dividers(new_crtc_state);
7202 if (intel_crtc_has_dp_encoder(new_crtc_state))
7203 intel_dp_set_m_n(new_crtc_state, M1_N1);
7205 intel_set_pipe_timings(new_crtc_state);
7206 intel_set_pipe_src_size(new_crtc_state);
7208 i9xx_set_pipeconf(new_crtc_state);
7210 crtc->active = true;
7212 if (!IS_GEN(dev_priv, 2))
7213 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7215 intel_encoders_pre_enable(state, crtc);
7217 i9xx_enable_pll(crtc, new_crtc_state);
7219 i9xx_pfit_enable(new_crtc_state);
7221 intel_color_load_luts(new_crtc_state);
7222 intel_color_commit(new_crtc_state);
7223 /* update DSPCNTR to configure gamma for pipe bottom color */
7224 intel_disable_primary_plane(new_crtc_state);
7226 if (dev_priv->display.initial_watermarks)
7227 dev_priv->display.initial_watermarks(state, crtc);
7229 intel_update_watermarks(crtc);
7230 intel_enable_pipe(new_crtc_state);
7232 intel_crtc_vblank_on(new_crtc_state);
7234 intel_encoders_enable(state, crtc);
/*
 * Turn off the GMCH panel fitter.  No-op when the old state had the
 * fitter disabled; requires the pipe to already be off (asserted).
 */
7237 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7239 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7240 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7242 if (!old_crtc_state->gmch_pfit.control)
7245 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
7247 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
7248 I915_READ(PFIT_CONTROL));
7249 I915_WRITE(PFIT_CONTROL, 0);
/*
 * GMCH-platform crtc disable sequence (reverse of the enable path):
 * encoders off, vblank off, pipe off, pfit off, post-disable hooks,
 * then the PLL (platform-specific) unless DSI owns the clock, and
 * finally post-PLL hooks and watermark/underrun bookkeeping.
 */
7252 static void i9xx_crtc_disable(struct intel_atomic_state *state,
7253 struct intel_crtc *crtc)
7255 struct intel_crtc_state *old_crtc_state =
7256 intel_atomic_get_old_crtc_state(state, crtc);
7257 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7258 enum pipe pipe = crtc->pipe;
7261 * On gen2 planes are double buffered but the pipe isn't, so we must
7262 * wait for planes to fully turn off before disabling the pipe.
7264 if (IS_GEN(dev_priv, 2))
7265 intel_wait_for_vblank(dev_priv, pipe);
7267 intel_encoders_disable(state, crtc);
7269 intel_crtc_vblank_off(old_crtc_state);
7271 intel_disable_pipe(old_crtc_state);
7273 i9xx_pfit_disable(old_crtc_state);
7275 intel_encoders_post_disable(state, crtc);
7277 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
7278 if (IS_CHERRYVIEW(dev_priv))
7279 chv_disable_pll(dev_priv, pipe);
7280 else if (IS_VALLEYVIEW(dev_priv))
7281 vlv_disable_pll(dev_priv, pipe);
7283 i9xx_disable_pll(old_crtc_state);
7286 intel_encoders_post_pll_disable(state, crtc);
7288 if (!IS_GEN(dev_priv, 2))
7289 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
7291 if (!dev_priv->display.initial_watermarks)
7292 intel_update_watermarks(crtc);
7294 /* clock the pipe down to 640x480@60 to potentially save power */
7295 if (IS_I830(dev_priv))
7296 i830_enable_pipe(dev_priv, pipe);
/*
 * Force a crtc off outside the normal atomic commit flow (used during
 * HW state sanitization at init/resume).  Disables all visible planes,
 * builds a throwaway atomic state just so the platform crtc_disable
 * hook has something to chew on, then scrubs all software tracking:
 * crtc/encoder state, FBC, watermarks, shared DPLL, power domains, and
 * the per-pipe cdclk/voltage/bandwidth bookkeeping.
 */
7299 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
7300 struct drm_modeset_acquire_ctx *ctx)
7302 struct intel_encoder *encoder;
7303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7304 struct intel_bw_state *bw_state =
7305 to_intel_bw_state(dev_priv->bw_obj.state);
7306 struct intel_crtc_state *crtc_state =
7307 to_intel_crtc_state(crtc->base.state);
7308 enum intel_display_power_domain domain;
7309 struct intel_plane *plane;
7310 struct drm_atomic_state *state;
7311 struct intel_crtc_state *temp_crtc_state;
7312 enum pipe pipe = crtc->pipe;
7316 if (!crtc_state->hw.active)
7319 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7320 const struct intel_plane_state *plane_state =
7321 to_intel_plane_state(plane->base.state);
7323 if (plane_state->uapi.visible)
7324 intel_plane_disable_noatomic(crtc, plane);
7327 state = drm_atomic_state_alloc(&dev_priv->drm);
7329 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
7330 crtc->base.base.id, crtc->base.name);
7334 state->acquire_ctx = ctx;
7336 /* Everything's already locked, -EDEADLK can't happen. */
7337 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
7338 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
7340 WARN_ON(IS_ERR(temp_crtc_state) || ret);
7342 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
7344 drm_atomic_state_put(state);
7346 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
7347 crtc->base.base.id, crtc->base.name);
7349 crtc->active = false;
7350 crtc->base.enabled = false;
7352 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
7353 crtc_state->uapi.active = false;
7354 crtc_state->uapi.connector_mask = 0;
7355 crtc_state->uapi.encoder_mask = 0;
7356 intel_crtc_free_hw_state(crtc_state);
7357 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
7359 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
7360 encoder->base.crtc = NULL;
7362 intel_fbc_disable(crtc);
7363 intel_update_watermarks(crtc);
7364 intel_disable_shared_dpll(crtc_state);
7366 domains = crtc->enabled_power_domains;
7367 for_each_power_domain(domain, domains)
7368 intel_display_power_put_unchecked(dev_priv, domain);
7369 crtc->enabled_power_domains = 0;
7371 dev_priv->active_pipes &= ~BIT(pipe);
7372 dev_priv->min_cdclk[pipe] = 0;
7373 dev_priv->min_voltage_level[pipe] = 0;
7375 bw_state->data_rate[pipe] = 0;
7376 bw_state->num_active_planes[pipe] = 0;
* turn all crtc's off, but do not adjust state
* This has to be paired with a call to intel_modeset_setup_hw_state.
/*
 * Suspends all crtcs via the DRM atomic helper and stashes the
 * captured state in modeset_restore_state for later resume.
 * Returns the PTR_ERR of the helper on failure, 0 on success
 * (final return missing from this extract).
 */
7383 int intel_display_suspend(struct drm_device *dev)
7385 struct drm_i915_private *dev_priv = to_i915(dev);
7386 struct drm_atomic_state *state;
7389 state = drm_atomic_helper_suspend(dev);
7390 ret = PTR_ERR_OR_ZERO(state);
7392 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7394 dev_priv->modeset_restore_state = state;
/*
 * Generic encoder destroy hook: clean up the DRM core object, then
 * free the containing intel_encoder allocation.
 */
7398 void intel_encoder_destroy(struct drm_encoder *encoder)
7400 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7402 drm_encoder_cleanup(encoder);
7403 kfree(intel_encoder);
7406 /* Cross check the actual hw state with our own modeset state tracking (and it's
7407 * internal consistency). */
/*
 * State-checker helper: warns (I915_STATE_WARN) on any mismatch between
 * a connector's hardware state and the software-tracked crtc/encoder
 * state.  MST encoders are exempted from the best_encoder checks since
 * their encoder<->connector links are dynamic.
 */
7408 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
7409 struct drm_connector_state *conn_state)
7411 struct intel_connector *connector = to_intel_connector(conn_state->connector);
7413 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
7414 connector->base.base.id,
7415 connector->base.name);
7417 if (connector->get_hw_state(connector)) {
7418 struct intel_encoder *encoder = connector->encoder;
7420 I915_STATE_WARN(!crtc_state,
7421 "connector enabled without attached crtc\n");
7426 I915_STATE_WARN(!crtc_state->hw.active,
7427 "connector is active, but attached crtc isn't\n");
7429 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7432 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7433 "atomic encoder doesn't match attached encoder\n");
7435 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7436 "attached encoder crtc differs from connector crtc\n");
7438 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
7439 "attached crtc is active, but connector isn't\n");
7440 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7441 "best encoder set without crtc!\n");
/*
 * FDI lanes this crtc state needs: its fdi_lanes count when enabled
 * with a PCH encoder, otherwise 0 (fallback return missing from this
 * extract).
 */
7445 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7447 if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7448 return crtc_state->fdi_lanes;
/*
 * Validate the FDI lane allocation for @pipe against platform limits
 * and the other pipes sharing FDI bandwidth:
 *  - 4 lanes absolute maximum, 2 on HSW/BDW;
 *  - two-pipe platforms have no sharing constraints;
 *  - on IVB's 3-pipe setup, pipes B and C share lanes, so using >2
 *    lanes on one pipe conflicts with the other (the other pipe's
 *    state is pulled into the atomic state to check, which can
 *    return -EDEADLK).
 * NOTE(review): the switch on @pipe and several returns are missing
 * from this extract; branch pairing below is approximate.
 */
7453 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7454 struct intel_crtc_state *pipe_config)
7456 struct drm_i915_private *dev_priv = to_i915(dev);
7457 struct drm_atomic_state *state = pipe_config->uapi.state;
7458 struct intel_crtc *other_crtc;
7459 struct intel_crtc_state *other_crtc_state;
7461 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7462 pipe_name(pipe), pipe_config->fdi_lanes);
7463 if (pipe_config->fdi_lanes > 4) {
7464 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7465 pipe_name(pipe), pipe_config->fdi_lanes);
7469 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7470 if (pipe_config->fdi_lanes > 2) {
7471 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7472 pipe_config->fdi_lanes);
7479 if (INTEL_NUM_PIPES(dev_priv) == 2)
7482 /* Ivybridge 3 pipe is really complicated */
7487 if (pipe_config->fdi_lanes <= 2)
7490 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7492 intel_atomic_get_crtc_state(state, other_crtc);
7493 if (IS_ERR(other_crtc_state))
7494 return PTR_ERR(other_crtc_state);
7496 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7497 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7498 pipe_name(pipe), pipe_config->fdi_lanes);
7503 if (pipe_config->fdi_lanes > 2) {
7504 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7505 pipe_name(pipe), pipe_config->fdi_lanes);
7509 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7511 intel_atomic_get_crtc_state(state, other_crtc);
7512 if (IS_ERR(other_crtc_state))
7513 return PTR_ERR(other_crtc_state);
7515 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7516 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
/*
 * Compute the FDI link configuration (lane count and M/N values) for a
 * PCH-attached pipe.  If the lane check fails with -EINVAL and the
 * pipe bpp can still be lowered (floor 6 bpc * 3), reduce bpp by one
 * step and flag a recompute; -EDEADLK is propagated for the atomic
 * backoff dance.
 */
7526 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7527 struct intel_crtc_state *pipe_config)
7529 struct drm_device *dev = intel_crtc->base.dev;
7530 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
7531 int lane, link_bw, fdi_dotclock, ret;
7532 bool needs_recompute = false;
7535 /* FDI is a binary signal running at ~2.7GHz, encoding
7536 * each output octet as 10 bits. The actual frequency
7537 * is stored as a divider into a 100MHz clock, and the
7538 * mode pixel clock is stored in units of 1KHz.
7539 * Hence the bw of each lane in terms of the mode signal
7542 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7544 fdi_dotclock = adjusted_mode->crtc_clock;
7546 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7547 pipe_config->pipe_bpp);
7549 pipe_config->fdi_lanes = lane;
7551 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7552 link_bw, &pipe_config->fdi_m_n, false, false);
7554 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7555 if (ret == -EDEADLK)
7558 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
7559 pipe_config->pipe_bpp -= 2*3;
7560 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7561 pipe_config->pipe_bpp);
7562 needs_recompute = true;
7563 pipe_config->bw_constrained = true;
7568 if (needs_recompute)
/*
 * Whether this crtc state can use IPS (Intermediate Pixel Storage):
 * requires an IPS-capable crtc (ULT, pipe A), the enable_ips modparam,
 * <= 24 bpp, and on BDW a pixel rate within 95% of max cdclk.
 * (Early-return bodies and final return are missing from this extract.)
 */
7574 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7576 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7577 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7579 /* IPS only exists on ULT machines and is tied to pipe A. */
7580 if (!hsw_crtc_supports_ips(crtc))
7583 if (!i915_modparams.enable_ips)
7586 if (crtc_state->pipe_bpp > 24)
7590 * We compare against max which means we must take
7591 * the increased cdclk requirement into account when
7592 * calculating the new cdclk.
7594 * Should measure whether using a lower cdclk w/o IPS
7596 if (IS_BROADWELL(dev_priv) &&
7597 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
/*
 * Decide whether IPS should actually be enabled for this state:
 * capable per hsw_crtc_state_ips_capable(), not doing CRC capture
 * (IPS perturbs pipe CRCs), at least one non-cursor plane enabled,
 * and on BDW within 95% of the *logical* cdclk for this state.
 */
7603 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7605 struct drm_i915_private *dev_priv =
7606 to_i915(crtc_state->uapi.crtc->dev);
7607 struct intel_atomic_state *intel_state =
7608 to_intel_atomic_state(crtc_state->uapi.state);
7610 if (!hsw_crtc_state_ips_capable(crtc_state))
7614 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7615 * enabled and disabled dynamically based on package C states,
7616 * user space can't make reliable use of the CRCs, so let's just
7617 * completely disable it.
7619 if (crtc_state->crc_enabled)
7622 /* IPS should be fine as long as at least one plane is enabled. */
7623 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7626 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7627 if (IS_BROADWELL(dev_priv) &&
7628 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
/*
 * Double-wide pipe mode exists only pre-gen4; i915G ("GDG") allows it
 * on either pipe, other platforms on pipe A only.
 */
7634 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7636 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7638 /* GDG double wide on either pipe, otherwise pipe A only */
7639 return INTEL_GEN(dev_priv) < 4 &&
7640 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
/*
 * Effective pipe pixel rate for ILK+ platforms.  Starts from the
 * adjusted mode's crtc_clock; when the PCH panel fitter downscales
 * (pipe src larger than pfit window) the rate is scaled by the
 * pipe/pfit area ratio.  pfit.size packs width in the high 16 bits
 * and height in the low 16.
 */
7643 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7647 pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7650 * We only use IF-ID interlacing. If we ever use
7651 * PF-ID we'll need to adjust the pixel_rate here.
7654 if (pipe_config->pch_pfit.enabled) {
7655 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7656 u32 pfit_size = pipe_config->pch_pfit.size;
7658 pipe_w = pipe_config->pipe_src_w;
7659 pipe_h = pipe_config->pipe_src_h;
7661 pfit_w = (pfit_size >> 16) & 0xFFFF;
7662 pfit_h = pfit_size & 0xFFFF;
7663 if (pipe_w < pfit_w)
7665 if (pipe_h < pfit_h)
7668 if (WARN_ON(!pfit_w || !pfit_h))
7671 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
/*
 * Cache the crtc state's pixel rate: raw dotclock on GMCH platforms
 * (pfit effect not yet modelled — see FIXME), pfit-adjusted rate via
 * ilk_pipe_pixel_rate() elsewhere.
 */
7678 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7680 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7682 if (HAS_GMCH(dev_priv))
7683 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7684 crtc_state->pixel_rate =
7685 crtc_state->hw.adjusted_mode.crtc_clock;
7687 crtc_state->pixel_rate =
7688 ilk_pipe_pixel_rate(crtc_state);
/*
 * Platform-independent crtc config validation/fixup:
 *  - pre-gen4 dotclock limit is 90% of cdclk, lifted back to max
 *    dotclock by enabling double-wide mode where supported;
 *  - rejects YCbCr output combined with a CTM (single pipe CSC unit);
 *  - rejects odd source widths for double-wide and dual-link LVDS;
 *  - applies WaPruneModeWithIncorrectHsyncOffset;
 *  - computes pixel rate and, for PCH encoders, the FDI config.
 * (Several early-return bodies are missing from this extract.)
 */
7691 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7692 struct intel_crtc_state *pipe_config)
7694 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7695 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
7696 int clock_limit = dev_priv->max_dotclk_freq;
7698 if (INTEL_GEN(dev_priv) < 4) {
7699 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7702 * Enable double wide mode when the dot clock
7703 * is > 90% of the (display) core speed.
7705 if (intel_crtc_supports_double_wide(crtc) &&
7706 adjusted_mode->crtc_clock > clock_limit) {
7707 clock_limit = dev_priv->max_dotclk_freq;
7708 pipe_config->double_wide = true;
7712 if (adjusted_mode->crtc_clock > clock_limit) {
7713 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7714 adjusted_mode->crtc_clock, clock_limit,
7715 yesno(pipe_config->double_wide));
7719 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7720 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7721 pipe_config->hw.ctm) {
7723 * There is only one pipe CSC unit per pipe, and we need that
7724 * for output conversion from RGB->YCBCR. So if CTM is already
7725 * applied we can't support YCBCR420 output.
7727 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7732 * Pipe horizontal size must be even in:
7734 * - LVDS dual channel mode
7735 * - Double wide pipe
7737 if (pipe_config->pipe_src_w & 1) {
7738 if (pipe_config->double_wide) {
7739 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7743 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7744 intel_is_dual_link_lvds(dev_priv)) {
7745 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7750 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
7751 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7753 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7754 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7757 intel_crtc_compute_pixel_rate(pipe_config);
7759 if (pipe_config->has_pch_encoder)
7760 return ironlake_fdi_compute_config(crtc, pipe_config);
/*
 * Halve the M/N pair until both fit within DATA_LINK_M_N_MASK,
 * preserving the ratio (the loop body shifting both values right is
 * missing from this extract).
 */
7766 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7768 while (*num > DATA_LINK_M_N_MASK ||
7769 *den > DATA_LINK_M_N_MASK) {
/*
 * Compute an M/N divider pair for ratio m/n.  N is rounded up to a
 * power of two (capped at DATA_LINK_N_MAX) unless the constant-N path
 * (fixed 0x8000 for fussy DP dongles — see comment) applies; M is then
 * scaled to preserve the ratio and both are reduced to fit the
 * register fields.
 */
7775 static void compute_m_n(unsigned int m, unsigned int n,
7776 u32 *ret_m, u32 *ret_n,
7780 * Several DP dongles in particular seem to be fussy about
7781 * too large link M/N values. Give N value as 0x8000 that
7782 * should be acceptable by specific devices. 0x8000 is the
7783 * specified fixed N value for asynchronous clock mode,
7784 * which the devices expect also in synchronous clock mode.
7789 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7791 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7792 intel_reduce_m_n_ratio(ret_m, ret_n);
/*
 * Fill @m_n with the data (gmch) and link M/N values for a display
 * link: data M/N is pixel-data clock vs. total link bandwidth
 * (link_clock * nlanes * 8), link M/N is pixel clock vs. link clock.
 * @fec_enable inflates the data clock for FEC overhead.
 */
7796 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7797 int pixel_clock, int link_clock,
7798 struct intel_link_m_n *m_n,
7799 bool constant_n, bool fec_enable)
7801 u32 data_clock = bits_per_pixel * pixel_clock;
7804 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7807 compute_m_n(data_clock,
7808 link_clock * nlanes * 8,
7809 &m_n->gmch_m, &m_n->gmch_n,
7812 compute_m_n(pixel_clock, link_clock,
7813 &m_n->link_m, &m_n->link_n,
/*
 * On IBX/CPT PCHs, override the VBT's lvds_use_ssc with whatever the
 * BIOS actually programmed in PCH_DREF_CONTROL — trusting the running
 * configuration avoids flicker from toggling SSC.
 */
7817 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7820 * There may be no VBT; and if the BIOS enabled SSC we can
7821 * just keep using it to avoid unnecessary flicker. Whereas if the
7822 * BIOS isn't using it, don't assume it will work even if the VBT
7823 * indicates as much.
7825 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7826 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
7829 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7830 DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
7831 enableddisabled(bios_lvds_use_ssc),
7832 enableddisabled(dev_priv->vbt.lvds_use_ssc));
7833 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
/*
 * Whether to use SSC for the panel: the panel_use_ssc modparam wins
 * when set (>= 0), otherwise follow the (sanitized) VBT unless the
 * QUIRK_LVDS_SSC_DISABLE quirk applies.
 */
7838 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7840 if (i915_modparams.panel_use_ssc >= 0)
7841 return i915_modparams.panel_use_ssc != 0;
7842 return dev_priv->vbt.lvds_use_ssc
7843 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
/* Pack the Pineview FP register value: (1 << n) in the high word, m2 low. */
7846 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7848 return (1 << dpll->n) << 16 | dpll->m2;
/* Pack the i9xx FP register value: n[23:16], m1[15:8], m2[7:0]. */
7851 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7853 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
/*
 * Compute fp0/fp1 for the crtc state's dpll_hw_state using the
 * PNV or i9xx packing as appropriate.  fp1 carries the reduced
 * (downclocked) dividers only for LVDS (condition partially lost in
 * this extract); otherwise it mirrors fp0.
 */
7856 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7857 struct intel_crtc_state *crtc_state,
7858 struct dpll *reduced_clock)
7860 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7863 if (IS_PINEVIEW(dev_priv)) {
7864 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7866 fp2 = pnv_dpll_compute_fp(reduced_clock);
7868 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7870 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7873 crtc_state->dpll_hw_state.fp0 = fp;
7875 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7877 crtc_state->dpll_hw_state.fp1 = fp2;
7879 crtc_state->dpll_hw_state.fp1 = fp;
/*
 * DPIO workaround for PLL B: the opamp always calibrates to the max
 * 0x3f, so force-enable it and program a sane value via the magic
 * DW9/REF_DW13 sequence below (values per the VBIOS/DPIO notes).
 */
7883 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7889 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7890 * and set it to a reasonable value instead.
7892 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7893 reg_val &= 0xffffff00;
7894 reg_val |= 0x00000030;
7895 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7897 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7898 reg_val &= 0x00ffffff;
7899 reg_val |= 0x8c000000;
7900 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7902 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7903 reg_val &= 0xffffff00;
7904 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7906 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7907 reg_val &= 0x00ffffff;
7908 reg_val |= 0xb0000000;
7909 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
/*
 * Program the PCH transcoder's primary (M1/N1) data and link M/N
 * registers from @m_n; the data-M register also carries the TU size.
 */
7912 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7913 const struct intel_link_m_n *m_n)
7915 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7917 enum pipe pipe = crtc->pipe;
7919 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7920 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7921 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7922 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
/*
 * Whether @transcoder exposes the M2/N2 register set (needed for
 * DRRS): on HSW only the eDP transcoder; otherwise gen7 and CHV.
 */
7925 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7926 enum transcoder transcoder)
7928 if (IS_HASWELL(dev_priv))
7929 return transcoder == TRANSCODER_EDP;
7932 * Strictly speaking some registers are available before
7933 * gen7, but we only support DRRS on gen7+
7935 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
/*
 * Program the CPU transcoder's M/N registers.  Gen5+ uses the
 * transcoder-indexed PIPE_* registers and additionally writes M2/N2
 * when DRRS is in use and the transcoder supports them; older GMCH
 * platforms use the pipe-indexed *_G4X registers (else-branch — brace
 * lines lost in this extract).
 */
7938 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7939 const struct intel_link_m_n *m_n,
7940 const struct intel_link_m_n *m2_n2)
7942 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7943 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7944 enum pipe pipe = crtc->pipe;
7945 enum transcoder transcoder = crtc_state->cpu_transcoder;
7947 if (INTEL_GEN(dev_priv) >= 5) {
7948 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7949 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7950 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7951 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7953 * M2_N2 registers are set only if DRRS is supported
7954 * (to make sure the registers are not unnecessarily accessed).
7956 if (m2_n2 && crtc_state->has_drrs &&
7957 transcoder_has_m2_n2(dev_priv, transcoder)) {
7958 I915_WRITE(PIPE_DATA_M2(transcoder),
7959 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7960 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7961 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7962 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7965 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7966 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7967 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7968 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
/*
 * Program DP M/N values selected by @m_n (M1_N1 or M2_N2).  For M2_N2
 * on hardware without separate M2/N2 registers, the dp_m2_n2 values go
 * into the M1/N1 registers instead.  PCH encoders always get dp_m_n on
 * the PCH transcoder; everything else goes through the CPU transcoder
 * helper.
 */
7972 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7974 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7977 dp_m_n = &crtc_state->dp_m_n;
7978 dp_m2_n2 = &crtc_state->dp_m2_n2;
7979 } else if (m_n == M2_N2) {
7982 * M2_N2 registers are not supported. Hence m2_n2 divider value
7983 * needs to be programmed into M1_N1.
7985 dp_m_n = &crtc_state->dp_m2_n2;
7987 DRM_ERROR("Unsupported divider value\n");
7991 if (crtc_state->has_pch_encoder)
7992 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7994 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
/*
 * Compute the VLV DPLL control and MD register values into
 * dpll_hw_state.  Pipe B/C additionally need the integrated CRI
 * clock; DSI leaves the VCO/ext-buffer bits off since the DPLL is
 * unused but the rest must still be programmed.
 */
7997 static void vlv_compute_dpll(struct intel_crtc *crtc,
7998 struct intel_crtc_state *pipe_config)
8000 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8001 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8002 if (crtc->pipe != PIPE_A)
8003 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8005 /* DPLL not used with DSI, but still need the rest set up */
8006 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8007 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8008 DPLL_EXT_BUFFER_ENABLE_VLV;
8010 pipe_config->dpll_hw_state.dpll_md =
8011 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
/*
 * CHV variant of vlv_compute_dpll(): uses the SSC reference clock and
 * has no external buffer enable bit; otherwise the same DSI and
 * pipe-B/C special cases apply.
 */
8014 static void chv_compute_dpll(struct intel_crtc *crtc,
8015 struct intel_crtc_state *pipe_config)
8017 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8018 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8019 if (crtc->pipe != PIPE_A)
8020 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8022 /* DPLL not used with DSI, but still need the rest set up */
8023 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8024 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8026 pipe_config->dpll_hw_state.dpll_md =
8027 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
/*
 * Program the VLV DPLL through DPIO before enabling it: mdiv dividers,
 * LPF coefficients (different for HBR/analog/HDMI vs. RBR), clock
 * source selection (SSC for DP, bend for HDMI/VGA) and core clock
 * setup.  Skipped entirely when the DPLL isn't used (DSI).  The exact
 * write sequence follows the eDP/HDMI DPIO driver VBIOS notes — do not
 * reorder.
 */
8030 static void vlv_prepare_pll(struct intel_crtc *crtc,
8031 const struct intel_crtc_state *pipe_config)
8033 struct drm_device *dev = crtc->base.dev;
8034 struct drm_i915_private *dev_priv = to_i915(dev);
8035 enum pipe pipe = crtc->pipe;
8037 u32 bestn, bestm1, bestm2, bestp1, bestp2;
8038 u32 coreclk, reg_val;
8041 I915_WRITE(DPLL(pipe),
8042 pipe_config->dpll_hw_state.dpll &
8043 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
8045 /* No need to actually set up the DPLL with DSI */
8046 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8049 vlv_dpio_get(dev_priv);
8051 bestn = pipe_config->dpll.n;
8052 bestm1 = pipe_config->dpll.m1;
8053 bestm2 = pipe_config->dpll.m2;
8054 bestp1 = pipe_config->dpll.p1;
8055 bestp2 = pipe_config->dpll.p2;
8057 /* See eDP HDMI DPIO driver vbios notes doc */
8059 /* PLL B needs special handling */
8061 vlv_pllb_recal_opamp(dev_priv, pipe);
8063 /* Set up Tx target for periodic Rcomp update */
8064 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
8066 /* Disable target IRef on PLL */
8067 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
8068 reg_val &= 0x00ffffff;
8069 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
8071 /* Disable fast lock */
8072 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
8074 /* Set idtafcrecal before PLL is enabled */
8075 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
8076 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
8077 mdiv |= ((bestn << DPIO_N_SHIFT));
8078 mdiv |= (1 << DPIO_K_SHIFT);
8081 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
8082 * but we don't support that).
8083 * Note: don't use the DAC post divider as it seems unstable.
8085 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
8086 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
8088 mdiv |= DPIO_ENABLE_CALIBRATION;
8089 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
8091 /* Set HBR and RBR LPF coefficients */
8092 if (pipe_config->port_clock == 162000 ||
8093 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
8094 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
8095 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8098 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8101 if (intel_crtc_has_dp_encoder(pipe_config)) {
8102 /* Use SSC source */
8104 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8107 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8109 } else { /* HDMI or VGA */
8110 /* Use bend source */
8112 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8115 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8119 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
8120 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
8121 if (intel_crtc_has_dp_encoder(pipe_config))
8122 coreclk |= 0x01000000;
8123 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
8125 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
8127 vlv_dpio_put(dev_priv);
/*
 * chv_prepare_pll - program the CHV DPIO PLL state prior to enabling it.
 *
 * Writes P1/P2/S1/K dividers, integer and fractional M2 feedback dividers,
 * lock-detect threshold, loop-filter coefficients (selected by target VCO
 * frequency) and the TDC target count through the DPIO sideband interface.
 * Skips all sideband work when the DPLL is not going to be used (DSI).
 *
 * NOTE(review): this chunk appears elided (braces, `return` statements and
 * some conditionals are missing); comments describe visible lines only.
 */
8130 static void chv_prepare_pll(struct intel_crtc *crtc,
8131 			    const struct intel_crtc_state *pipe_config)
8133 	struct drm_device *dev = crtc->base.dev;
8134 	struct drm_i915_private *dev_priv = to_i915(dev);
8135 	enum pipe pipe = crtc->pipe;
8136 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8137 	u32 loopfilter, tribuf_calcntr;
8138 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
8142 	/* Enable Refclk and SSC */
8143 	I915_WRITE(DPLL(pipe),
8144 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
8146 	/* No need to actually set up the DPLL with DSI */
8147 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
	/* CHV packs the fractional part of M2 into the low 22 bits of dpll.m2;
	 * the integer part lives above bit 22. */
8150 	bestn = pipe_config->dpll.n;
8151 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
8152 	bestm1 = pipe_config->dpll.m1;
8153 	bestm2 = pipe_config->dpll.m2 >> 22;
8154 	bestp1 = pipe_config->dpll.p1;
8155 	bestp2 = pipe_config->dpll.p2;
8156 	vco = pipe_config->dpll.vco;
8160 	vlv_dpio_get(dev_priv);
8162 	/* p1 and p2 divider */
8163 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
8164 			5 << DPIO_CHV_S1_DIV_SHIFT |
8165 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
8166 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
8167 			1 << DPIO_CHV_K_DIV_SHIFT);
8169 	/* Feedback post-divider - m2 */
8170 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
8172 	/* Feedback refclk divider - n and m1 */
8173 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
8174 			DPIO_CHV_M1_DIV_BY_2 |
8175 			1 << DPIO_CHV_N_DIV_SHIFT);
8177 	/* M2 fraction division */
8178 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
8180 	/* M2 fraction division enable */
8181 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8182 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
8183 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	/* NOTE(review): the guard (presumably `if (bestm2_frac)`) is elided. */
8185 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
8186 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
8188 	/* Program digital lock detect threshold */
8189 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
8190 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
8191 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
8192 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* NOTE(review): guard for the coarse-threshold select is elided. */
8194 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
8195 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
	/* Loop filter coefficients and tri-buffer calibration count are chosen
	 * from the target VCO frequency band. */
8198 	if (vco == 5400000) {
8199 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
8200 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
8201 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
8202 		tribuf_calcntr = 0x9;
8203 	} else if (vco <= 6200000) {
8204 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
8205 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
8206 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8207 		tribuf_calcntr = 0x9;
8208 	} else if (vco <= 6480000) {
8209 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8210 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8211 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8212 		tribuf_calcntr = 0x8;
8214 		/* Not supported. Apply the same limits as in the max case */
8215 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8216 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8217 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8220 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
8222 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
8223 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
8224 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
8225 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
	/* Read-modify-write CMN_DW14 (the OR'd bit value is elided here). */
8228 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
8229 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
8232 	vlv_dpio_put(dev_priv);
8236  * vlv_force_pll_on - forcibly enable just the PLL
8237  * @dev_priv: i915 private structure
8238  * @pipe: pipe PLL to enable
8239  * @dpll: PLL configuration
8241  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8242  * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled (e.g. pipe CRC workarounds).  Allocates a throwaway crtc state,
 * fills in just enough of it for the PLL paths, then dispatches to the
 * CHV or VLV compute/prepare/enable sequence.
8245 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8246 		     const struct dpll *dpll)
8248 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8249 	struct intel_crtc_state *pipe_config;
	/* NOTE(review): the NULL check on the allocation appears elided. */
8251 	pipe_config = intel_crtc_state_alloc(crtc);
	/* Minimal state the PLL code consumes: transcoder, multiplier, dividers. */
8255 	pipe_config->cpu_transcoder = (enum transcoder)pipe;
8256 	pipe_config->pixel_multiplier = 1;
8257 	pipe_config->dpll = *dpll;
8259 	if (IS_CHERRYVIEW(dev_priv)) {
8260 		chv_compute_dpll(crtc, pipe_config);
8261 		chv_prepare_pll(crtc, pipe_config);
8262 		chv_enable_pll(crtc, pipe_config);
	/* NOTE(review): the `} else {` for the VLV branch is elided. */
8264 		vlv_compute_dpll(crtc, pipe_config);
8265 		vlv_prepare_pll(crtc, pipe_config);
8266 		vlv_enable_pll(crtc, pipe_config);
8275  * vlv_force_pll_off - forcibly disable just the PLL
8276  * @dev_priv: i915 private structure
8277  * @pipe: pipe PLL to disable
8279  * Disable the PLL for @pipe. To be used in cases where we need
8280  * the PLL enabled even when @pipe is not going to be enabled.
 * Counterpart of vlv_force_pll_on(); simply dispatches to the CHV or VLV
 * disable routine.
8282 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8284 	if (IS_CHERRYVIEW(dev_priv))
8285 		chv_disable_pll(dev_priv, pipe);
	/* NOTE(review): the `else` keyword for the VLV branch is elided. */
8287 		vlv_disable_pll(dev_priv, pipe);
/*
 * i9xx_compute_dpll - build the DPLL (and DPLL_MD on gen4+) register values
 * for gen3/gen4-class hardware from the already-computed clock dividers in
 * @crtc_state->dpll, storing the result in @crtc_state->dpll_hw_state.
 * @reduced_clock optionally supplies a second (downclocked) P1 for G4X.
 *
 * NOTE(review): chunk is elided (braces and some case labels missing);
 * comments describe visible lines only.
 */
8290 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8291 			      struct intel_crtc_state *crtc_state,
8292 			      struct dpll *reduced_clock)
8294 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8296 	struct dpll *clock = &crtc_state->dpll;
8298 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8300 	dpll = DPLL_VGA_MODE_DIS;
	/* Mode select: LVDS vs. DAC/serial (the `else` line is elided). */
8302 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8303 		dpll |= DPLLB_MODE_LVDS;
8305 		dpll |= DPLLB_MODE_DAC_SERIAL;
	/* These platforms encode the pixel multiplier in the DPLL itself. */
8307 	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8308 	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8309 		dpll |= (crtc_state->pixel_multiplier - 1)
8310 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
8313 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8314 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8315 		dpll |= DPLL_SDVO_HIGH_SPEED;
8317 	if (intel_crtc_has_dp_encoder(crtc_state))
8318 		dpll |= DPLL_SDVO_HIGH_SPEED;
8320 	/* compute bitmask from p1 value */
8321 	if (IS_PINEVIEW(dev_priv))
8322 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8324 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8325 	if (IS_G4X(dev_priv) && reduced_clock)
8326 		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	/* P2 post divider: case labels (5/7/10/14) are elided from this view. */
8328 	switch (clock->p2) {
8330 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8333 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8336 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8339 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8342 	if (INTEL_GEN(dev_priv) >= 4)
8343 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	/* Reference clock selection: TV clock, SSC for LVDS panels, or DREF. */
8345 	if (crtc_state->sdvo_tv_clock)
8346 		dpll |= PLL_REF_INPUT_TVCLKINBC;
8347 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8348 		 intel_panel_use_ssc(dev_priv))
8349 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8351 		dpll |= PLL_REF_INPUT_DREFCLK;
8353 	dpll |= DPLL_VCO_ENABLE;
8354 	crtc_state->dpll_hw_state.dpll = dpll;
	/* Gen4+ carries the pixel multiplier in the separate DPLL_MD register. */
8356 	if (INTEL_GEN(dev_priv) >= 4) {
8357 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8358 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
8359 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
/*
 * i8xx_compute_dpll - build the DPLL register value for gen2 hardware from
 * the dividers in @crtc_state->dpll, storing it in
 * @crtc_state->dpll_hw_state.dpll.  @reduced_clock is passed through to
 * i9xx_update_pll_dividers() only.
 *
 * NOTE(review): chunk is elided (braces/else lines missing); comments
 * describe visible lines only.
 */
8363 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8364 			      struct intel_crtc_state *crtc_state,
8365 			      struct dpll *reduced_clock)
8367 	struct drm_device *dev = crtc->base.dev;
8368 	struct drm_i915_private *dev_priv = to_i915(dev);
8370 	struct dpll *clock = &crtc_state->dpll;
8372 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8374 	dpll = DPLL_VGA_MODE_DIS;
	/* P1/P2 encoding differs between LVDS and non-LVDS outputs. */
8376 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8377 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8380 			dpll |= PLL_P1_DIVIDE_BY_TWO;
8382 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8384 			dpll |= PLL_P2_DIVIDE_BY_4;
8389 	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
8390 	 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8391 	 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8392 	 * Enable) must be set to “1” in both the DPLL A Control Register
8393 	 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8395 	 * For simplicity We simply keep both bits always enabled in
8396 	 * both DPLLS. The spec says we should disable the DVO 2X clock
8397 	 * when not needed, but this seems to work fine in practice.
8399 	if (IS_I830(dev_priv) ||
8400 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8401 		dpll |= DPLL_DVO_2X_MODE;
	/* SSC reference only for LVDS panels that request it; else DREFCLK. */
8403 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8404 	    intel_panel_use_ssc(dev_priv))
8405 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8407 		dpll |= PLL_REF_INPUT_DREFCLK;
8409 	dpll |= DPLL_VCO_ENABLE;
8410 	crtc_state->dpll_hw_state.dpll = dpll;
/*
 * intel_set_pipe_timings - program the transcoder H/V timing registers from
 * the adjusted mode in @crtc_state.  Interlaced modes get their vtotal /
 * vblank_end adjusted (the hardware adds the two halflines itself) and a
 * VSYNCSHIFT value; a Haswell eDP-transcoder workaround mirrors VTOTAL into
 * the pipe's own register.
 */
8413 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
8415 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8416 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8417 	enum pipe pipe = crtc->pipe;
8418 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8419 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
8420 	u32 crtc_vtotal, crtc_vblank_end;
8423 	/* We need to be careful not to changed the adjusted mode, for otherwise
8424 	 * the hw state checker will get angry at the mismatch. */
8425 	crtc_vtotal = adjusted_mode->crtc_vtotal;
8426 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
8428 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
8429 		/* the chip adds 2 halflines automatically */
		/* NOTE(review): the crtc_vtotal adjustment line is elided here. */
8431 		crtc_vblank_end -= 1;
8433 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8434 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
8436 			vsyncshift = adjusted_mode->crtc_hsync_start -
8437 				adjusted_mode->crtc_htotal / 2;
		/* NOTE(review): the negative-vsyncshift guard is elided. */
8439 			vsyncshift += adjusted_mode->crtc_htotal;
	/* VSYNCSHIFT does not exist on gen2/3. */
8442 	if (INTEL_GEN(dev_priv) > 3)
8443 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
	/* All timing registers pack (start - 1) | ((end - 1) << 16). */
8445 	I915_WRITE(HTOTAL(cpu_transcoder),
8446 		   (adjusted_mode->crtc_hdisplay - 1) |
8447 		   ((adjusted_mode->crtc_htotal - 1) << 16));
8448 	I915_WRITE(HBLANK(cpu_transcoder),
8449 		   (adjusted_mode->crtc_hblank_start - 1) |
8450 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
8451 	I915_WRITE(HSYNC(cpu_transcoder),
8452 		   (adjusted_mode->crtc_hsync_start - 1) |
8453 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
8455 	I915_WRITE(VTOTAL(cpu_transcoder),
8456 		   (adjusted_mode->crtc_vdisplay - 1) |
8457 		   ((crtc_vtotal - 1) << 16));
8458 	I915_WRITE(VBLANK(cpu_transcoder),
8459 		   (adjusted_mode->crtc_vblank_start - 1) |
8460 		   ((crtc_vblank_end - 1) << 16));
8461 	I915_WRITE(VSYNC(cpu_transcoder),
8462 		   (adjusted_mode->crtc_vsync_start - 1) |
8463 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
8465 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8466 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8467 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
8469 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8470 	    (pipe == PIPE_B || pipe == PIPE_C))
8471 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
/*
 * intel_set_pipe_src_size - program PIPESRC with the source (pre-scaling)
 * size.  The register packs (width - 1) in the high word and (height - 1)
 * in the low word.
 */
8475 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8477 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8478 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8479 	enum pipe pipe = crtc->pipe;
8481 	/* pipesrc controls the size that is scaled from, which should
8482 	 * always be the user's requested size.
8484 	I915_WRITE(PIPESRC(pipe),
8485 		   ((crtc_state->pipe_src_w - 1) << 16) |
8486 		   (crtc_state->pipe_src_h - 1));
/*
 * intel_pipe_is_interlaced - read back from PIPECONF whether the pipe is
 * running an interlaced mode.  Gen2 has no interlace support (that early
 * return is elided from this view); HSW/BDW/gen9+ use a different mask
 * than older parts.
 */
8489 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8491 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8492 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8494 	if (IS_GEN(dev_priv, 2))
8497 	if (INTEL_GEN(dev_priv) >= 9 ||
8498 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8499 		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8501 	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
/*
 * intel_get_pipe_timings - inverse of intel_set_pipe_timings(): read the
 * transcoder timing registers back into @pipe_config->hw.adjusted_mode.
 * Each field decodes the (value & 0xffff) + 1 / ((value >> 16) & 0xffff) + 1
 * packing.  DSI transcoders have no blank/sync registers, hence the guards.
 * For interlaced pipes the halfline adjustments made at program time are
 * undone.
 */
8504 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8505 				   struct intel_crtc_state *pipe_config)
8507 	struct drm_device *dev = crtc->base.dev;
8508 	struct drm_i915_private *dev_priv = to_i915(dev);
8509 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8512 	tmp = I915_READ(HTOTAL(cpu_transcoder));
8513 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8514 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8516 	if (!transcoder_is_dsi(cpu_transcoder)) {
8517 		tmp = I915_READ(HBLANK(cpu_transcoder));
8518 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
8520 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
8521 							((tmp >> 16) & 0xffff) + 1;
8523 	tmp = I915_READ(HSYNC(cpu_transcoder));
8524 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8525 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8527 	tmp = I915_READ(VTOTAL(cpu_transcoder));
8528 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8529 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8531 	if (!transcoder_is_dsi(cpu_transcoder)) {
8532 		tmp = I915_READ(VBLANK(cpu_transcoder));
8533 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
8535 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
8536 							((tmp >> 16) & 0xffff) + 1;
8538 	tmp = I915_READ(VSYNC(cpu_transcoder));
8539 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8540 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8542 	if (intel_pipe_is_interlaced(pipe_config)) {
8543 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8544 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8545 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
/*
 * intel_get_pipe_src_size - read PIPESRC back into @pipe_config.  Decodes
 * the same (w - 1) << 16 | (h - 1) packing that intel_set_pipe_src_size()
 * wrote, and mirrors the result into hw.mode's display dimensions.
 */
8549 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8550 				    struct intel_crtc_state *pipe_config)
8552 	struct drm_device *dev = crtc->base.dev;
8553 	struct drm_i915_private *dev_priv = to_i915(dev);
8556 	tmp = I915_READ(PIPESRC(crtc->pipe));
8557 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8558 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8560 	pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8561 	pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
/*
 * intel_mode_from_pipe_config - populate a drm_display_mode from the
 * crtc_* timing fields of @pipe_config->hw.adjusted_mode, then derive
 * clock, hsync/vrefresh and the mode name.  Used when reconstructing a
 * mode from read-back hardware state.
 */
8564 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8565 				 struct intel_crtc_state *pipe_config)
8567 	mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8568 	mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8569 	mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8570 	mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8572 	mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8573 	mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8574 	mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8575 	mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8577 	mode->flags = pipe_config->hw.adjusted_mode.flags;
8578 	mode->type = DRM_MODE_TYPE_DRIVER;
8580 	mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
	/* Derived fields must be recomputed after the raw timings are set. */
8582 	mode->hsync = drm_mode_hsync(mode);
8583 	mode->vrefresh = drm_mode_vrefresh(mode);
8584 	drm_mode_set_name(mode);
/*
 * i9xx_set_pipeconf - build and write the PIPECONF register for GMCH-class
 * hardware: double-wide mode, bpc/dither (G4X/VLV/CHV only), interlace
 * mode, limited color range (VLV/CHV), and gamma mode.
 *
 * NOTE(review): chunk is elided (case labels and some else lines missing);
 * comments describe visible lines only.
 */
8587 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8589 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8590 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8595 	/* we keep both pipes enabled on 830 */
8596 	if (IS_I830(dev_priv))
8597 		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8599 	if (crtc_state->double_wide)
8600 		pipeconf |= PIPECONF_DOUBLE_WIDE;
8602 	/* only g4x and later have fancy bpc/dither controls */
8603 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8604 	    IS_CHERRYVIEW(dev_priv)) {
8605 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
8606 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8607 			pipeconf |= PIPECONF_DITHER_EN |
8608 				    PIPECONF_DITHER_TYPE_SP;
		/* bpc field from pipe_bpp (18/24/30 -> 6/8/10 bpc); the case
		 * labels are elided from this view. */
8610 		switch (crtc_state->pipe_bpp) {
8612 			pipeconf |= PIPECONF_6BPC;
8615 			pipeconf |= PIPECONF_8BPC;
8618 			pipeconf |= PIPECONF_10BPC;
8621 			/* Case prevented by intel_choose_pipe_bpp_dither. */
8626 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8627 		if (INTEL_GEN(dev_priv) < 4 ||
8628 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8629 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8631 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8633 		pipeconf |= PIPECONF_PROGRESSIVE;
8636 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8637 	     crtc_state->limited_color_range)
8638 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8640 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8642 	pipeconf |= PIPECONF_FRAME_START_DELAY(0);
8644 	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8645 	POSTING_READ(PIPECONF(crtc->pipe));
/*
 * i8xx_crtc_compute_clock - pick PLL dividers for gen2: choose a limit
 * table (LVDS/DVO/DAC) and refclk (SSC for LVDS panels), run the divider
 * search unless userspace already fixed the clock, then encode the result
 * via i8xx_compute_dpll().  The error path / return values are elided
 * from this view.
 */
8648 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8649 				   struct intel_crtc_state *crtc_state)
8651 	struct drm_device *dev = crtc->base.dev;
8652 	struct drm_i915_private *dev_priv = to_i915(dev);
8653 	const struct intel_limit *limit;
8656 	memset(&crtc_state->dpll_hw_state, 0,
8657 	       sizeof(crtc_state->dpll_hw_state));
8659 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8660 		if (intel_panel_use_ssc(dev_priv)) {
8661 			refclk = dev_priv->vbt.lvds_ssc_freq;
8662 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8665 		limit = &intel_limits_i8xx_lvds;
8666 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8667 		limit = &intel_limits_i8xx_dvo;
8669 		limit = &intel_limits_i8xx_dac;
8672 	if (!crtc_state->clock_set &&
8673 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8674 				 refclk, NULL, &crtc_state->dpll)) {
8675 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8679 	i8xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * g4x_crtc_compute_clock - pick PLL dividers for G4X: selects the limit
 * table per output type (single/dual-link LVDS, HDMI/analog, SDVO, other)
 * and SSC refclk for LVDS panels, then encodes via i9xx_compute_dpll().
 * Error path / return values are elided from this view.
 */
8684 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8685 				  struct intel_crtc_state *crtc_state)
8687 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8688 	const struct intel_limit *limit;
8691 	memset(&crtc_state->dpll_hw_state, 0,
8692 	       sizeof(crtc_state->dpll_hw_state));
8694 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8695 		if (intel_panel_use_ssc(dev_priv)) {
8696 			refclk = dev_priv->vbt.lvds_ssc_freq;
8697 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8700 		if (intel_is_dual_link_lvds(dev_priv))
8701 			limit = &intel_limits_g4x_dual_channel_lvds;
8703 			limit = &intel_limits_g4x_single_channel_lvds;
8704 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8705 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8706 		limit = &intel_limits_g4x_hdmi;
8707 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8708 		limit = &intel_limits_g4x_sdvo;
8710 		/* The option is for other outputs */
8711 		limit = &intel_limits_i9xx_sdvo;
8714 	if (!crtc_state->clock_set &&
8715 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8716 				refclk, NULL, &crtc_state->dpll)) {
8717 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8721 	i9xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * pnv_crtc_compute_clock - pick PLL dividers for Pineview: LVDS vs SDVO
 * limit table, SSC refclk for LVDS panels, Pineview-specific divider
 * search, then i9xx_compute_dpll().  Error path / return values are
 * elided from this view.
 */
8726 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8727 				  struct intel_crtc_state *crtc_state)
8729 	struct drm_device *dev = crtc->base.dev;
8730 	struct drm_i915_private *dev_priv = to_i915(dev);
8731 	const struct intel_limit *limit;
8734 	memset(&crtc_state->dpll_hw_state, 0,
8735 	       sizeof(crtc_state->dpll_hw_state));
8737 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8738 		if (intel_panel_use_ssc(dev_priv)) {
8739 			refclk = dev_priv->vbt.lvds_ssc_freq;
8740 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8743 		limit = &intel_limits_pineview_lvds;
8745 		limit = &intel_limits_pineview_sdvo;
8748 	if (!crtc_state->clock_set &&
8749 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8750 				refclk, NULL, &crtc_state->dpll)) {
8751 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8755 	i9xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * i9xx_crtc_compute_clock - pick PLL dividers for gen3/gen4 GMCH parts:
 * LVDS vs SDVO limit table, SSC refclk for LVDS panels, then
 * i9xx_compute_dpll().  Error path / return values are elided from
 * this view.
 */
8760 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8761 				   struct intel_crtc_state *crtc_state)
8763 	struct drm_device *dev = crtc->base.dev;
8764 	struct drm_i915_private *dev_priv = to_i915(dev);
8765 	const struct intel_limit *limit;
8768 	memset(&crtc_state->dpll_hw_state, 0,
8769 	       sizeof(crtc_state->dpll_hw_state));
8771 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8772 		if (intel_panel_use_ssc(dev_priv)) {
8773 			refclk = dev_priv->vbt.lvds_ssc_freq;
8774 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8777 		limit = &intel_limits_i9xx_lvds;
8779 		limit = &intel_limits_i9xx_sdvo;
8782 	if (!crtc_state->clock_set &&
8783 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8784 				 refclk, NULL, &crtc_state->dpll)) {
8785 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8789 	i9xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * chv_crtc_compute_clock - pick PLL dividers for Cherryview.  CHV uses a
 * fixed 100 MHz refclk and a single limit table; the result is encoded by
 * chv_compute_dpll().  Error path / return values are elided from this
 * view.
 */
8794 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8795 				  struct intel_crtc_state *crtc_state)
8797 	int refclk = 100000;
8798 	const struct intel_limit *limit = &intel_limits_chv;
8800 	memset(&crtc_state->dpll_hw_state, 0,
8801 	       sizeof(crtc_state->dpll_hw_state));
8803 	if (!crtc_state->clock_set &&
8804 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8805 				refclk, NULL, &crtc_state->dpll)) {
8806 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8810 	chv_compute_dpll(crtc, crtc_state);
/*
 * vlv_crtc_compute_clock - pick PLL dividers for Valleyview.  Like CHV:
 * fixed 100 MHz refclk, single limit table, encoded by vlv_compute_dpll().
 * Error path / return values are elided from this view.
 */
8815 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8816 				  struct intel_crtc_state *crtc_state)
8818 	int refclk = 100000;
8819 	const struct intel_limit *limit = &intel_limits_vlv;
8821 	memset(&crtc_state->dpll_hw_state, 0,
8822 	       sizeof(crtc_state->dpll_hw_state));
8824 	if (!crtc_state->clock_set &&
8825 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8826 				refclk, NULL, &crtc_state->dpll)) {
8827 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8831 	vlv_compute_dpll(crtc, crtc_state);
/*
 * i9xx_has_pfit - does this GMCH platform have a panel fitter?  I830 is
 * excluded (its early return body is elided here); otherwise gen4+,
 * Pineview and mobile parts have one.
 */
8836 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8838 	if (IS_I830(dev_priv))
8841 	return INTEL_GEN(dev_priv) >= 4 ||
8842 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
/*
 * i9xx_get_pfit_config - read back the GMCH panel fitter state into
 * @pipe_config->gmch_pfit, but only if the fitter exists, is enabled,
 * and is attached to this crtc's pipe (pre-gen4 hardwires it to pipe B).
 * The early-return bodies for the negative checks are elided here.
 */
8845 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8846 				 struct intel_crtc_state *pipe_config)
8848 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8851 	if (!i9xx_has_pfit(dev_priv))
8854 	tmp = I915_READ(PFIT_CONTROL);
8855 	if (!(tmp & PFIT_ENABLE))
8858 	/* Check whether the pfit is attached to our pipe. */
8859 	if (INTEL_GEN(dev_priv) < 4) {
8860 		if (crtc->pipe != PIPE_B)
8863 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8867 	pipe_config->gmch_pfit.control = tmp;
8868 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
/*
 * vlv_crtc_clock_get - read the VLV PLL dividers back over DPIO and
 * recompute the port clock (100 MHz refclk).  Bails out when the DPLL is
 * unused (DSI); the return statement for that case is elided here.
 */
8871 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8872 			       struct intel_crtc_state *pipe_config)
8874 	struct drm_device *dev = crtc->base.dev;
8875 	struct drm_i915_private *dev_priv = to_i915(dev);
8876 	enum pipe pipe = crtc->pipe;
8879 	int refclk = 100000;
8881 	/* In case of DSI, DPLL will not be used */
8882 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8885 	vlv_dpio_get(dev_priv);
8886 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8887 	vlv_dpio_put(dev_priv);
	/* Unpack the fields written by vlv_prepare_pll() into DW3. */
8889 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8890 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8891 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8892 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8893 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8895 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
/*
 * i9xx_get_initial_plane_config - reconstruct the firmware/BIOS-programmed
 * primary plane framebuffer (tiling, rotation, format, base, pitch, size)
 * from the plane registers, so the boot framebuffer can be inherited.
 * Allocates a struct intel_framebuffer which is handed to the caller via
 * @plane_config->fb (caller takes ownership).
 *
 * NOTE(review): chunk is elided (early returns and some braces missing);
 * comments describe visible lines only.
 */
8899 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8900 			      struct intel_initial_plane_config *plane_config)
8902 	struct drm_device *dev = crtc->base.dev;
8903 	struct drm_i915_private *dev_priv = to_i915(dev);
8904 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8905 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8907 	u32 val, base, offset;
8908 	int fourcc, pixel_format;
8909 	unsigned int aligned_height;
8910 	struct drm_framebuffer *fb;
8911 	struct intel_framebuffer *intel_fb;
	/* Nothing to do if the plane is not currently enabled. */
8913 	if (!plane->get_hw_state(plane, &pipe))
8916 	WARN_ON(pipe != crtc->pipe);
8918 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8920 		DRM_DEBUG_KMS("failed to alloc fb\n");
8924 	fb = &intel_fb->base;
8928 	val = I915_READ(DSPCNTR(i9xx_plane));
	/* Tiling and rotation readout only exist on gen4+. */
8930 	if (INTEL_GEN(dev_priv) >= 4) {
8931 		if (val & DISPPLANE_TILED) {
8932 			plane_config->tiling = I915_TILING_X;
8933 			fb->modifier = I915_FORMAT_MOD_X_TILED;
8936 		if (val & DISPPLANE_ROTATE_180)
8937 			plane_config->rotation = DRM_MODE_ROTATE_180;
	/* CHV pipe B additionally supports horizontal mirroring. */
8940 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8941 	    val & DISPPLANE_MIRROR)
8942 		plane_config->rotation |= DRM_MODE_REFLECT_X;
8944 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8945 	fourcc = i9xx_format_to_fourcc(pixel_format);
8946 	fb->format = drm_format_info(fourcc);
	/* Surface base/offset registers differ per generation. */
8948 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8949 		offset = I915_READ(DSPOFFSET(i9xx_plane));
8950 		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8951 	} else if (INTEL_GEN(dev_priv) >= 4) {
8952 		if (plane_config->tiling)
8953 			offset = I915_READ(DSPTILEOFF(i9xx_plane));
8955 			offset = I915_READ(DSPLINOFF(i9xx_plane));
8956 		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8958 		base = I915_READ(DSPADDR(i9xx_plane));
8960 	plane_config->base = base;
	/* Fb dimensions come from PIPESRC ((w-1)<<16 | (h-1) packing). */
8962 	val = I915_READ(PIPESRC(pipe));
8963 	fb->width = ((val >> 16) & 0xfff) + 1;
8964 	fb->height = ((val >> 0) & 0xfff) + 1;
8966 	val = I915_READ(DSPSTRIDE(i9xx_plane));
8967 	fb->pitches[0] = val & 0xffffffc0;
8969 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
8971 	plane_config->size = fb->pitches[0] * aligned_height;
8973 	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8974 		      crtc->base.name, plane->base.name, fb->width, fb->height,
8975 		      fb->format->cpp[0] * 8, base, fb->pitches[0],
8976 		      plane_config->size);
8978 	plane_config->fb = intel_fb;
/*
 * chv_crtc_clock_get - read the CHV PLL dividers back over DPIO and
 * recompute the port clock (100 MHz refclk).  Reverses the register
 * layout written by chv_prepare_pll(), including the 22-bit fractional
 * M2 split.  Bails out when the DPLL is unused (DSI); that return
 * statement is elided here.
 */
8981 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8982 			       struct intel_crtc_state *pipe_config)
8984 	struct drm_device *dev = crtc->base.dev;
8985 	struct drm_i915_private *dev_priv = to_i915(dev);
8986 	enum pipe pipe = crtc->pipe;
8987 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8989 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8990 	int refclk = 100000;
8992 	/* In case of DSI, DPLL will not be used */
8993 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8996 	vlv_dpio_get(dev_priv);
8997 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8998 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8999 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
9000 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
9001 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
9002 	vlv_dpio_put(dev_priv);
	/* Reassemble m2 as (integer << 22) | fraction, fraction only when
	 * fractional division was enabled. */
9004 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
9005 	clock.m2 = (pll_dw0 & 0xff) << 22;
9006 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
9007 		clock.m2 |= pll_dw2 & 0x3fffff;
9008 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
9009 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
9010 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
9012 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
/*
 * bdw_get_pipemisc_output_format - read PIPEMISC and classify the pipe's
 * output format: YCbCr 4:2:0 (full-blend only — WARNs otherwise),
 * YCbCr 4:4:4, or RGB.
 */
9015 static enum intel_output_format
9016 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9018 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9021 	tmp = I915_READ(PIPEMISC(crtc->pipe));
9023 	if (tmp & PIPEMISC_YUV420_ENABLE) {
9024 		/* We support 4:2:0 in full blend mode only */
9025 		WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9027 		return INTEL_OUTPUT_FORMAT_YCBCR420;
9028 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9029 		return INTEL_OUTPUT_FORMAT_YCBCR444;
	/* NOTE(review): the `} else {` line is elided before this return. */
9031 		return INTEL_OUTPUT_FORMAT_RGB;
/*
 * i9xx_get_pipe_color_config - read gamma/CSC enable state for the pipe
 * from the primary plane's DSPCNTR (on these platforms the plane control
 * register carries the pipe-wide color bits).  CSC enable only exists on
 * non-GMCH hardware.
 */
9035 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9037 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9038 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9039 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9040 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9043 	tmp = I915_READ(DSPCNTR(i9xx_plane));
9045 	if (tmp & DISPPLANE_GAMMA_ENABLE)
9046 		crtc_state->gamma_enable = true;
9048 	if (!HAS_GMCH(dev_priv) &&
9049 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
9050 		crtc_state->csc_enable = true;
/*
 * i9xx_get_pipe_config - full hardware-state readout for a GMCH-class
 * pipe: takes a power-domain wakeref, then reads PIPECONF (enabled, bpc,
 * color range, gamma mode, double-wide), timings, source size, pfit,
 * pixel multiplier, DPLL state, and finally derives the port clock and a
 * fallback dotclock.  Returns whether readout succeeded (the boolean
 * plumbing and the early-exit labels are elided from this view).
 */
9053 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
9054 				 struct intel_crtc_state *pipe_config)
9056 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9057 	enum intel_display_power_domain power_domain;
9058 	intel_wakeref_t wakeref;
	/* Readout requires the pipe power well; bail if it's down (the
	 * wakeref check is elided here). */
9062 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9063 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9067 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9068 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9069 	pipe_config->shared_dpll = NULL;
9070 	pipe_config->master_transcoder = INVALID_TRANSCODER;
9074 	tmp = I915_READ(PIPECONF(crtc->pipe));
9075 	if (!(tmp & PIPECONF_ENABLE))
	/* bpc readout only exists on G4X/VLV/CHV. */
9078 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9079 	    IS_CHERRYVIEW(dev_priv)) {
9080 		switch (tmp & PIPECONF_BPC_MASK) {
9082 			pipe_config->pipe_bpp = 18;
9085 			pipe_config->pipe_bpp = 24;
9087 		case PIPECONF_10BPC:
9088 			pipe_config->pipe_bpp = 30;
9095 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9096 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
9097 		pipe_config->limited_color_range = true;
9099 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
9100 		PIPECONF_GAMMA_MODE_SHIFT;
9102 	if (IS_CHERRYVIEW(dev_priv))
9103 		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
9105 	i9xx_get_pipe_color_config(pipe_config);
9106 	intel_color_get_config(pipe_config);
9108 	if (INTEL_GEN(dev_priv) < 4)
9109 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
9111 	intel_get_pipe_timings(crtc, pipe_config);
9112 	intel_get_pipe_src_size(crtc, pipe_config);
9114 	i9xx_get_pfit_config(crtc, pipe_config);
	/* Pixel multiplier readout varies by generation / platform. */
9116 	if (INTEL_GEN(dev_priv) >= 4) {
9117 		/* No way to read it out on pipes B and C */
9118 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
9119 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
9121 			tmp = I915_READ(DPLL_MD(crtc->pipe));
9122 		pipe_config->pixel_multiplier =
9123 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
9124 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
9125 		pipe_config->dpll_hw_state.dpll_md = tmp;
9126 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
9127 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
9128 		tmp = I915_READ(DPLL(crtc->pipe));
9129 		pipe_config->pixel_multiplier =
9130 			((tmp & SDVO_MULTIPLIER_MASK)
9131 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
9133 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
9134 		 * port and will be fixed up in the encoder->get_config
9136 		pipe_config->pixel_multiplier = 1;
9138 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
9139 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
9140 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
9141 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
9143 		/* Mask out read-only status bits. */
9144 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
9145 						     DPLL_PORTC_READY_MASK |
9146 						     DPLL_PORTB_READY_MASK);
9149 	if (IS_CHERRYVIEW(dev_priv))
9150 		chv_crtc_clock_get(crtc, pipe_config);
9151 	else if (IS_VALLEYVIEW(dev_priv))
9152 		vlv_crtc_clock_get(crtc, pipe_config);
9154 		i9xx_crtc_clock_get(crtc, pipe_config);
9157 	 * Normally the dotclock is filled in by the encoder .get_config()
9158 	 * but in case the pipe is enabled w/o any ports we need a sane
9161 	pipe_config->hw.adjusted_mode.crtc_clock =
9162 		pipe_config->port_clock / pipe_config->pixel_multiplier;
	/* Drop the power-domain reference taken at entry. */
9167 	intel_display_power_put(dev_priv, power_domain, wakeref);
9172 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
9174 struct intel_encoder *encoder;
9177 bool has_lvds = false;
9178 bool has_cpu_edp = false;
9179 bool has_panel = false;
9180 bool has_ck505 = false;
9181 bool can_ssc = false;
9182 bool using_ssc_source = false;
9184 /* We need to take the global config into account */
9185 for_each_intel_encoder(&dev_priv->drm, encoder) {
9186 switch (encoder->type) {
9187 case INTEL_OUTPUT_LVDS:
9191 case INTEL_OUTPUT_EDP:
9193 if (encoder->port == PORT_A)
9201 if (HAS_PCH_IBX(dev_priv)) {
9202 has_ck505 = dev_priv->vbt.display_clock_mode;
9203 can_ssc = has_ck505;
9209 /* Check if any DPLLs are using the SSC source */
9210 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9211 u32 temp = I915_READ(PCH_DPLL(i));
9213 if (!(temp & DPLL_VCO_ENABLE))
9216 if ((temp & PLL_REF_INPUT_MASK) ==
9217 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9218 using_ssc_source = true;
9223 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9224 has_panel, has_lvds, has_ck505, using_ssc_source);
9226 /* Ironlake: try to setup display ref clock before DPLL
9227 * enabling. This is only under driver's control after
9228 * PCH B stepping, previous chipset stepping should be
9229 * ignoring this setting.
9231 val = I915_READ(PCH_DREF_CONTROL);
9233 /* As we must carefully and slowly disable/enable each source in turn,
9234 * compute the final state we want first and check if we need to
9235 * make any changes at all.
9238 final &= ~DREF_NONSPREAD_SOURCE_MASK;
9240 final |= DREF_NONSPREAD_CK505_ENABLE;
9242 final |= DREF_NONSPREAD_SOURCE_ENABLE;
9244 final &= ~DREF_SSC_SOURCE_MASK;
9245 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9246 final &= ~DREF_SSC1_ENABLE;
9249 final |= DREF_SSC_SOURCE_ENABLE;
9251 if (intel_panel_use_ssc(dev_priv) && can_ssc)
9252 final |= DREF_SSC1_ENABLE;
9255 if (intel_panel_use_ssc(dev_priv) && can_ssc)
9256 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9258 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9260 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9261 } else if (using_ssc_source) {
9262 final |= DREF_SSC_SOURCE_ENABLE;
9263 final |= DREF_SSC1_ENABLE;
9269 /* Always enable nonspread source */
9270 val &= ~DREF_NONSPREAD_SOURCE_MASK;
9273 val |= DREF_NONSPREAD_CK505_ENABLE;
9275 val |= DREF_NONSPREAD_SOURCE_ENABLE;
9278 val &= ~DREF_SSC_SOURCE_MASK;
9279 val |= DREF_SSC_SOURCE_ENABLE;
9281 /* SSC must be turned on before enabling the CPU output */
9282 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9283 DRM_DEBUG_KMS("Using SSC on panel\n");
9284 val |= DREF_SSC1_ENABLE;
9286 val &= ~DREF_SSC1_ENABLE;
9288 /* Get SSC going before enabling the outputs */
9289 I915_WRITE(PCH_DREF_CONTROL, val);
9290 POSTING_READ(PCH_DREF_CONTROL);
9293 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9295 /* Enable CPU source on CPU attached eDP */
9297 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9298 DRM_DEBUG_KMS("Using SSC on eDP\n");
9299 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9301 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9303 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9305 I915_WRITE(PCH_DREF_CONTROL, val);
9306 POSTING_READ(PCH_DREF_CONTROL);
9309 DRM_DEBUG_KMS("Disabling CPU source output\n");
9311 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9313 /* Turn off CPU output */
9314 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9316 I915_WRITE(PCH_DREF_CONTROL, val);
9317 POSTING_READ(PCH_DREF_CONTROL);
9320 if (!using_ssc_source) {
9321 DRM_DEBUG_KMS("Disabling SSC source\n");
9323 /* Turn off the SSC source */
9324 val &= ~DREF_SSC_SOURCE_MASK;
9325 val |= DREF_SSC_SOURCE_DISABLE;
9328 val &= ~DREF_SSC1_ENABLE;
9330 I915_WRITE(PCH_DREF_CONTROL, val);
9331 POSTING_READ(PCH_DREF_CONTROL);
9336 BUG_ON(val != final);
/*
 * lpt_reset_fdi_mphy - pulse the FDI mPHY reset on LPT.
 *
 * Asserts FDI_MPHY_IOSFSB_RESET_CTL in SOUTH_CHICKEN2, waits (100 us)
 * for the status bit to latch, then de-asserts and waits for the status
 * bit to clear.  Timeouts are only logged; there is no error return.
 */
9339 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9343 tmp = I915_READ(SOUTH_CHICKEN2);
9344 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9345 I915_WRITE(SOUTH_CHICKEN2, tmp);
9347 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
9348 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9349 DRM_ERROR("FDI mPHY reset assert timeout\n");
9351 tmp = I915_READ(SOUTH_CHICKEN2);
9352 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9353 I915_WRITE(SOUTH_CHICKEN2, tmp);
9355 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
9356 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9357 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
9360 /* WaMPhyProgramming:hsw */
/*
 * lpt_program_fdi_mphy - apply the WaMPhyProgramming:hsw workaround.
 *
 * Read-modify-writes a fixed list of mPHY registers over the sideband
 * interface (SBI_MPHY).  The register offsets come in pairs (0x2xxx /
 * 0x21xx etc.) — presumably one per FDI lane/channel pair; the magic
 * values are workaround constants from the workaround definition.
 * NOTE(review): some OR-value lines are missing from this extract.
 */
9361 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9365 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9366 tmp &= ~(0xFF << 24);
9367 tmp |= (0x12 << 24);
9368 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9370 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9372 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9374 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9376 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9378 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9379 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9380 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9382 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9383 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9384 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9386 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9389 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9391 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9394 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9396 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9399 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9401 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9404 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9406 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9407 tmp &= ~(0xFF << 16);
9408 tmp |= (0x1C << 16);
9409 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
9411 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
9412 tmp &= ~(0xFF << 16);
9413 tmp |= (0x1C << 16);
9414 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
9416 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
9418 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
9420 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
9422 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
9424 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
9425 tmp &= ~(0xF << 28);
9427 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
9429 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
9430 tmp &= ~(0xF << 28);
9432 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
9435 /* Implements 3 different sequences from BSpec chapter "Display iCLK
9436 * Programming" based on the parameters passed:
9437 * - Sequence to enable CLKOUT_DP
9438 * - Sequence to enable CLKOUT_DP without spread
9439 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
/*
 * @with_spread: also un-gate the spread path (required when @with_fdi)
 * @with_fdi:    additionally reset + program the FDI mPHY (big LPT only)
 *
 * All sideband accesses are serialized under sb_lock.
 */
9441 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9442 bool with_spread, bool with_fdi)
/* Parameter sanity: FDI implies spread, and LPT-LP has no FDI at all */
9446 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9448 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9449 with_fdi, "LP PCH doesn't have FDI\n"))
9452 mutex_lock(&dev_priv->sb_lock);
/* Enable the SSC block but keep the path alternate (gated) for now */
9454 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9455 tmp &= ~SBI_SSCCTL_DISABLE;
9456 tmp |= SBI_SSCCTL_PATHALT;
9457 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
/* With spread: release PATHALT, then (for FDI) program the mPHY */
9462 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9463 tmp &= ~SBI_SSCCTL_PATHALT;
9464 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9467 lpt_reset_fdi_mphy(dev_priv);
9468 lpt_program_fdi_mphy(dev_priv);
/* Finally enable the CLKOUT_DP buffer (register differs on LPT-LP) */
9472 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9473 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9474 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9475 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9477 mutex_unlock(&dev_priv->sb_lock);
9480 /* Sequence to disable CLKOUT_DP */
/*
 * Reverse of lpt_enable_clkout_dp(): disable the CLKOUT_DP buffer
 * first, then gate the path (PATHALT) before turning the SSC block off.
 * Holds sb_lock around all sideband traffic.
 */
9481 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9485 mutex_lock(&dev_priv->sb_lock);
9487 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9488 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9489 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9490 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9492 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9493 if (!(tmp & SBI_SSCCTL_DISABLE)) {
9494 if (!(tmp & SBI_SSCCTL_PATHALT)) {
9495 tmp |= SBI_SSCCTL_PATHALT;
9496 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
/* A delay presumably sits between PATHALT and DISABLE (line missing here) */
9499 tmp |= SBI_SSCCTL_DISABLE;
9500 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9503 mutex_unlock(&dev_priv->sb_lock);
/*
 * BEND_IDX maps a clock-bend step count in [-50, 50] (multiples of 5)
 * to an index into sscdivintphase[]: -50 -> 0, 0 -> 10, 50 -> 20.
 */
9506 #define BEND_IDX(steps) ((50 + (steps)) / 5)
/*
 * Per-bend-step SSCDIVINTPHASE register values; adjacent step pairs
 * share a value.  Consumed by lpt_bend_clkout_dp() below.
 */
9508 static const u16 sscdivintphase[] = {
9509 [BEND_IDX( 50)] = 0x3B23,
9510 [BEND_IDX( 45)] = 0x3B23,
9511 [BEND_IDX( 40)] = 0x3C23,
9512 [BEND_IDX( 35)] = 0x3C23,
9513 [BEND_IDX( 30)] = 0x3D23,
9514 [BEND_IDX( 25)] = 0x3D23,
9515 [BEND_IDX( 20)] = 0x3E23,
9516 [BEND_IDX( 15)] = 0x3E23,
9517 [BEND_IDX( 10)] = 0x3F23,
9518 [BEND_IDX(  5)] = 0x3F23,
9519 [BEND_IDX(  0)] = 0x0025,
9520 [BEND_IDX( -5)] = 0x0025,
9521 [BEND_IDX(-10)] = 0x0125,
9522 [BEND_IDX(-15)] = 0x0125,
9523 [BEND_IDX(-20)] = 0x0225,
9524 [BEND_IDX(-25)] = 0x0225,
9525 [BEND_IDX(-30)] = 0x0325,
9526 [BEND_IDX(-35)] = 0x0325,
9527 [BEND_IDX(-40)] = 0x0425,
9528 [BEND_IDX(-45)] = 0x0425,
9529 [BEND_IDX(-50)] = 0x0525,
/*
 * lpt_bend_clkout_dp - apply clock bending to CLKOUT_DP.
 *
9534 * steps -50 to 50 inclusive, in steps of 5
9535 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9536 * change in clock period = -(steps / 10) * 5.787 ps
 *
 * Validates @steps, then programs SBI_SSCDITHPHASE (half-step dither
 * for odd multiples of 5) and SBI_SSCDIVINTPHASE from the table above,
 * all under sb_lock.
 */
9538 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9541 int idx = BEND_IDX(steps);
9543 if (WARN_ON(steps % 5 != 0))
9546 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9549 mutex_lock(&dev_priv->sb_lock);
/* Odd multiple of 5: dither between the two neighbouring settings */
9551 if (steps % 10 != 0)
9555 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9557 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9559 tmp |= sscdivintphase[idx];
9560 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9562 mutex_unlock(&dev_priv->sb_lock);
/*
 * spll_uses_pch_ssc - does the (enabled) SPLL reference the PCH SSC?
 *
 * False when the SPLL is disabled.  On HSW the muxed-SSC reference maps
 * to PCH SSC only when the CPU SSC fuse is not set; on BDW there is
 * also a dedicated PCH-SSC reference select.
 */
9567 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9569 u32 fuse_strap = I915_READ(FUSE_STRAP);
9570 u32 ctl = I915_READ(SPLL_CTL);
9572 if ((ctl & SPLL_PLL_ENABLE) == 0)
9575 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9576 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9579 if (IS_BROADWELL(dev_priv) &&
9580 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
/*
 * wrpll_uses_pch_ssc - does the (enabled) WRPLL @id reference PCH SSC?
 *
 * Mirrors spll_uses_pch_ssc(): false when disabled; true for the
 * explicit PCH-SSC reference, or (BDW / HSW-ULT) for the muxed SSC
 * reference when the CPU SSC fuse is not set.
 */
9586 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9587 enum intel_dpll_id id)
9589 u32 fuse_strap = I915_READ(FUSE_STRAP);
9590 u32 ctl = I915_READ(WRPLL_CTL(id));
9592 if ((ctl & WRPLL_PLL_ENABLE) == 0)
9595 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9598 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9599 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9600 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
/*
 * lpt_init_pch_refclk - initialize the LPT PCH reference clock.
 *
 * Records which PLLs the BIOS left referencing the PCH SSC (so we do
 * not yank their reference away), then either enables CLKOUT_DP with
 * spread + FDI config (when an analog/FDI output exists) or disables
 * it entirely.  NOTE(review): the encoder-loop body that sets has_fdi
 * is missing from this extract.
 */
9606 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9608 struct intel_encoder *encoder;
9609 bool has_fdi = false;
9611 for_each_intel_encoder(&dev_priv->drm, encoder) {
9612 switch (encoder->type) {
9613 case INTEL_OUTPUT_ANALOG:
9622 * The BIOS may have decided to use the PCH SSC
9623 * reference so we must not disable it until the
9624 * relevant PLLs have stopped relying on it. We'll
9625 * just leave the PCH SSC reference enabled in case
9626 * any active PLL is using it. It will get disabled
9627 * after runtime suspend if we don't have FDI.
9629 * TODO: Move the whole reference clock handling
9630 * to the modeset sequence proper so that we can
9631 * actually enable/disable/reconfigure these things
9632 * safely. To do that we need to introduce a real
9633 * clock hierarchy. That would also allow us to do
9634 * clock bending finally.
9636 dev_priv->pch_ssc_use = 0;
9638 if (spll_uses_pch_ssc(dev_priv)) {
9639 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9640 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9643 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9644 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9645 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9648 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9649 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9650 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
/* Some PLL still depends on PCH SSC: leave the refclk untouched */
9653 if (dev_priv->pch_ssc_use)
/* FDI present: no bend, enable CLKOUT_DP with spread for FDI */
9657 lpt_bend_clkout_dp(dev_priv, 0);
9658 lpt_enable_clkout_dp(dev_priv, true, true);
9660 lpt_disable_clkout_dp(dev_priv);
/*
9665 * Initialize reference clocks when the driver loads
 *
 * Dispatches to the IBX/CPT or LPT implementation based on PCH type;
 * other PCH generations need no refclk setup here.
 */
9667 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9669 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9670 ironlake_init_pch_refclk(dev_priv);
9671 else if (HAS_PCH_LPT(dev_priv))
9672 lpt_init_pch_refclk(dev_priv);
/*
 * ironlake_set_pipeconf - program PIPECONF for an ILK-style pipe from
 * the committed crtc state: bpc, dithering, interlace mode, limited
 * color range, YCbCr output colorspace, gamma mode and frame-start
 * delay.  Ends with a posting read to flush the write.
 */
9675 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9677 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9678 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9679 enum pipe pipe = crtc->pipe;
9684 switch (crtc_state->pipe_bpp) {
9686 val |= PIPECONF_6BPC;
9689 val |= PIPECONF_8BPC;
9692 val |= PIPECONF_10BPC;
9695 val |= PIPECONF_12BPC;
9698 /* Case prevented by intel_choose_pipe_bpp_dither. */
9702 if (crtc_state->dither)
9703 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9705 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9706 val |= PIPECONF_INTERLACED_ILK;
9708 val |= PIPECONF_PROGRESSIVE;
/* Limited range + non-RGB output is an invalid combination: */
9711 * This would end up with an odd purple hue over
9712 * the entire display. Make sure we don't do it.
9714 WARN_ON(crtc_state->limited_color_range &&
9715 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9717 if (crtc_state->limited_color_range)
9718 val |= PIPECONF_COLOR_RANGE_SELECT;
9720 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9721 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9723 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9725 val |= PIPECONF_FRAME_START_DELAY(0);
9727 I915_WRITE(PIPECONF(pipe), val);
9728 POSTING_READ(PIPECONF(pipe));
/*
 * haswell_set_pipeconf - program PIPECONF for a HSW+ transcoder.
 *
 * Much smaller than the ILK variant: bpc/colorspace largely moved to
 * other registers on this platform.  Sets dithering (HSW only),
 * interlace vs progressive, and the HSW YUV output colorspace bit.
 */
9731 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9733 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9734 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9735 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9738 if (IS_HASWELL(dev_priv) && crtc_state->dither)
9739 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9741 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9742 val |= PIPECONF_INTERLACED_ILK;
9744 val |= PIPECONF_PROGRESSIVE;
9746 if (IS_HASWELL(dev_priv) &&
9747 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9748 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9750 I915_WRITE(PIPECONF(cpu_transcoder), val);
9751 POSTING_READ(PIPECONF(cpu_transcoder));
/*
 * bdw_set_pipemisc - program PIPEMISC (BDW+): dither bpc/enable,
 * YCbCr 444/420 output selection, and the ICL+ HDR-mode precision bit
 * when only HDR-capable planes (plus the cursor) are active.
 */
9754 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9756 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9757 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9760 switch (crtc_state->pipe_bpp) {
9762 val |= PIPEMISC_DITHER_6_BPC;
9765 val |= PIPEMISC_DITHER_8_BPC;
9768 val |= PIPEMISC_DITHER_10_BPC;
9771 val |= PIPEMISC_DITHER_12_BPC;
9774 MISSING_CASE(crtc_state->pipe_bpp);
9778 if (crtc_state->dither)
9779 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9781 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9782 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9783 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9785 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9786 val |= PIPEMISC_YUV420_ENABLE |
9787 PIPEMISC_YUV420_MODE_FULL_BLEND;
/* ICL+: precision mode only when every active plane can do HDR */
9789 if (INTEL_GEN(dev_priv) >= 11 &&
9790 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9791 BIT(PLANE_CURSOR))) == 0)
9792 val |= PIPEMISC_HDR_MODE_PRECISION;
9794 I915_WRITE(PIPEMISC(crtc->pipe), val);
/*
 * bdw_get_pipemisc_bpp - read back the pipe bpp from the PIPEMISC
 * dither-bpc field (readout counterpart of bdw_set_pipemisc()).
 * Return values (per case) are missing from this extract but follow
 * the 6/8/10/12-bpc * 3 channel convention used elsewhere.
 */
9797 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9799 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9802 tmp = I915_READ(PIPEMISC(crtc->pipe));
9804 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9805 case PIPEMISC_DITHER_6_BPC:
9807 case PIPEMISC_DITHER_8_BPC:
9809 case PIPEMISC_DITHER_10_BPC:
9811 case PIPEMISC_DITHER_12_BPC:
/*
 * ironlake_get_lanes_required - minimum FDI lane count for a mode.
 * @target_clock: pixel clock (kHz)
 * @link_bw: per-lane link bandwidth
 * @bpp: bits per pixel
 *
 * Returns ceil(bandwidth / lane-bandwidth) with a 5% margin for SSC.
 */
9819 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9822 * Account for spread spectrum to avoid
9823 * oversubscribing the link. Max center spread
9824 * is 2.5%; use 5% for safety's sake.
9826 u32 bps = target_clock * bpp * 21 / 20;
9827 return DIV_ROUND_UP(bps, link_bw * 8);
/* True when the PLL's effective M/N ratio is below @factor, i.e. the
 * feedback coarse/fine bits need tuning (see ironlake_compute_dpll). */
9830 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9832 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
/*
 * ironlake_compute_dpll - derive the ILK DPLL/FP register values from
 * the already-computed clock dividers in @crtc_state->dpll.
 * @reduced_clock: optional lower-power divider set for FP1, or NULL.
 *
 * Results are stored in crtc_state->dpll_hw_state (dpll/fp0/fp1); no
 * hardware is touched here.  NOTE(review): the factor assignments and
 * a few FP bit manipulations are missing from this extract.
 */
9835 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9836 struct intel_crtc_state *crtc_state,
9837 struct dpll *reduced_clock)
9839 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9843 /* Enable autotuning of the PLL clock (if permissible) */
9845 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9846 if ((intel_panel_use_ssc(dev_priv) &&
9847 dev_priv->vbt.lvds_ssc_freq == 100000) ||
9848 (HAS_PCH_IBX(dev_priv) &&
9849 intel_is_dual_link_lvds(dev_priv)))
9851 } else if (crtc_state->sdvo_tv_clock) {
9855 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9857 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9860 if (reduced_clock) {
9861 fp2 = i9xx_dpll_compute_fp(reduced_clock);
9863 if (reduced_clock->m < factor * reduced_clock->n)
/* Mode select: LVDS vs DAC/serial */
9871 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9872 dpll |= DPLLB_MODE_LVDS;
9874 dpll |= DPLLB_MODE_DAC_SERIAL;
9876 dpll |= (crtc_state->pixel_multiplier - 1)
9877 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9879 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9880 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9881 dpll |= DPLL_SDVO_HIGH_SPEED;
9883 if (intel_crtc_has_dp_encoder(crtc_state))
9884 dpll |= DPLL_SDVO_HIGH_SPEED;
9887 * The high speed IO clock is only really required for
9888 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9889 * possible to share the DPLL between CRT and HDMI. Enabling
9890 * the clock needlessly does no real harm, except use up a
9891 * bit of power potentially.
9893 * We'll limit this to IVB with 3 pipes, since it has only two
9894 * DPLLs and so DPLL sharing is the only way to get three pipes
9895 * driving PCH ports at the same time. On SNB we could do this,
9896 * and potentially avoid enabling the second DPLL, but it's not
9897 * clear if it''s a win or loss power wise. No point in doing
9898 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9900 if (INTEL_NUM_PIPES(dev_priv) == 3 &&
9901 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9902 dpll |= DPLL_SDVO_HIGH_SPEED;
9904 /* compute bitmask from p1 value */
9905 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9907 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9909 switch (crtc_state->dpll.p2) {
9911 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9914 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9917 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9920 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
/* Reference select: spread spectrum for SSC LVDS panels, else DREFCLK */
9924 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9925 intel_panel_use_ssc(dev_priv))
9926 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9928 dpll |= PLL_REF_INPUT_DREFCLK;
9930 dpll |= DPLL_VCO_ENABLE;
9932 crtc_state->dpll_hw_state.dpll = dpll;
9933 crtc_state->dpll_hw_state.fp0 = fp;
9934 crtc_state->dpll_hw_state.fp1 = fp2;
/*
 * ironlake_crtc_compute_clock - compute PCH PLL state for an ILK crtc.
 *
 * Chooses the reference clock (120 MHz, or the VBT SSC frequency for
 * SSC LVDS) and the divider limits (single/dual-link LVDS, 100 MHz
 * refclk variants, or DAC), finds best dividers unless userspace set
 * them, fills dpll_hw_state and reserves a shared DPLL.
 *
 * Returns 0 on success, negative error code on failure (exact values
 * are on lines missing from this extract).
 */
9937 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9938 struct intel_crtc_state *crtc_state)
9940 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9941 struct intel_atomic_state *state =
9942 to_intel_atomic_state(crtc_state->uapi.state);
9943 const struct intel_limit *limit;
9944 int refclk = 120000;
9946 memset(&crtc_state->dpll_hw_state, 0,
9947 sizeof(crtc_state->dpll_hw_state));
9949 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9950 if (!crtc_state->has_pch_encoder)
9953 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9954 if (intel_panel_use_ssc(dev_priv)) {
9955 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9956 dev_priv->vbt.lvds_ssc_freq);
9957 refclk = dev_priv->vbt.lvds_ssc_freq;
9960 if (intel_is_dual_link_lvds(dev_priv)) {
9961 if (refclk == 100000)
9962 limit = &intel_limits_ironlake_dual_lvds_100m;
9964 limit = &intel_limits_ironlake_dual_lvds;
9966 if (refclk == 100000)
9967 limit = &intel_limits_ironlake_single_lvds_100m;
9969 limit = &intel_limits_ironlake_single_lvds;
9972 limit = &intel_limits_ironlake_dac;
9975 if (!crtc_state->clock_set &&
9976 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9977 refclk, NULL, &crtc_state->dpll)) {
9978 DRM_ERROR("Couldn't find PLL settings for mode!\n");
/* no reduced (downclocked) dividers on this path */
9982 ironlake_compute_dpll(crtc, crtc_state, NULL);
9984 if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
9985 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9986 pipe_name(crtc->pipe));
/*
 * intel_pch_transcoder_get_m_n - read the PCH transcoder link M/N and
 * data M/N values (set 1) into @m_n; TU size is extracted from the
 * data-M register's TU field.
 */
9993 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9994 struct intel_link_m_n *m_n)
9996 struct drm_device *dev = crtc->base.dev;
9997 struct drm_i915_private *dev_priv = to_i915(dev);
9998 enum pipe pipe = crtc->pipe;
10000 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
10001 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
10002 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
10004 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
10005 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
10006 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/*
 * intel_cpu_transcoder_get_m_n - read the CPU transcoder M/N values.
 * @m2_n2: optional second set, only read when non-NULL and the
 *         transcoder actually has M2/N2 registers.
 *
 * Gen5+ uses per-transcoder PIPE_* registers; older gens use the
 * per-pipe G4X registers (that fallback branch starts at the line
 * labelled 10036 — the else itself is missing from this extract).
 */
10009 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
10010 enum transcoder transcoder,
10011 struct intel_link_m_n *m_n,
10012 struct intel_link_m_n *m2_n2)
10014 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10015 enum pipe pipe = crtc->pipe;
10017 if (INTEL_GEN(dev_priv) >= 5) {
10018 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
10019 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
10020 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
10022 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
10023 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
10024 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10026 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
10027 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
10028 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
10029 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
10031 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
10032 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
10033 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10036 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
10037 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
10038 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
10040 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
10041 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
10042 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/*
 * intel_dp_get_m_n - read back DP link M/N state: from the PCH
 * transcoder when a PCH encoder is used, otherwise from the CPU
 * transcoder (including the M2/N2 set for DRRS-capable transcoders).
 */
10046 void intel_dp_get_m_n(struct intel_crtc *crtc,
10047 struct intel_crtc_state *pipe_config)
10049 if (pipe_config->has_pch_encoder)
10050 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10052 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10053 &pipe_config->dp_m_n,
10054 &pipe_config->dp_m2_n2);
/* Read back the FDI link M/N values (no second M2/N2 set for FDI). */
10057 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
10058 struct intel_crtc_state *pipe_config)
10060 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10061 &pipe_config->fdi_m_n, NULL);
/*
 * skylake_get_pfit_config - readout of the SKL panel-fitter (scaler).
 *
 * Scans the pipe's scalers for one that is enabled and bound to the
 * pipe (not to a plane), records its position/size into pch_pfit and
 * marks it in-use; sets/clears the crtc's bit in scaler_users
 * accordingly.
 */
10064 static void skylake_get_pfit_config(struct intel_crtc *crtc,
10065 struct intel_crtc_state *pipe_config)
10067 struct drm_device *dev = crtc->base.dev;
10068 struct drm_i915_private *dev_priv = to_i915(dev);
10069 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
10074 /* find scaler attached to this pipe */
10075 for (i = 0; i < crtc->num_scalers; i++) {
10076 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
10077 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
10079 pipe_config->pch_pfit.enabled = true;
10080 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
10081 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
10082 scaler_state->scalers[i].in_use = true;
10087 scaler_state->scaler_id = id;
/* id >= 0 means a scaler was found for this crtc */
10089 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
10091 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
/*
 * skylake_get_initial_plane_config - reconstruct the BIOS-programmed
 * primary plane framebuffer config (SKL+).
 *
 * Reads PLANE_CTL/SURF/OFFSET/SIZE/STRIDE to recover pixel format,
 * tiling modifier, rotation/reflection, base address and pitch, and
 * fills @plane_config with a freshly allocated intel_framebuffer.
 * Bails out early if the plane is disabled or allocation fails.
 */
10096 skylake_get_initial_plane_config(struct intel_crtc *crtc,
10097 struct intel_initial_plane_config *plane_config)
10099 struct drm_device *dev = crtc->base.dev;
10100 struct drm_i915_private *dev_priv = to_i915(dev);
10101 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
10102 enum plane_id plane_id = plane->id;
10104 u32 val, base, offset, stride_mult, tiling, alpha;
10105 int fourcc, pixel_format;
10106 unsigned int aligned_height;
10107 struct drm_framebuffer *fb;
10108 struct intel_framebuffer *intel_fb;
/* Plane disabled by BIOS: nothing to take over */
10110 if (!plane->get_hw_state(plane, &pipe))
10113 WARN_ON(pipe != crtc->pipe);
10115 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10117 DRM_DEBUG_KMS("failed to alloc fb\n");
10121 fb = &intel_fb->base;
10125 val = I915_READ(PLANE_CTL(pipe, plane_id));
/* Format field widened on ICL+ */
10127 if (INTEL_GEN(dev_priv) >= 11)
10128 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
10130 pixel_format = val & PLANE_CTL_FORMAT_MASK;
/* Alpha moved to PLANE_COLOR_CTL on GLK/CNL+ */
10132 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
10133 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
10134 alpha &= PLANE_COLOR_ALPHA_MASK;
10136 alpha = val & PLANE_CTL_ALPHA_MASK;
10139 fourcc = skl_format_to_fourcc(pixel_format,
10140 val & PLANE_CTL_ORDER_RGBX, alpha);
10141 fb->format = drm_format_info(fourcc);
10143 tiling = val & PLANE_CTL_TILED_MASK;
10145 case PLANE_CTL_TILED_LINEAR:
10146 fb->modifier = DRM_FORMAT_MOD_LINEAR;
10148 case PLANE_CTL_TILED_X:
10149 plane_config->tiling = I915_TILING_X;
10150 fb->modifier = I915_FORMAT_MOD_X_TILED;
10152 case PLANE_CTL_TILED_Y:
10153 plane_config->tiling = I915_TILING_Y;
10154 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
10155 fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
10156 I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
10157 I915_FORMAT_MOD_Y_TILED_CCS;
10159 fb->modifier = I915_FORMAT_MOD_Y_TILED;
10161 case PLANE_CTL_TILED_YF:
10162 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
10163 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
10165 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
10168 MISSING_CASE(tiling);
10173 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
10174 * while i915 HW rotation is clockwise, thats why this swapping.
10176 switch (val & PLANE_CTL_ROTATE_MASK) {
10177 case PLANE_CTL_ROTATE_0:
10178 plane_config->rotation = DRM_MODE_ROTATE_0;
10180 case PLANE_CTL_ROTATE_90:
10181 plane_config->rotation = DRM_MODE_ROTATE_270;
10183 case PLANE_CTL_ROTATE_180:
10184 plane_config->rotation = DRM_MODE_ROTATE_180;
10186 case PLANE_CTL_ROTATE_270:
10187 plane_config->rotation = DRM_MODE_ROTATE_90;
10191 if (INTEL_GEN(dev_priv) >= 10 &&
10192 val & PLANE_CTL_FLIP_HORIZONTAL)
10193 plane_config->rotation |= DRM_MODE_REFLECT_X;
/* Surface base is 4K-aligned; low bits are flags */
10195 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
10196 plane_config->base = base;
10198 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
10200 val = I915_READ(PLANE_SIZE(pipe, plane_id));
10201 fb->height = ((val >> 16) & 0xffff) + 1;
10202 fb->width = ((val >> 0) & 0xffff) + 1;
10204 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
10205 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
10206 fb->pitches[0] = (val & 0x3ff) * stride_mult;
10208 aligned_height = intel_fb_align_height(fb, 0, fb->height);
10210 plane_config->size = fb->pitches[0] * aligned_height;
10212 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
10213 crtc->base.name, plane->base.name, fb->width, fb->height,
10214 fb->format->cpp[0] * 8, base, fb->pitches[0],
10215 plane_config->size);
10217 plane_config->fb = intel_fb;
/*
 * ironlake_get_pfit_config - readout of the ILK-style panel fitter:
 * enabled flag plus window position/size from PF_WIN_POS/PF_WIN_SZ.
 * On gen7 also sanity-checks that the fitter is bound to this pipe.
 */
10224 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
10225 struct intel_crtc_state *pipe_config)
10227 struct drm_device *dev = crtc->base.dev;
10228 struct drm_i915_private *dev_priv = to_i915(dev);
10231 tmp = I915_READ(PF_CTL(crtc->pipe));
10233 if (tmp & PF_ENABLE) {
10234 pipe_config->pch_pfit.enabled = true;
10235 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
10236 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
10238 /* We currently do not free assignements of panel fitters on
10239 * ivb/hsw (since we don't use the higher upscaling modes which
10240 * differentiates them) so just WARN about this case for now. */
10241 if (IS_GEN(dev_priv, 7)) {
10242 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
10243 PF_PIPE_SEL_IVB(crtc->pipe));
/*
 * ironlake_get_pipe_config - hardware state readout for ILK pipes.
 *
 * Takes the pipe power domain (returning false if it cannot be
 * acquired — line missing in this extract), then reads back PIPECONF
 * (bpc, color range, colorspace, gamma), color config, timings, pfit,
 * and — when the PCH transcoder is enabled — the FDI config, shared
 * DPLL selection/state and pixel multiplier.  Returns true when the
 * pipe is active; the wakeref is released on every path.
 */
10248 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10249 struct intel_crtc_state *pipe_config)
10251 struct drm_device *dev = crtc->base.dev;
10252 struct drm_i915_private *dev_priv = to_i915(dev);
10253 enum intel_display_power_domain power_domain;
10254 intel_wakeref_t wakeref;
10258 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10259 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10263 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10264 pipe_config->shared_dpll = NULL;
10265 pipe_config->master_transcoder = INVALID_TRANSCODER;
10268 tmp = I915_READ(PIPECONF(crtc->pipe));
10269 if (!(tmp & PIPECONF_ENABLE))
10272 switch (tmp & PIPECONF_BPC_MASK) {
10273 case PIPECONF_6BPC:
10274 pipe_config->pipe_bpp = 18;
10276 case PIPECONF_8BPC:
10277 pipe_config->pipe_bpp = 24;
10279 case PIPECONF_10BPC:
10280 pipe_config->pipe_bpp = 30;
10282 case PIPECONF_12BPC:
10283 pipe_config->pipe_bpp = 36;
10289 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10290 pipe_config->limited_color_range = true;
10292 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10293 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10294 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10295 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10298 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10302 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10303 PIPECONF_GAMMA_MODE_SHIFT;
10305 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10307 i9xx_get_pipe_color_config(pipe_config);
10308 intel_color_get_config(pipe_config);
10310 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10311 struct intel_shared_dpll *pll;
10312 enum intel_dpll_id pll_id;
10314 pipe_config->has_pch_encoder = true;
10316 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10317 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10318 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10320 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10322 if (HAS_PCH_IBX(dev_priv)) {
10324 * The pipe->pch transcoder and pch transcoder->pll
10325 * mapping is fixed.
10327 pll_id = (enum intel_dpll_id) crtc->pipe;
/* CPT: PLL selection is read from PCH_DPLL_SEL */
10329 tmp = I915_READ(PCH_DPLL_SEL);
10330 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10331 pll_id = DPLL_ID_PCH_PLL_B;
10333 pll_id= DPLL_ID_PCH_PLL_A;
10336 pipe_config->shared_dpll =
10337 intel_get_shared_dpll_by_id(dev_priv, pll_id);
10338 pll = pipe_config->shared_dpll;
10340 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10341 &pipe_config->dpll_hw_state));
10343 tmp = pipe_config->dpll_hw_state.dpll;
10344 pipe_config->pixel_multiplier =
10345 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10346 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10348 ironlake_pch_clock_get(crtc, pipe_config);
10350 pipe_config->pixel_multiplier = 1;
10353 intel_get_pipe_timings(crtc, pipe_config);
10354 intel_get_pipe_src_size(crtc, pipe_config);
10356 ironlake_get_pfit_config(crtc, pipe_config);
10361 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Reserve a shared DPLL for a HSW+ crtc during atomic check.
 * DSI outputs on gen < 11 are skipped here (their PLL is handled by the
 * DSI code); everything else looks up the new encoder and reserves a
 * shared DPLL for it, logging on failure.
 *
 * NOTE(review): this extract is missing physical lines (returns/braces
 * were dropped); verify the full body against upstream intel_display.c.
 */
10365 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10366 struct intel_crtc_state *crtc_state)
10368 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10369 struct intel_atomic_state *state =
10370 to_intel_atomic_state(crtc_state->uapi.state);
10372 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10373 INTEL_GEN(dev_priv) >= 11) {
10374 struct intel_encoder *encoder =
10375 intel_get_crtc_new_encoder(state, crtc_state);
10377 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10378 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10379 pipe_name(crtc->pipe));
/*
 * CNL hardware readout: decode which DPLL feeds the given DDI port from
 * DPCLKA_CFGCR0 and record it in pipe_config->shared_dpll. Warns (and,
 * presumably, bails -- the early-return line is missing from this
 * extract) if the decoded id is outside SKL_DPLL0..SKL_DPLL2.
 */
10387 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10389 struct intel_crtc_state *pipe_config)
10391 enum intel_dpll_id id;
10394 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10395 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10397 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10400 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * ICL+ hardware readout of the DPLL driving a DDI port.
 *  - combo PHY: decode from ICL_DPCLKA_CFGCR0 (default port dpll slot)
 *  - Type-C PHY: DDI_CLK_SEL selects either the MG PHY PLL or the TBT PLL
 *  - anything else: WARN on invalid port
 * The result is stored in icl_port_dplls[] and made the active port DPLL.
 */
10403 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
10405 struct intel_crtc_state *pipe_config)
10407 enum phy phy = intel_port_to_phy(dev_priv, port);
10408 enum icl_port_dpll_id port_dpll_id;
10409 enum intel_dpll_id id;
10412 if (intel_phy_is_combo(dev_priv, phy)) {
10413 temp = I915_READ(ICL_DPCLKA_CFGCR0) &
10414 ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
10415 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
10416 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10417 } else if (intel_phy_is_tc(dev_priv, phy)) {
10418 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
10420 if (clk_sel == DDI_CLK_SEL_MG) {
10421 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
10423 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
10425 WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
10426 id = DPLL_ID_ICL_TBTPLL;
10427 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10430 WARN(1, "Invalid port %x\n", port);
10434 pipe_config->icl_port_dplls[port_dpll_id].pll =
10435 intel_get_shared_dpll_by_id(dev_priv, id);
10437 icl_set_active_port_dpll(pipe_config, port_dpll_id);
/*
 * BXT/GLK hardware readout: the port->PLL mapping is fixed, so simply
 * pick DPLL0/1/2 by port (the switch's case labels were dropped from
 * this extract -- presumably PORT_A/B/C) and record the shared DPLL.
 */
10440 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10442 struct intel_crtc_state *pipe_config)
10444 enum intel_dpll_id id;
10448 id = DPLL_ID_SKL_DPLL0;
10451 id = DPLL_ID_SKL_DPLL1;
10454 id = DPLL_ID_SKL_DPLL2;
10457 DRM_ERROR("Incorrect port type\n");
10461 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * SKL/KBL hardware readout: decode the per-port clock select field from
 * DPLL_CTRL2 (3 bits per port, hence the "port * 3 + 1" shift) and record
 * the shared DPLL. Warns if the id falls outside SKL_DPLL0..SKL_DPLL3.
 */
10464 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
10466 struct intel_crtc_state *pipe_config)
10468 enum intel_dpll_id id;
10471 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10472 id = temp >> (port * 3 + 1);
10474 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10477 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * HSW/BDW hardware readout: translate the PORT_CLK_SEL register value
 * into a shared DPLL id (WRPLL1/2, SPLL, or one of the fixed-frequency
 * LCPLL taps) and record it. PORT_CLK_SEL_NONE and unknown values leave
 * no PLL assigned (the break/return lines are missing from this extract).
 */
10480 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
10482 struct intel_crtc_state *pipe_config)
10484 enum intel_dpll_id id;
10485 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
10487 switch (ddi_pll_sel) {
10488 case PORT_CLK_SEL_WRPLL1:
10489 id = DPLL_ID_WRPLL1;
10491 case PORT_CLK_SEL_WRPLL2:
10492 id = DPLL_ID_WRPLL2;
10494 case PORT_CLK_SEL_SPLL:
10497 case PORT_CLK_SEL_LCPLL_810:
10498 id = DPLL_ID_LCPLL_810;
10500 case PORT_CLK_SEL_LCPLL_1350:
10501 id = DPLL_ID_LCPLL_1350;
10503 case PORT_CLK_SEL_LCPLL_2700:
10504 id = DPLL_ID_LCPLL_2700;
10507 MISSING_CASE(ddi_pll_sel);
10509 case PORT_CLK_SEL_NONE:
10513 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * Read out which CPU transcoder feeds this crtc and whether it is enabled.
 *
 * Starts from the fixed pipe->transcoder mapping, then scans the "panel"
 * transcoders (eDP, and DSI0/DSI1 on gen11+) whose pipe routing is
 * programmable via TRANS_DDI_FUNC_CTL; if one of them is routed to this
 * pipe it overrides cpu_transcoder. Grabs a display power reference for
 * the chosen transcoder, records the wakeref/domain in the caller's
 * tracking arrays, and returns whether PIPECONF says the pipe is enabled.
 *
 * Returns false (power not enabled -- the early-return lines are missing
 * from this extract) when the transcoder's power well is off.
 */
10516 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
10517 struct intel_crtc_state *pipe_config,
10518 u64 *power_domain_mask,
10519 intel_wakeref_t *wakerefs)
10521 struct drm_device *dev = crtc->base.dev;
10522 struct drm_i915_private *dev_priv = to_i915(dev);
10523 enum intel_display_power_domain power_domain;
10524 unsigned long panel_transcoder_mask = 0;
10525 unsigned long enabled_panel_transcoders = 0;
10526 enum transcoder panel_transcoder;
10527 intel_wakeref_t wf;
10530 if (INTEL_GEN(dev_priv) >= 11)
10531 panel_transcoder_mask |=
10532 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
10534 if (HAS_TRANSCODER_EDP(dev_priv))
10535 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
10538 * The pipe->transcoder mapping is fixed with the exception of the eDP
10539 * and DSI transcoders handled below.
10541 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10544 * XXX: Do intel_display_power_get_if_enabled before reading this (for
10545 * consistency and less surprising code; it's in always on power).
10547 for_each_set_bit(panel_transcoder,
10548 &panel_transcoder_mask,
10549 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
10550 bool force_thru = false;
10551 enum pipe trans_pipe;
10553 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
10554 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
10558 * Log all enabled ones, only use the first one.
10560 * FIXME: This won't work for two separate DSI displays.
10562 enabled_panel_transcoders |= BIT(panel_transcoder);
10563 if (enabled_panel_transcoders != BIT(panel_transcoder))
10566 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10568 WARN(1, "unknown pipe linked to transcoder %s\n",
10569 transcoder_name(panel_transcoder));
10571 case TRANS_DDI_EDP_INPUT_A_ONOFF:
10574 case TRANS_DDI_EDP_INPUT_A_ON:
10575 trans_pipe = PIPE_A;
10577 case TRANS_DDI_EDP_INPUT_B_ONOFF:
10578 trans_pipe = PIPE_B;
10580 case TRANS_DDI_EDP_INPUT_C_ONOFF:
10581 trans_pipe = PIPE_C;
10583 case TRANS_DDI_EDP_INPUT_D_ONOFF:
10584 trans_pipe = PIPE_D;
10588 if (trans_pipe == crtc->pipe) {
10589 pipe_config->cpu_transcoder = panel_transcoder;
10590 pipe_config->pch_pfit.force_thru = force_thru;
10595 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10597 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10598 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10600 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10601 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10603 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10607 wakerefs[power_domain] = wf;
10608 *power_domain_mask |= BIT_ULL(power_domain);
10610 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10612 return tmp & PIPECONF_ENABLE;
/*
 * BXT DSI hardware readout: check whether a DSI transcoder (port A or C)
 * is driving this crtc. For each DSI port: take a power reference for
 * its transcoder, bail if the DSI PLL is off (register access would hang
 * otherwise, per the BSpec note below), then check DPI_ENABLE and the
 * pipe-select field in MIPI_CTRL. On a match, cpu_transcoder is set and
 * the function returns true via the transcoder_is_dsi() check.
 */
10615 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10616 struct intel_crtc_state *pipe_config,
10617 u64 *power_domain_mask,
10618 intel_wakeref_t *wakerefs)
10620 struct drm_device *dev = crtc->base.dev;
10621 struct drm_i915_private *dev_priv = to_i915(dev);
10622 enum intel_display_power_domain power_domain;
10623 enum transcoder cpu_transcoder;
10624 intel_wakeref_t wf;
10628 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10629 if (port == PORT_A)
10630 cpu_transcoder = TRANSCODER_DSI_A;
10632 cpu_transcoder = TRANSCODER_DSI_C;
10634 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10635 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10637 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10641 wakerefs[power_domain] = wf;
10642 *power_domain_mask |= BIT_ULL(power_domain);
10645 * The PLL needs to be enabled with a valid divider
10646 * configuration, otherwise accessing DSI registers will hang
10647 * the machine. See BSpec North Display Engine
10648 * registers/MIPI[BXT]. We can break out here early, since we
10649 * need the same DSI PLL to be enabled for both DSI ports.
10651 if (!bxt_dsi_pll_is_enabled(dev_priv))
10654 /* XXX: this works for video mode only */
10655 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10656 if (!(tmp & DPI_ENABLE))
10659 tmp = I915_READ(MIPI_CTRL(port));
10660 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10663 pipe_config->cpu_transcoder = cpu_transcoder;
10667 return transcoder_is_dsi(pipe_config->cpu_transcoder);
/*
 * Read out which DDI port and DPLL drive this crtc, dispatching to the
 * per-platform *_get_ddi_pll() helper, then read the PLL's hardware
 * state. DSI transcoders map directly to a port; everything else decodes
 * the port from TRANS_DDI_FUNC_CTL (TGL uses a different field layout).
 * Finally, on pre-gen9 hardware, detects a PCH/FDI encoder: only PCH
 * transcoder A exists and it is wired to DDI E.
 */
10670 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10671 struct intel_crtc_state *pipe_config)
10673 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10674 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
10675 struct intel_shared_dpll *pll;
10679 if (transcoder_is_dsi(cpu_transcoder)) {
10680 port = (cpu_transcoder == TRANSCODER_DSI_A) ?
10683 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
10684 if (INTEL_GEN(dev_priv) >= 12)
10685 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
10687 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
10690 if (INTEL_GEN(dev_priv) >= 11)
10691 icelake_get_ddi_pll(dev_priv, port, pipe_config);
10692 else if (IS_CANNONLAKE(dev_priv))
10693 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10694 else if (IS_GEN9_BC(dev_priv))
10695 skylake_get_ddi_pll(dev_priv, port, pipe_config);
10696 else if (IS_GEN9_LP(dev_priv))
10697 bxt_get_ddi_pll(dev_priv, port, pipe_config);
10699 haswell_get_ddi_pll(dev_priv, port, pipe_config);
10701 pll = pipe_config->shared_dpll;
10703 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10704 &pipe_config->dpll_hw_state));
10708 * Haswell has only FDI/PCH transcoder A. It is which is connected to
10709 * DDI E. So just check whether this pipe is wired to DDI E and whether
10710 * the PCH transcoder is on.
10712 if (INTEL_GEN(dev_priv) < 9 &&
10713 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10714 pipe_config->has_pch_encoder = true;
10716 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10717 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10718 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10720 ironlake_get_fdi_m_n_config(crtc, pipe_config);
/*
 * Read which transcoder is the port-sync master of @cpu_transcoder.
 * Returns INVALID_TRANSCODER when port sync is disabled; otherwise
 * decodes MASTER_SELECT, where 0 encodes the eDP transcoder and N
 * encodes transcoder N-1.
 */
10724 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10725 enum transcoder cpu_transcoder)
10727 u32 trans_port_sync, master_select;
10729 trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10731 if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10732 return INVALID_TRANSCODER;
10734 master_select = trans_port_sync &
10735 PORT_SYNC_MODE_MASTER_SELECT_MASK;
10736 if (master_select == 0)
10737 return TRANSCODER_EDP;
10739 return master_select - 1;
/*
 * ICL+ port-sync hardware readout: record this crtc's own master
 * transcoder, then scan transcoders A..C (plus, presumably, D -- that
 * line is missing from this extract) to find slaves whose master is this
 * crtc's transcoder, taking/releasing a power reference around each
 * register read. A state cannot be both a slave and have slaves; the
 * final WARN_ON checks that invariant.
 */
10742 static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
10744 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10746 enum transcoder cpu_transcoder;
10748 crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
10749 crtc_state->cpu_transcoder);
10751 transcoders = BIT(TRANSCODER_A) |
10752 BIT(TRANSCODER_B) |
10753 BIT(TRANSCODER_C) |
10755 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
10756 enum intel_display_power_domain power_domain;
10757 intel_wakeref_t trans_wakeref;
10759 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10760 trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
10763 if (!trans_wakeref)
10766 if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
10767 crtc_state->cpu_transcoder)
10768 crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
10770 intel_display_power_put(dev_priv, power_domain, trans_wakeref);
10773 WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
10774 crtc_state->sync_mode_slaves_mask);
/*
 * Full HSW+ pipe hardware state readout. Takes power references as it
 * goes (tracked in power_domain_mask/wakerefs and dropped at the end),
 * determines the active transcoder (including BXT DSI), then reads out:
 * DDI port/PLL state, timings, pipe source size, output color format
 * (HSW via PIPECONF, BDW+ via PIPE_MISC), LSPCON downsampling, gamma/CSC
 * modes, panel fitter config, IPS, pixel multiplier, and (gen11+) the
 * port-sync master/slave configuration. Returns whether the pipe is
 * active (return statements are missing from this extract).
 */
10777 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10778 struct intel_crtc_state *pipe_config)
10780 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10781 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10782 enum intel_display_power_domain power_domain;
10783 u64 power_domain_mask;
10786 pipe_config->master_transcoder = INVALID_TRANSCODER;
10788 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10789 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10793 wakerefs[power_domain] = wf;
10794 power_domain_mask = BIT_ULL(power_domain);
10796 pipe_config->shared_dpll = NULL;
10798 active = hsw_get_transcoder_state(crtc, pipe_config,
10799 &power_domain_mask, wakerefs);
10801 if (IS_GEN9_LP(dev_priv) &&
10802 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10803 &power_domain_mask, wakerefs)) {
10811 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10812 INTEL_GEN(dev_priv) >= 11) {
10813 haswell_get_ddi_port_state(crtc, pipe_config);
10814 intel_get_pipe_timings(crtc, pipe_config);
10817 intel_get_pipe_src_size(crtc, pipe_config);
10819 if (IS_HASWELL(dev_priv)) {
10820 u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10822 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
10823 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10825 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10827 pipe_config->output_format =
10828 bdw_get_pipemisc_output_format(crtc);
10831 * Currently there is no interface defined to
10832 * check user preference between RGB/YCBCR444
10833 * or YCBCR420. So the only possible case for
10834 * YCBCR444 usage is driving YCBCR420 output
10835 * with LSPCON, when pipe is configured for
10836 * YCBCR444 output and LSPCON takes care of
10839 pipe_config->lspcon_downsampling =
10840 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
10843 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10845 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10847 if (INTEL_GEN(dev_priv) >= 9) {
10848 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10850 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10851 pipe_config->gamma_enable = true;
10853 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10854 pipe_config->csc_enable = true;
10856 i9xx_get_pipe_color_config(pipe_config);
10859 intel_color_get_config(pipe_config);
10861 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10862 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10864 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10866 wakerefs[power_domain] = wf;
10867 power_domain_mask |= BIT_ULL(power_domain);
10869 if (INTEL_GEN(dev_priv) >= 9)
10870 skylake_get_pfit_config(crtc, pipe_config);
10872 ironlake_get_pfit_config(crtc, pipe_config);
10875 if (hsw_crtc_supports_ips(crtc)) {
10876 if (IS_HASWELL(dev_priv))
10877 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10880 * We cannot readout IPS state on broadwell, set to
10881 * true so we can set it to a defined state on first
10884 pipe_config->ips_enabled = true;
10888 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10889 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10890 pipe_config->pixel_multiplier =
10891 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10893 pipe_config->pixel_multiplier = 1;
10896 if (INTEL_GEN(dev_priv) >= 11 &&
10897 !transcoder_is_dsi(pipe_config->cpu_transcoder))
10898 icelake_get_trans_port_sync_config(pipe_config);
10901 for_each_power_domain(power_domain, power_domain_mask)
10902 intel_display_power_put(dev_priv,
10903 power_domain, wakerefs[power_domain]);
/*
 * Compute the CURBASE register value for a cursor plane: the physical
 * bus address on platforms needing physically-contiguous cursors,
 * otherwise the GGTT offset, plus the surface offset within the fb.
 */
10908 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10910 struct drm_i915_private *dev_priv =
10911 to_i915(plane_state->uapi.plane->dev);
10912 const struct drm_framebuffer *fb = plane_state->hw.fb;
10913 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10916 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10917 base = obj->phys_handle->busaddr;
10919 base = intel_plane_ggtt_offset(plane_state);
10921 return base + plane_state->color_plane[0].offset;
/*
 * Encode the cursor position register value: x/y magnitude in their
 * shifted fields, with a sign bit set for negative coordinates (the
 * negation/absolute-value lines are missing from this extract).
 */
10924 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10926 int x = plane_state->uapi.dst.x1;
10927 int y = plane_state->uapi.dst.y1;
10931 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10934 pos |= x << CURSOR_X_SHIFT;
10937 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10940 pos |= y << CURSOR_Y_SHIFT;
/*
 * Check the cursor's destination rectangle against the mode_config
 * cursor_width/height limits; both dimensions must be positive.
 */
10945 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10947 const struct drm_mode_config *config =
10948 &plane_state->uapi.plane->dev->mode_config;
10949 int width = drm_rect_width(&plane_state->uapi.dst);
10950 int height = drm_rect_height(&plane_state->uapi.dst);
10952 return width > 0 && width <= config->cursor_width &&
10953 height > 0 && height <= config->cursor_height;
/*
 * Compute the cursor surface offset and x/y within color plane 0.
 * Pins/computes the GTT mapping first; rejects any non-zero src offset
 * (cursor panning is not supported), writes the clamped coordinates back
 * into uapi.src, and on GMCH platforms with 180-degree rotation points
 * the offset at the last pixel so hardware scans out reversed.
 */
10956 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10958 struct drm_i915_private *dev_priv =
10959 to_i915(plane_state->uapi.plane->dev);
10960 unsigned int rotation = plane_state->hw.rotation;
10965 ret = intel_plane_compute_gtt(plane_state);
10969 if (!plane_state->uapi.visible)
10972 src_x = plane_state->uapi.src.x1 >> 16;
10973 src_y = plane_state->uapi.src.y1 >> 16;
10975 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10976 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10979 if (src_x != 0 || src_y != 0) {
10980 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10985 * Put the final coordinates back so that the src
10986 * coordinate checks will see the right values.
10988 drm_rect_translate_to(&plane_state->uapi.src,
10989 src_x << 16, src_y << 16);
10991 /* ILK+ do this automagically in hardware */
10992 if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
10993 const struct drm_framebuffer *fb = plane_state->hw.fb;
10994 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
10995 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
10997 offset += (src_h * src_w - 1) * fb->format->cpp[0];
11000 plane_state->color_plane[0].offset = offset;
11001 plane_state->color_plane[0].x = src_x;
11002 plane_state->color_plane[0].y = src_y;
/*
 * Common atomic-check for cursor planes: reject tiled fbs, run the
 * generic plane-state check with scaling disallowed, restore the
 * unclipped src/dst rectangles (what we actually program to hardware),
 * then validate the surface and src coordinates. Returns 0 or -errno.
 */
11007 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
11008 struct intel_plane_state *plane_state)
11010 const struct drm_framebuffer *fb = plane_state->hw.fb;
11013 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
11014 DRM_DEBUG_KMS("cursor cannot be tiled\n");
11018 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
11020 DRM_PLANE_HELPER_NO_SCALING,
11021 DRM_PLANE_HELPER_NO_SCALING,
11026 /* Use the unclipped src/dst rectangles, which we program to hw */
11027 plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
11028 plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
11030 ret = intel_cursor_check_surface(plane_state);
11034 if (!plane_state->uapi.visible)
11037 ret = intel_plane_check_src_coordinates(plane_state);
/*
 * Max cursor stride for 845g/865g (the return value line is missing
 * from this extract; upstream returns a fixed byte count).
 */
11044 static unsigned int
11045 i845_cursor_max_stride(struct intel_plane *plane,
11046 u32 pixel_format, u64 modifier,
11047 unsigned int rotation)
/* Crtc-dependent cursor control bits for 845g/865g: gamma enable only. */
11052 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11056 if (crtc_state->gamma_enable)
11057 cntl |= CURSOR_GAMMA_ENABLE;
/*
 * Plane-dependent cursor control bits for 845g/865g: enable, ARGB
 * format, and the stride programmed into the control register.
 */
11062 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11063 const struct intel_plane_state *plane_state)
11065 return CURSOR_ENABLE |
11066 CURSOR_FORMAT_ARGB |
11067 CURSOR_STRIDE(plane_state->color_plane[0].stride);
/*
 * 845g/865g cursor size check: common limits plus width must be a
 * multiple of 64; height is otherwise unconstrained (see comment below).
 */
11070 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11072 int width = drm_rect_width(&plane_state->uapi.dst);
11075 * 845g/865g are only limited by the width of their cursors,
11076 * the height is arbitrary up to the precision of the register.
11078 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
/*
 * 845g/865g cursor atomic check: run the common cursor check, then (if
 * the cursor is visible) validate dimensions and that the fb pitch is
 * one of the values the hardware stride field supports (the accepted
 * case labels are missing from this extract), and finally precompute the
 * control register value.
 */
11081 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11082 struct intel_plane_state *plane_state)
11084 const struct drm_framebuffer *fb = plane_state->hw.fb;
11087 ret = intel_check_cursor(crtc_state, plane_state);
11091 /* if we want to turn off the cursor ignore width and height */
11095 /* Check for which cursor types we support */
11096 if (!i845_cursor_size_ok(plane_state)) {
11097 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11098 drm_rect_width(&plane_state->uapi.dst),
11099 drm_rect_height(&plane_state->uapi.dst));
11103 WARN_ON(plane_state->uapi.visible &&
11104 plane_state->color_plane[0].stride != fb->pitches[0]);
11106 switch (fb->pitches[0]) {
11113 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
11118 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
/*
 * Program the 845g/865g cursor registers (pipe A only on these chips).
 * A NULL plane_state disables the cursor (all registers written as 0).
 * Because base/size/stride can only change while the cursor is disabled,
 * any change to those forces a disable/reprogram/re-enable sequence;
 * otherwise only CURPOS is updated. Runs under the uncore lock since
 * these are FW-register writes.
 */
11123 static void i845_update_cursor(struct intel_plane *plane,
11124 const struct intel_crtc_state *crtc_state,
11125 const struct intel_plane_state *plane_state)
11127 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11128 u32 cntl = 0, base = 0, pos = 0, size = 0;
11129 unsigned long irqflags;
11131 if (plane_state && plane_state->uapi.visible) {
11132 unsigned int width = drm_rect_width(&plane_state->uapi.dst);
11133 unsigned int height = drm_rect_height(&plane_state->uapi.dst);
11135 cntl = plane_state->ctl |
11136 i845_cursor_ctl_crtc(crtc_state);
11138 size = (height << 12) | width;
11140 base = intel_cursor_base(plane_state);
11141 pos = intel_cursor_position(plane_state);
11144 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
11146 /* On these chipsets we can only modify the base/size/stride
11147 * whilst the cursor is disabled.
11149 if (plane->cursor.base != base ||
11150 plane->cursor.size != size ||
11151 plane->cursor.cntl != cntl) {
11152 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
11153 I915_WRITE_FW(CURBASE(PIPE_A), base);
11154 I915_WRITE_FW(CURSIZE, size);
11155 I915_WRITE_FW(CURPOS(PIPE_A), pos);
11156 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
11158 plane->cursor.base = base;
11159 plane->cursor.size = size;
11160 plane->cursor.cntl = cntl;
11162 I915_WRITE_FW(CURPOS(PIPE_A), pos);
11165 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/* Disable the 845g/865g cursor by programming a NULL plane state. */
11168 static void i845_disable_cursor(struct intel_plane *plane,
11169 const struct intel_crtc_state *crtc_state)
11171 i845_update_cursor(plane, crtc_state, NULL);
/*
 * Read whether the 845g/865g cursor is enabled in hardware (CURSOR_ENABLE
 * bit in CURCNTR for pipe A), guarded by a conditional power reference.
 * Returns false when the power well is off (early-return line missing
 * from this extract).
 */
11174 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11177 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11178 enum intel_display_power_domain power_domain;
11179 intel_wakeref_t wakeref;
11182 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11183 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11187 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11191 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Max cursor stride for i9xx+: cursor fbs are always 4 bytes/pixel, so
 * the limit is the mode_config max cursor width times 4.
 */
11196 static unsigned int
11197 i9xx_cursor_max_stride(struct intel_plane *plane,
11198 u32 pixel_format, u64 modifier,
11199 unsigned int rotation)
11201 return plane->base.dev->mode_config.cursor_width * 4;
/*
 * Crtc-dependent cursor control bits for i9xx+: gamma and pipe-CSC
 * enables, plus the pipe-select field on old (pre-gen5, non-G4X)
 * hardware. Gen11+ takes an early path (body line missing from this
 * extract -- presumably returns 0 / skips these bits).
 */
11204 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11206 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11207 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11210 if (INTEL_GEN(dev_priv) >= 11)
11213 if (crtc_state->gamma_enable)
11214 cntl = MCURSOR_GAMMA_ENABLE;
11216 if (crtc_state->csc_enable)
11217 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11219 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11220 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
/*
 * Plane-dependent cursor control bits for i9xx+: trickle-feed disable on
 * SNB/IVB, the ARGB cursor mode selected by width (64/128/256), and the
 * 180-degree rotation bit. Unexpected widths hit MISSING_CASE.
 */
11225 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11226 const struct intel_plane_state *plane_state)
11228 struct drm_i915_private *dev_priv =
11229 to_i915(plane_state->uapi.plane->dev);
11232 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11233 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11235 switch (drm_rect_width(&plane_state->uapi.dst)) {
11237 cntl |= MCURSOR_MODE_64_ARGB_AX;
11240 cntl |= MCURSOR_MODE_128_ARGB_AX;
11243 cntl |= MCURSOR_MODE_256_ARGB_AX;
11246 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11250 if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11251 cntl |= MCURSOR_ROTATE_180;
/*
 * i9xx+ cursor size check: common limits, width restricted to the
 * supported power-of-two sizes (the explicit width check is missing from
 * this extract), and height rules: with CUR_FBC_CTL and unrotated
 * cursors the height may be 8..width; otherwise it must equal width
 * (square cursor).
 */
11256 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11258 struct drm_i915_private *dev_priv =
11259 to_i915(plane_state->uapi.plane->dev);
11260 int width = drm_rect_width(&plane_state->uapi.dst);
11261 int height = drm_rect_height(&plane_state->uapi.dst);
11263 if (!intel_cursor_size_ok(plane_state))
11266 /* Cursor width is limited to a few power-of-two sizes */
11277 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11278 * height from 8 lines up to the cursor width, when the
11279 * cursor is not rotated. Everything else requires square
11282 if (HAS_CUR_FBC(dev_priv) &&
11283 plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11284 if (height < 8 || height > width)
11287 if (height != width)
/*
 * i9xx+ cursor atomic check: run the common cursor check, then (if the
 * cursor is visible) validate dimensions, require the fb pitch to be
 * exactly width * cpp, reject the known-broken CHV pipe C case where a
 * cursor straddling the left screen edge can kill the pipe, and finally
 * precompute the control register value.
 */
11294 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
11295 struct intel_plane_state *plane_state)
11297 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11298 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11299 const struct drm_framebuffer *fb = plane_state->hw.fb;
11300 enum pipe pipe = plane->pipe;
11303 ret = intel_check_cursor(crtc_state, plane_state);
11307 /* if we want to turn off the cursor ignore width and height */
11311 /* Check for which cursor types we support */
11312 if (!i9xx_cursor_size_ok(plane_state)) {
11313 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11314 drm_rect_width(&plane_state->uapi.dst),
11315 drm_rect_height(&plane_state->uapi.dst));
11319 WARN_ON(plane_state->uapi.visible &&
11320 plane_state->color_plane[0].stride != fb->pitches[0]);
11322 if (fb->pitches[0] !=
11323 drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
11324 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
11326 drm_rect_width(&plane_state->uapi.dst));
11331 * There's something wrong with the cursor on CHV pipe C.
11332 * If it straddles the left edge of the screen then
11333 * moving it away from the edge or disabling it often
11334 * results in a pipe underrun, and often that can lead to
11335 * dead pipe (constant underrun reported, and it scans
11336 * out just a solid color). To recover from that, the
11337 * display power well must be turned off and on again.
11338 * Refuse the put the cursor into that compromised position.
11340 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
11341 plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
11342 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
11346 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
/*
 * Program the i9xx+ cursor registers. A NULL plane_state disables the
 * cursor. Non-square cursors use CUR_FBC_CTL to set the scanout height.
 * The register write ordering matters (see the long comment below):
 * CURBASE arms the other registers, so a full reprogram writes
 * FBC_CTL/CNTR/POS/BASE in that order, while a position-only update
 * writes CURPOS then CURBASE. SKL+ also flushes cursor watermarks here.
 * Runs under the uncore lock since these are FW-register writes.
 */
11351 static void i9xx_update_cursor(struct intel_plane *plane,
11352 const struct intel_crtc_state *crtc_state,
11353 const struct intel_plane_state *plane_state)
11355 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11356 enum pipe pipe = plane->pipe;
11357 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
11358 unsigned long irqflags;
11360 if (plane_state && plane_state->uapi.visible) {
11361 unsigned width = drm_rect_width(&plane_state->uapi.dst);
11362 unsigned height = drm_rect_height(&plane_state->uapi.dst);
11364 cntl = plane_state->ctl |
11365 i9xx_cursor_ctl_crtc(crtc_state);
11367 if (width != height)
11368 fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
11370 base = intel_cursor_base(plane_state);
11371 pos = intel_cursor_position(plane_state);
11374 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
11377 * On some platforms writing CURCNTR first will also
11378 * cause CURPOS to be armed by the CURBASE write.
11379 * Without the CURCNTR write the CURPOS write would
11380 * arm itself. Thus we always update CURCNTR before
11383 * On other platforms CURPOS always requires the
11384 * CURBASE write to arm the update. Additonally
11385 * a write to any of the cursor register will cancel
11386 * an already armed cursor update. Thus leaving out
11387 * the CURBASE write after CURPOS could lead to a
11388 * cursor that doesn't appear to move, or even change
11389 * shape. Thus we always write CURBASE.
11391 * The other registers are armed by by the CURBASE write
11392 * except when the plane is getting enabled at which time
11393 * the CURCNTR write arms the update.
11396 if (INTEL_GEN(dev_priv) >= 9)
11397 skl_write_cursor_wm(plane, crtc_state);
11399 if (plane->cursor.base != base ||
11400 plane->cursor.size != fbc_ctl ||
11401 plane->cursor.cntl != cntl) {
11402 if (HAS_CUR_FBC(dev_priv))
11403 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
11404 I915_WRITE_FW(CURCNTR(pipe), cntl);
11405 I915_WRITE_FW(CURPOS(pipe), pos);
11406 I915_WRITE_FW(CURBASE(pipe), base);
11408 plane->cursor.base = base;
11409 plane->cursor.size = fbc_ctl;
11410 plane->cursor.cntl = cntl;
11412 I915_WRITE_FW(CURPOS(pipe), pos);
11413 I915_WRITE_FW(CURBASE(pipe), base);
11416 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/* Disable the i9xx+ cursor by programming a NULL plane state. */
11419 static void i9xx_disable_cursor(struct intel_plane *plane,
11420 const struct intel_crtc_state *crtc_state)
11422 i9xx_update_cursor(plane, crtc_state, NULL);
/*
 * Read whether the i9xx+ cursor is enabled (MCURSOR_MODE bits in
 * CURCNTR) and which pipe it is assigned to: fixed on gen5+/G4X, read
 * from the pipe-select field on older hardware. Guarded by a conditional
 * power reference; returns false when the power well is off (early-return
 * line missing from this extract).
 */
11425 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11428 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11429 enum intel_display_power_domain power_domain;
11430 intel_wakeref_t wakeref;
11435 * Not 100% correct for planes that can move between pipes,
11436 * but that's only the case for gen2-3 which don't have any
11437 * display power wells.
11439 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11440 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11444 val = I915_READ(CURCNTR(plane->pipe));
11446 ret = val & MCURSOR_MODE;
11448 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11449 *pipe = plane->pipe;
11451 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11452 MCURSOR_PIPE_SELECT_SHIFT;
11454 intel_display_power_put(dev_priv, power_domain, wakeref);
11459 /* VESA 640x480x72Hz mode to set on the pipe */
/* Used by intel_get_load_detect_pipe() as the forced mode for analog load detection. */
11460 static const struct drm_display_mode load_detect_mode = {
11461 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
11462 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
/*
 * Allocate an intel_framebuffer wrapping @obj and initialize it from
 * @mode_cmd. Returns the embedded drm_framebuffer on success or an
 * ERR_PTR on failure (-ENOMEM on allocation failure, or the init error;
 * the error-path kfree line is missing from this extract).
 */
11465 struct drm_framebuffer *
11466 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11467 struct drm_mode_fb_cmd2 *mode_cmd)
11469 struct intel_framebuffer *intel_fb;
11472 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11474 return ERR_PTR(-ENOMEM);
11476 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11480 return &intel_fb->base;
11484 return ERR_PTR(ret);
/*
 * In the given atomic state, detach every plane currently assigned to
 * @crtc (clear its crtc and fb), so a subsequent commit scans out
 * nothing on that crtc. Returns 0 or -errno from the atomic helpers.
 */
11487 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11488 struct drm_crtc *crtc)
11490 struct drm_plane *plane;
11491 struct drm_plane_state *plane_state;
11494 ret = drm_atomic_add_affected_planes(state, crtc);
11498 for_each_new_plane_in_state(state, plane, plane_state, i) {
11499 if (plane_state->crtc != crtc)
11502 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11506 drm_atomic_set_fb_for_plane(plane_state, NULL);
/*
 * Acquire a crtc and drive the connector with a fixed 640x480 mode so
 * analog load detection can probe for a display.
 *
 * Strategy (see the comment in the body): reuse the connector's current
 * crtc if it has one, otherwise grab the first unused crtc the encoder
 * can drive. Builds two atomic states: @state forces the load-detect
 * mode with all planes disabled, and @restore_state snapshots the
 * pre-existing configuration so intel_release_load_detect_pipe() can
 * undo everything. On success the restore state is stashed in
 * @old->restore_state and we wait one vblank before the caller probes.
 * -EDEADLK is propagated for the caller's modeset-lock backoff dance.
 */
11512 int intel_get_load_detect_pipe(struct drm_connector *connector,
11513 struct intel_load_detect_pipe *old,
11514 struct drm_modeset_acquire_ctx *ctx)
11516 struct intel_crtc *intel_crtc;
11517 struct intel_encoder *intel_encoder =
11518 intel_attached_encoder(connector);
11519 struct drm_crtc *possible_crtc;
11520 struct drm_encoder *encoder = &intel_encoder->base;
11521 struct drm_crtc *crtc = NULL;
11522 struct drm_device *dev = encoder->dev;
11523 struct drm_i915_private *dev_priv = to_i915(dev);
11524 struct drm_mode_config *config = &dev->mode_config;
11525 struct drm_atomic_state *state = NULL, *restore_state = NULL;
11526 struct drm_connector_state *connector_state;
11527 struct intel_crtc_state *crtc_state;
11530 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11531 connector->base.id, connector->name,
11532 encoder->base.id, encoder->name);
11534 old->restore_state = NULL;
11536 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
11539 * Algorithm gets a little messy:
11541 * - if the connector already has an assigned crtc, use it (but make
11542 * sure it's on first)
11544 * - try to find the first unused crtc that can drive this connector,
11545 * and use that if we find one
11548 /* See if we already have a CRTC for this connector */
11549 if (connector->state->crtc) {
11550 crtc = connector->state->crtc;
11552 ret = drm_modeset_lock(&crtc->mutex, ctx);
11556 /* Make sure the crtc and connector are running */
11560 /* Find an unused one (if possible) */
11561 for_each_crtc(dev, possible_crtc) {
11563 if (!(encoder->possible_crtcs & (1 << i)))
11566 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
11570 if (possible_crtc->state->enable) {
11571 drm_modeset_unlock(&possible_crtc->mutex);
11575 crtc = possible_crtc;
11580 * If we didn't find an unused CRTC, don't use any.
11583 DRM_DEBUG_KMS("no pipe available for load-detect\n");
11589 intel_crtc = to_intel_crtc(crtc);
11591 state = drm_atomic_state_alloc(dev);
11592 restore_state = drm_atomic_state_alloc(dev);
11593 if (!state || !restore_state) {
11598 state->acquire_ctx = ctx;
11599 restore_state->acquire_ctx = ctx;
11601 connector_state = drm_atomic_get_connector_state(state, connector);
11602 if (IS_ERR(connector_state)) {
11603 ret = PTR_ERR(connector_state);
11607 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
11611 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
11612 if (IS_ERR(crtc_state)) {
11613 ret = PTR_ERR(crtc_state);
11617 crtc_state->uapi.active = true;
11619 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
11620 &load_detect_mode);
11624 ret = intel_modeset_disable_planes(state, crtc);
11628 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
11630 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
11632 ret = drm_atomic_add_affected_planes(restore_state, crtc);
11634 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
11638 ret = drm_atomic_commit(state);
11640 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
11644 old->restore_state = restore_state;
11645 drm_atomic_state_put(state);
11647 /* let the connector get through one full cycle before testing */
11648 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
11653 drm_atomic_state_put(state);
11656 if (restore_state) {
11657 drm_atomic_state_put(restore_state);
11658 restore_state = NULL;
11661 if (ret == -EDEADLK)
/*
 * Undo a successful intel_get_load_detect_pipe(): commit the duplicated
 * pre-load-detect state stashed in old->restore_state, turning the
 * borrowed pipe back off (or back to its previous configuration).
 * Consumes the restore_state reference.
 */
11667 void intel_release_load_detect_pipe(struct drm_connector *connector,
11668 struct intel_load_detect_pipe *old,
11669 struct drm_modeset_acquire_ctx *ctx)
11671 struct intel_encoder *intel_encoder =
11672 intel_attached_encoder(connector);
11673 struct drm_encoder *encoder = &intel_encoder->base;
11674 struct drm_atomic_state *state = old->restore_state;
11677 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11678 connector->base.id, connector->name,
11679 encoder->base.id, encoder->name);
/* NOTE(review): an early return for state == NULL appears to be on an
 * elided line -- confirm against the full source. */
11684 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11686 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11687 drm_atomic_state_put(state);
/*
 * Return the reference clock frequency (kHz) that was used to program the
 * pipe's DPLL, inferred from the programmed DPLL register value.
 *
 * NOTE(review): partial extraction -- the constant return values for the
 * PCH-split, gen3+ and gen2 branches are on elided lines; confirm against
 * the full source.
 */
11690 static int i9xx_pll_refclk(struct drm_device *dev,
11691 const struct intel_crtc_state *pipe_config)
11693 struct drm_i915_private *dev_priv = to_i915(dev);
11694 u32 dpll = pipe_config->dpll_hw_state.dpll;
/* Spread-spectrum reference: the frequency comes from the VBT. */
11696 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11697 return dev_priv->vbt.lvds_ssc_freq;
11698 else if (HAS_PCH_SPLIT(dev_priv))
11700 else if (!IS_GEN(dev_priv, 2))
11706 /* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Read back the DPLL/FP register state captured in @pipe_config, decode the
 * M/N/P divider fields and compute the resulting port clock, storing it in
 * pipe_config->port_clock.  Pineview and gen2 have different register
 * layouts and are handled separately.
 *
 * NOTE(review): partial extraction -- several else-branches and constant
 * assignments (e.g. the p2 divider values) are on elided lines.
 */
11707 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11708 struct intel_crtc_state *pipe_config)
11710 struct drm_device *dev = crtc->base.dev;
11711 struct drm_i915_private *dev_priv = to_i915(dev);
11712 enum pipe pipe = crtc->pipe;
11713 u32 dpll = pipe_config->dpll_hw_state.dpll;
11717 int refclk = i9xx_pll_refclk(dev, pipe_config);
/* Pick FP0 or FP1 depending on which divider set the PLL is using. */
11719 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11720 fp = pipe_config->dpll_hw_state.fp0;
11722 fp = pipe_config->dpll_hw_state.fp1;
11724 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
/* Pineview encodes N differently (one-hot, hence the ffs()). */
11725 if (IS_PINEVIEW(dev_priv)) {
11726 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11727 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11729 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11730 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
/* Gen3+ path: decode P1 (one-hot) and P2 from the DPLL mode bits. */
11733 if (!IS_GEN(dev_priv, 2)) {
11734 if (IS_PINEVIEW(dev_priv))
11735 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11736 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11738 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11739 DPLL_FPA01_P1_POST_DIV_SHIFT);
11741 switch (dpll & DPLL_MODE_MASK) {
11742 case DPLLB_MODE_DAC_SERIAL:
11743 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11746 case DPLLB_MODE_LVDS:
11747 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11751 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
11752 "mode\n", (int)(dpll & DPLL_MODE_MASK));
11756 if (IS_PINEVIEW(dev_priv))
11757 port_clock = pnv_calc_dpll_params(refclk, &clock);
11759 port_clock = i9xx_calc_dpll_params(refclk, &clock);
/* Gen2 path: LVDS on pipe B uses its own P1/P2 encoding. */
11761 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
11762 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
11765 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11766 DPLL_FPA01_P1_POST_DIV_SHIFT);
11768 if (lvds & LVDS_CLKB_POWER_UP)
11773 if (dpll & PLL_P1_DIVIDE_BY_TWO)
11776 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11777 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11779 if (dpll & PLL_P2_DIVIDE_BY_4)
11785 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11789 * This value includes pixel_multiplier. We will use
11790 * port_clock to compute adjusted_mode.crtc_clock in the
11791 * encoder's get_config() function.
11793 pipe_config->port_clock = port_clock;
/*
 * Compute the pixel/dot clock (kHz) from a link frequency and the link
 * M/N values, keeping full precision by multiplying before dividing.
 *
 * NOTE(review): upstream guards against m_n->link_n == 0 (returning 0)
 * before the division; that guard line is elided in this extraction --
 * confirm against the full source.
 */
11796 int intel_dotclock_calculate(int link_freq,
11797 const struct intel_link_m_n *m_n)
11800 * The calculation for the data clock is:
11801 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11802 * But we want to avoid losing precison if possible, so:
11803 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11805 * and the link clock is simpler:
11806 * link_clock = (m * link_clock) / n
/* 64-bit intermediate avoids overflow of link_m * link_freq. */
11812 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
/*
 * Read back the pipe clock on PCH platforms: get port_clock from the DPLL
 * registers, then derive the dotclock from the FDI link M/N configuration
 * so we have a plausible value even with no active port.
 */
11815 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11816 struct intel_crtc_state *pipe_config)
11818 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11820 /* read out port_clock from the DPLL */
11821 i9xx_crtc_clock_get(crtc, pipe_config);
11824 * In case there is an active pipe without active ports,
11825 * we may need some idea for the dotclock anyway.
11826 * Calculate one based on the FDI configuration.
11828 pipe_config->hw.adjusted_mode.crtc_clock =
11829 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11830 &pipe_config->fdi_m_n);
/*
 * Reset @crtc_state to a well-defined "nothing configured" state: zero the
 * whole struct, reset the embedded uapi state, and set the fields whose
 * idle value is not zero (invalid transcoders/pipe/format, scaler id -1).
 */
11833 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
11834 struct intel_crtc *crtc)
11836 memset(crtc_state, 0, sizeof(*crtc_state));
11838 __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
/* Non-zero sentinels: these fields use explicit "invalid" values. */
11840 crtc_state->cpu_transcoder = INVALID_TRANSCODER;
11841 crtc_state->master_transcoder = INVALID_TRANSCODER;
11842 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
11843 crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
11844 crtc_state->scaler_state.scaler_id = -1;
/*
 * Allocate a fresh intel_crtc_state for @crtc and initialize it via
 * intel_crtc_state_reset().
 *
 * NOTE(review): the kmalloc() NULL check and the return statement are on
 * elided lines -- confirm against the full source.
 */
11847 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
11849 struct intel_crtc_state *crtc_state;
11851 crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
11854 intel_crtc_state_reset(crtc_state, crtc);
11859 /* Returns the currently programmed mode of the given encoder. */
/*
 * Read the hardware state for @encoder's pipe and translate it into a
 * freshly allocated drm_display_mode (caller frees).  Error paths (encoder
 * off, allocation failure, pipe readout failure) are on elided lines and
 * presumably return NULL -- confirm against the full source.
 */
11860 struct drm_display_mode *
11861 intel_encoder_current_mode(struct intel_encoder *encoder)
11863 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11864 struct intel_crtc_state *crtc_state;
11865 struct drm_display_mode *mode;
11866 struct intel_crtc *crtc;
/* Ask the encoder which pipe it is driving; bail if it is off. */
11869 if (!encoder->get_hw_state(encoder, &pipe))
11872 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11874 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11878 crtc_state = intel_crtc_state_alloc(crtc);
/* Read out the pipe configuration from the hardware registers. */
11884 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11890 encoder->get_config(encoder, crtc_state);
11892 intel_mode_from_pipe_config(mode, crtc_state);
/*
 * drm_crtc_funcs.destroy callback: clean up the DRM core CRTC state.
 * NOTE(review): the kfree(intel_crtc) that frees the containing struct is
 * on an elided line (it is why the local exists) -- confirm.
 */
11899 static void intel_crtc_destroy(struct drm_crtc *crtc)
11901 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11903 drm_crtc_cleanup(crtc);
11908 * intel_wm_need_update - Check whether watermarks need updating
11909 * @cur: current plane state
11910 * @new: new plane state
11912 * Check current plane state versus the new one to determine whether
11913 * watermarks need to be recalculated.
11915 * Returns true or false.
/*
 * NOTE(review): the "return true" / "return false" statements after each
 * condition are on elided lines -- confirm against the full source.
 */
11917 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11918 struct intel_plane_state *new)
11920 /* Update watermarks on tiling or size changes. */
/* Visibility toggled -> recompute. */
11921 if (new->uapi.visible != cur->uapi.visible)
/* One side has no fb (plane being enabled/disabled) -> recompute. */
11924 if (!cur->hw.fb || !new->hw.fb)
/* Tiling/rotation or any src/dst dimension change -> recompute. */
11927 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11928 cur->hw.rotation != new->hw.rotation ||
11929 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11930 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11931 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11932 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11938 static bool needs_scaling(const struct intel_plane_state *state)
11940 int src_w = drm_rect_width(&state->uapi.src) >> 16;
11941 int src_h = drm_rect_height(&state->uapi.src) >> 16;
11942 int dst_w = drm_rect_width(&state->uapi.dst);
11943 int dst_h = drm_rect_height(&state->uapi.dst);
11945 return (src_w != dst_w || src_h != dst_h);
/*
 * Per-plane atomic check: compare old and new plane state for one plane on
 * one CRTC and derive the CRTC-level bookkeeping that depends on it --
 * watermark update flags, CxSR/LP-watermark disables, frontbuffer bits and
 * skl+ plane scaler setup.  Returns 0 or a negative error code.
 *
 * NOTE(review): partial extraction -- several early returns and closing
 * braces are on elided lines.
 */
11948 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11949 struct intel_crtc_state *crtc_state,
11950 const struct intel_plane_state *old_plane_state,
11951 struct intel_plane_state *plane_state)
11953 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11954 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11955 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11956 bool mode_changed = needs_modeset(crtc_state);
11957 bool was_crtc_enabled = old_crtc_state->hw.active;
11958 bool is_crtc_enabled = crtc_state->hw.active;
11959 bool turn_off, turn_on, visible, was_visible;
/* gen9+: non-cursor planes may need a pipe scaler allocated/updated. */
11962 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11963 ret = skl_update_scaler_plane(crtc_state, plane_state);
11968 was_visible = old_plane_state->uapi.visible;
11969 visible = plane_state->uapi.visible;
/* A plane cannot have been visible on an inactive CRTC. */
11971 if (!was_crtc_enabled && WARN_ON(was_visible))
11972 was_visible = false;
11975 * Visibility is calculated as if the crtc was on, but
11976 * after scaler setup everything depends on it being off
11977 * when the crtc isn't active.
11979 * FIXME this is wrong for watermarks. Watermarks should also
11980 * be computed as if the pipe would be active. Perhaps move
11981 * per-plane wm computation to the .check_plane() hook, and
11982 * only combine the results from all planes in the current place?
11984 if (!is_crtc_enabled) {
11985 plane_state->uapi.visible = visible = false;
11986 crtc_state->active_planes &= ~BIT(plane->id);
11987 crtc_state->data_rate[plane->id] = 0;
11988 crtc_state->min_cdclk[plane->id] = 0;
/* Plane stays invisible in both states: nothing to do. */
11991 if (!was_visible && !visible)
11994 turn_off = was_visible && (!visible || mode_changed);
11995 turn_on = visible && (!was_visible || mode_changed);
11997 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11998 crtc->base.base.id, crtc->base.name,
11999 plane->base.base.id, plane->base.name,
12000 was_visible, visible,
12001 turn_off, turn_on, mode_changed);
/* Plane turning on (elided "if (turn_on)" is presumably above this). */
12004 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
12005 crtc_state->update_wm_pre = true;
12007 /* must disable cxsr around plane enable/disable */
12008 if (plane->id != PLANE_CURSOR)
12009 crtc_state->disable_cxsr = true;
12010 } else if (turn_off) {
12011 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
12012 crtc_state->update_wm_post = true;
12014 /* must disable cxsr around plane enable/disable */
12015 if (plane->id != PLANE_CURSOR)
12016 crtc_state->disable_cxsr = true;
12017 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
12018 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
12019 /* FIXME bollocks */
12020 crtc_state->update_wm_pre = true;
12021 crtc_state->update_wm_post = true;
/* Track which frontbuffers this plane touches for FBC/PSR flushing. */
12025 if (visible || was_visible)
12026 crtc_state->fb_bits |= plane->frontbuffer_bit;
12029 * ILK/SNB DVSACNTR/Sprite Enable
12030 * IVB SPR_CTL/Sprite Enable
12031 * "When in Self Refresh Big FIFO mode, a write to enable the
12032 * plane will be internally buffered and delayed while Big FIFO
12033 * mode is exiting."
12035 * Which means that enabling the sprite can take an extra frame
12036 * when we start in big FIFO mode (LP1+). Thus we need to drop
12037 * down to LP0 and wait for vblank in order to make sure the
12038 * sprite gets enabled on the next vblank after the register write.
12039 * Doing otherwise would risk enabling the sprite one frame after
12040 * we've already signalled flip completion. We can resume LP1+
12041 * once the sprite has been enabled.
12044 * WaCxSRDisabledForSpriteScaling:ivb
12045 * IVB SPR_SCALE/Scaling Enable
12046 * "Low Power watermarks must be disabled for at least one
12047 * frame before enabling sprite scaling, and kept disabled
12048 * until sprite scaling is disabled."
12050 * ILK/SNB DVSASCALE/Scaling Enable
12051 * "When in Self Refresh Big FIFO mode, scaling enable will be
12052 * masked off while Big FIFO mode is exiting."
12054 * Despite the w/a only being listed for IVB we assume that
12055 * the ILK/SNB note has similar ramifications, hence we apply
12056 * the w/a on all three platforms.
12058 * With experimental results seems this is needed also for primary
12059 * plane, not only sprite plane.
12061 if (plane->id != PLANE_CURSOR &&
12062 (IS_GEN_RANGE(dev_priv, 5, 6) ||
12063 IS_IVYBRIDGE(dev_priv)) &&
12064 (turn_on || (!needs_scaling(old_plane_state) &&
12065 needs_scaling(plane_state))))
12066 crtc_state->disable_lp_wm = true;
12071 static bool encoders_cloneable(const struct intel_encoder *a,
12072 const struct intel_encoder *b)
12074 /* masks could be asymmetric, so check both ways */
12075 return a == b || (a->cloneable & (1 << b->type) &&
12076 b->cloneable & (1 << a->type));
/*
 * Check whether @encoder can coexist with every other encoder that the new
 * atomic state routes to @crtc, using the pairwise encoders_cloneable()
 * test.  Returns false on the first incompatible pairing.
 *
 * NOTE(review): partial extraction -- the "continue" statements, the
 * source_encoder assignment's left-hand side and the return statements are
 * on elided lines.
 */
12079 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12080 struct intel_crtc *crtc,
12081 struct intel_encoder *encoder)
12083 struct intel_encoder *source_encoder;
12084 struct drm_connector *connector;
12085 struct drm_connector_state *connector_state;
/* Walk every connector in the new state that targets this CRTC. */
12088 for_each_new_connector_in_state(state, connector, connector_state, i) {
12089 if (connector_state->crtc != &crtc->base)
12093 to_intel_encoder(connector_state->best_encoder);
12094 if (!encoders_cloneable(encoder, source_encoder))
/*
 * For every plane in the atomic state that has a planar (NV12 Y/UV) linked
 * plane, pull the linked plane into the state too so both halves of the
 * pair are programmed together.  Returns 0 or a negative error code.
 */
12101 static int icl_add_linked_planes(struct intel_atomic_state *state)
12103 struct intel_plane *plane, *linked;
12104 struct intel_plane_state *plane_state, *linked_plane_state;
12107 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12108 linked = plane_state->planar_linked_plane;
/* (Planes without a link are skipped on an elided line.) */
12113 linked_plane_state = intel_atomic_get_plane_state(state, linked);
12114 if (IS_ERR(linked_plane_state))
12115 return PTR_ERR(linked_plane_state);
/* Sanity: links must be mutual and exactly one side is the slave. */
12117 WARN_ON(linked_plane_state->planar_linked_plane != plane);
12118 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
/*
 * gen11+: NV12 (planar YUV) planes need a second hardware plane to scan out
 * the Y component.  Tear down any stale Y/UV plane links for this CRTC,
 * then, for every plane in crtc_state->nv12_planes, find a free Y-capable
 * plane, link the pair, and copy the relevant hardware parameters to the
 * slave.  Returns 0 or a negative error code.
 *
 * NOTE(review): partial extraction -- "continue"/"break" statements, the
 * -EINVAL return for the no-free-Y-plane case and several closing braces
 * are on elided lines.
 */
12125 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
12126 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12127 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12128 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
12129 struct intel_plane *plane, *linked;
12130 struct intel_plane_state *plane_state;
/* Only gen11+ uses Y/UV plane linking. */
12133 if (INTEL_GEN(dev_priv) < 11)
12137 * Destroy all old plane links and make the slave plane invisible
12138 * in the crtc_state->active_planes mask.
12140 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12141 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
12144 plane_state->planar_linked_plane = NULL;
/* A slave with no visible master drops out of the active set. */
12145 if (plane_state->planar_slave && !plane_state->uapi.visible) {
12146 crtc_state->active_planes &= ~BIT(plane->id);
12147 crtc_state->update_planes |= BIT(plane->id);
12150 plane_state->planar_slave = false;
/* No NV12 planes on this CRTC: nothing to link. */
12153 if (!crtc_state->nv12_planes)
12156 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12157 struct intel_plane_state *linked_state = NULL;
12159 if (plane->pipe != crtc->pipe ||
12160 !(crtc_state->nv12_planes & BIT(plane->id)))
/* Search this pipe's planes for an unused Y-capable plane. */
12163 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
12164 if (!icl_is_nv12_y_plane(linked->id))
12167 if (crtc_state->active_planes & BIT(linked->id))
12170 linked_state = intel_atomic_get_plane_state(state, linked);
12171 if (IS_ERR(linked_state))
12172 return PTR_ERR(linked_state);
12177 if (!linked_state) {
12178 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
12179 hweight8(crtc_state->nv12_planes));
/* Establish the mutual master/slave link. */
12184 plane_state->planar_linked_plane = linked;
12186 linked_state->planar_slave = true;
12187 linked_state->planar_linked_plane = plane;
12188 crtc_state->active_planes |= BIT(linked->id);
12189 crtc_state->update_planes |= BIT(linked->id);
12190 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
12192 /* Copy parameters to slave plane */
12193 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
12194 linked_state->color_ctl = plane_state->color_ctl;
12195 linked_state->color_plane[0] = plane_state->color_plane[0];
12197 intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
12198 linked_state->uapi.src = plane_state->uapi.src;
12199 linked_state->uapi.dst = plane_state->uapi.dst;
/* HDR planes additionally name their chroma-upsampler partner. */
12201 if (icl_is_hdr_plane(dev_priv, plane->id)) {
12202 if (linked->id == PLANE_SPRITE5)
12203 plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
12204 else if (linked->id == PLANE_SPRITE4)
12205 plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
12207 MISSING_CASE(linked->id);
12214 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12216 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12217 struct intel_atomic_state *state =
12218 to_intel_atomic_state(new_crtc_state->uapi.state);
12219 const struct intel_crtc_state *old_crtc_state =
12220 intel_atomic_get_old_crtc_state(state, crtc);
12222 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
/*
 * gen11+ tiled-display (genlock) setup: if @crtc_state drives a tile of a
 * tiled display, find the connector for the last-horizontal/last-vertical
 * tile of the same tile group, make its CRTC the master, record the master
 * transcoder in this (slave) state and add this transcoder to the master's
 * sync_mode_slaves_mask.  Returns 0 or a negative error code.
 *
 * NOTE(review): partial extraction -- "continue"/"break"/"return 0"
 * statements and some closing braces are on elided lines.
 */
12225 static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
12227 struct drm_crtc *crtc = crtc_state->uapi.crtc;
12228 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
12229 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12230 struct drm_connector *master_connector, *connector;
12231 struct drm_connector_state *connector_state;
12232 struct drm_connector_list_iter conn_iter;
12233 struct drm_crtc *master_crtc = NULL;
12234 struct drm_crtc_state *master_crtc_state;
12235 struct intel_crtc_state *master_pipe_config;
12236 int i, tile_group_id;
/* Genlocked tiled displays are only handled on gen11+. */
12238 if (INTEL_GEN(dev_priv) < 11)
12242 * In case of tiled displays there could be one or more slaves but there is
12243 * only one master. Lets make the CRTC used by the connector corresponding
12244 * to the last horizonal and last vertical tile a master/genlock CRTC.
12245 * All the other CRTCs corresponding to other tiles of the same Tile group
12246 * are the slave CRTCs and hold a pointer to their genlock CRTC.
12248 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
12249 if (connector_state->crtc != crtc)
12251 if (!connector->has_tile)
/* Skip when the mode doesn't actually span a full tile. */
12253 if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
12254 crtc_state->hw.mode.vdisplay != connector->tile_v_size)
/* The last h/v tile IS the master; nothing to link for it. */
12256 if (connector->tile_h_loc == connector->num_h_tile - 1 &&
12257 connector->tile_v_loc == connector->num_v_tile - 1)
12259 crtc_state->sync_mode_slaves_mask = 0;
12260 tile_group_id = connector->tile_group->id;
/* Scan all connectors for the master tile of this tile group. */
12261 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
12262 drm_for_each_connector_iter(master_connector, &conn_iter) {
12263 struct drm_connector_state *master_conn_state = NULL;
12265 if (!master_connector->has_tile)
12267 if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
12268 master_connector->tile_v_loc != master_connector->num_v_tile - 1)
12270 if (master_connector->tile_group->id != tile_group_id)
12273 master_conn_state = drm_atomic_get_connector_state(&state->base,
12275 if (IS_ERR(master_conn_state)) {
/* End the iterator before returning to avoid leaking it. */
12276 drm_connector_list_iter_end(&conn_iter);
12277 return PTR_ERR(master_conn_state);
12279 if (master_conn_state->crtc) {
12280 master_crtc = master_conn_state->crtc;
12284 drm_connector_list_iter_end(&conn_iter);
12286 if (!master_crtc) {
12287 DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
12288 connector_state->crtc->base.id);
12292 master_crtc_state = drm_atomic_get_crtc_state(&state->base,
12294 if (IS_ERR(master_crtc_state))
12295 return PTR_ERR(master_crtc_state);
/* Cross-link slave and master transcoder state. */
12297 master_pipe_config = to_intel_crtc_state(master_crtc_state);
12298 crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
12299 master_pipe_config->sync_mode_slaves_mask |=
12300 BIT(crtc_state->cpu_transcoder);
12301 DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
12302 transcoder_name(crtc_state->master_transcoder),
12303 crtc_state->uapi.crtc->base.id,
12304 master_pipe_config->sync_mode_slaves_mask);
/*
 * Per-CRTC atomic check: validates and derives the CRTC-level state --
 * clocks, color management, watermarks (pipe + intermediate), skl+ scalers
 * and HSW IPS -- for one CRTC in the new atomic state.  Returns 0 or a
 * negative error code.
 *
 * NOTE(review): partial extraction -- the "return ret" error propagation
 * lines after several calls are elided.
 */
12310 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
12311 struct intel_crtc *crtc)
12313 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12314 struct intel_crtc_state *crtc_state =
12315 intel_atomic_get_new_crtc_state(state, crtc);
12316 bool mode_changed = needs_modeset(crtc_state);
/* Pre-gen5 (non-G4X): disabling the pipe needs a post-wm update. */
12319 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
12320 mode_changed && !crtc_state->hw.active)
12321 crtc_state->update_wm_post = true;
/* New mode on an enabled pipe: compute the (not yet assigned) DPLL. */
12323 if (mode_changed && crtc_state->hw.enable &&
12324 dev_priv->display.crtc_compute_clock &&
12325 !WARN_ON(crtc_state->shared_dpll)) {
12326 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
12332 * May need to update pipe gamma enable bits
12333 * when C8 planes are getting enabled/disabled.
12335 if (c8_planes_changed(crtc_state))
12336 crtc_state->uapi.color_mgmt_changed = true;
12338 if (mode_changed || crtc_state->update_pipe ||
12339 crtc_state->uapi.color_mgmt_changed) {
12340 ret = intel_color_check(crtc_state);
12346 if (dev_priv->display.compute_pipe_wm) {
12347 ret = dev_priv->display.compute_pipe_wm(crtc_state);
12349 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
/* Intermediate watermarks only make sense with pipe watermarks. */
12354 if (dev_priv->display.compute_intermediate_wm) {
12355 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12359 * Calculate 'intermediate' watermarks that satisfy both the
12360 * old state and the new state. We can program these
12363 ret = dev_priv->display.compute_intermediate_wm(crtc_state);
12365 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
/* gen9+: (re)allocate pipe scalers for CRTC and planes. */
12370 if (INTEL_GEN(dev_priv) >= 9) {
12371 if (mode_changed || crtc_state->update_pipe)
12372 ret = skl_update_scaler_crtc(crtc_state);
12374 ret = intel_atomic_setup_scalers(dev_priv, crtc,
12378 if (HAS_IPS(dev_priv))
12379 crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
/*
 * Synchronize every connector's atomic state (best_encoder, crtc) with the
 * connector's current legacy encoder/crtc pointers, adjusting the
 * connector reference counts accordingly.  Used after hardware readout to
 * make the atomic state match reality.
 */
12384 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12386 struct intel_connector *connector;
12387 struct drm_connector_list_iter conn_iter;
12389 drm_connector_list_iter_begin(dev, &conn_iter);
12390 for_each_intel_connector_iter(connector, &conn_iter) {
/* Drop the reference held by the stale state->crtc binding. */
12391 if (connector->base.state->crtc)
12392 drm_connector_put(&connector->base);
12394 if (connector->base.encoder) {
12395 connector->base.state->best_encoder =
12396 connector->base.encoder;
12397 connector->base.state->crtc =
12398 connector->base.encoder->crtc;
/* Bound to a crtc again: take a new reference. */
12400 drm_connector_get(&connector->base);
12402 connector->base.state->best_encoder = NULL;
12403 connector->base.state->crtc = NULL;
12406 drm_connector_list_iter_end(&conn_iter);
/*
 * Clamp pipe_config->pipe_bpp to what the sink behind @conn_state can
 * accept, taking both the connector's max_bpc property and the EDID bpc
 * into account.
 *
 * NOTE(review): partial extraction -- the return type line, the switch
 * case bodies computing "bpp" from conn_state->max_bpc, and the return
 * statements are all on elided lines.
 */
12410 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12411 struct intel_crtc_state *pipe_config)
12413 struct drm_connector *connector = conn_state->connector;
12414 const struct drm_display_info *info = &connector->display_info;
/* Map the max_bpc property to a candidate pipe bpp (cases elided). */
12417 switch (conn_state->max_bpc) {
/* Only ever lower the pipe bpp, never raise it. */
12434 if (bpp < pipe_config->pipe_bpp) {
12435 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12436 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12437 connector->base.id, connector->name,
12438 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12439 pipe_config->pipe_bpp);
12441 pipe_config->pipe_bpp = bpp;
/*
 * Choose the platform's maximum pipe bpp as a starting point, then clamp
 * it to what every connector routed to @crtc in this state can accept via
 * compute_sink_pipe_bpp().
 *
 * NOTE(review): partial extraction -- the return type line, the constant
 * bpp assignments per platform branch, "continue" and the return
 * statements are on elided lines.
 */
12448 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12449 struct intel_crtc_state *pipe_config)
12451 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12452 struct drm_atomic_state *state = pipe_config->uapi.state;
12453 struct drm_connector *connector;
12454 struct drm_connector_state *connector_state;
/* Platform ceiling for pipe bpp (assigned values elided). */
12457 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12458 IS_CHERRYVIEW(dev_priv)))
12460 else if (INTEL_GEN(dev_priv) >= 5)
12465 pipe_config->pipe_bpp = bpp;
12467 /* Clamp display bpp to connector max bpp */
12468 for_each_new_connector_in_state(state, connector, connector_state, i) {
12471 if (connector_state->crtc != &crtc->base)
12474 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
/*
 * Debug helper: log the low-level crtc_* timing fields of @mode.
 * NOTE(review): the format string has 9 %d slots before type/flags; the
 * first argument (presumably mode->crtc_clock) is on an elided line.
 */
12482 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12484 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12485 "type: 0x%x flags: 0x%x\n",
12487 mode->crtc_hdisplay, mode->crtc_hsync_start,
12488 mode->crtc_hsync_end, mode->crtc_htotal,
12489 mode->crtc_vdisplay, mode->crtc_vsync_start,
12490 mode->crtc_vsync_end, mode->crtc_vtotal,
12491 mode->type, mode->flags);
/*
 * Debug helper: log a link M/N configuration (identified by @id, e.g.
 * "fdi" or "dp m_n") together with its lane count.
 */
12495 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
12496 const char *id, unsigned int lane_count,
12497 const struct intel_link_m_n *m_n)
12499 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12501 m_n->gmch_m, m_n->gmch_n,
12502 m_n->link_m, m_n->link_n, m_n->tu);
/*
 * Debug helper: dump an HDMI infoframe to the kernel log, but only when
 * KMS debugging is enabled (avoids the formatting cost otherwise).
 */
12506 intel_dump_infoframe(struct drm_i915_private *dev_priv,
12507 const union hdmi_infoframe *frame)
12509 if ((drm_debug & DRM_UT_KMS) == 0)
12512 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
/*
 * Lookup table mapping INTEL_OUTPUT_* enum values to their names for debug
 * output; OUTPUT_TYPE(x) expands to a designated initializer.
 * NOTE(review): several table entries (e.g. DVO, SDVO, LVDS, HDMI, DP,
 * EDP, DSI) appear to be on elided lines -- confirm against full source.
 */
12515 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
12517 static const char * const output_type_str[] = {
12518 OUTPUT_TYPE(UNUSED),
12519 OUTPUT_TYPE(ANALOG),
12523 OUTPUT_TYPE(TVOUT),
12529 OUTPUT_TYPE(DP_MST),
/*
 * Format the set bits of @output_types into @buf as a comma-separated list
 * of output type names, consuming bits as they are printed.  Any bit with
 * no table entry left at the end trips the WARN_ON_ONCE.
 *
 * NOTE(review): partial extraction -- the running "str"/"len" cursor
 * updates and initializations are on elided lines.
 */
12534 static void snprintf_output_types(char *buf, size_t len,
12535 unsigned int output_types)
12542 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12545 if ((output_types & BIT(i)) == 0)
/* Prepend a comma for every entry after the first. */
12548 r = snprintf(str, len, "%s%s",
12549 str != buf ? "," : "", output_type_str[i]);
12555 output_types &= ~BIT(i);
/* Any remaining bit has no name in output_type_str. */
12558 WARN_ON_ONCE(output_types != 0);
/* Names for enum intel_output_format values, used by output_formats(). */
12561 static const char * const output_format_str[] = {
12562 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
12563 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
12564 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
12565 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
12568 static const char *output_formats(enum intel_output_format format)
12570 if (format >= ARRAY_SIZE(output_format_str))
12571 format = INTEL_OUTPUT_FORMAT_INVALID;
12572 return output_format_str[format];
/*
 * Debug helper: log one plane's framebuffer, format, visibility, rotation,
 * scaler assignment and (if visible) src/dst rectangles.
 */
12575 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
12577 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
12578 const struct drm_framebuffer *fb = plane_state->hw.fb;
12579 struct drm_format_name_buf format_name;
/* No framebuffer bound: short log line and done (early return elided). */
12582 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
12583 plane->base.base.id, plane->base.name,
12584 yesno(plane_state->uapi.visible));
12588 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
12589 plane->base.base.id, plane->base.name,
12590 fb->base.id, fb->width, fb->height,
12591 drm_get_format_name(fb->format->format, &format_name),
12592 yesno(plane_state->uapi.visible));
12593 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
12594 plane_state->hw.rotation, plane_state->scaler_id);
/* src is 16.16 fixed point (DRM_RECT_FP_FMT), dst is integer pixels. */
12595 if (plane_state->uapi.visible)
12596 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
12597 DRM_RECT_FP_ARG(&plane_state->uapi.src),
12598 DRM_RECT_ARG(&plane_state->uapi.dst));
/*
 * Debug helper: dump the full contents of a pipe configuration -- output
 * types/format, bpp, link M/N values, infoframes, modes/timings, pfit,
 * DPLL state, color management registers and every plane on the pipe.
 * @context labels the dump (e.g. which check triggered it).
 *
 * NOTE(review): partial extraction -- an early return for disabled pipes
 * and some closing braces are on elided lines.
 */
12601 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
12602 struct intel_atomic_state *state,
12603 const char *context)
12605 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
12606 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12607 const struct intel_plane_state *plane_state;
12608 struct intel_plane *plane;
12612 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
12613 crtc->base.base.id, crtc->base.name,
12614 yesno(pipe_config->hw.enable), context);
/* Nothing more to dump for a disabled pipe. */
12616 if (!pipe_config->hw.enable)
12619 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
12620 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
12621 yesno(pipe_config->hw.active),
12622 buf, pipe_config->output_types,
12623 output_formats(pipe_config->output_format));
12625 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
12626 transcoder_name(pipe_config->cpu_transcoder),
12627 pipe_config->pipe_bpp, pipe_config->dither);
12629 if (pipe_config->has_pch_encoder)
12630 intel_dump_m_n_config(pipe_config, "fdi",
12631 pipe_config->fdi_lanes,
12632 &pipe_config->fdi_m_n);
12634 if (intel_crtc_has_dp_encoder(pipe_config)) {
12635 intel_dump_m_n_config(pipe_config, "dp m_n",
12636 pipe_config->lane_count, &pipe_config->dp_m_n);
/* DRRS keeps a second M/N set for the downclocked refresh rate. */
12637 if (pipe_config->has_drrs)
12638 intel_dump_m_n_config(pipe_config, "dp m2_n2",
12639 pipe_config->lane_count,
12640 &pipe_config->dp_m2_n2);
12643 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
12644 pipe_config->has_audio, pipe_config->has_infoframe,
12645 pipe_config->infoframes.enable);
12647 if (pipe_config->infoframes.enable &
12648 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
12649 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
12650 if (pipe_config->infoframes.enable &
12651 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
12652 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
12653 if (pipe_config->infoframes.enable &
12654 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
12655 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
12656 if (pipe_config->infoframes.enable &
12657 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
12658 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
12660 DRM_DEBUG_KMS("requested mode:\n");
12661 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
12662 DRM_DEBUG_KMS("adjusted mode:\n");
12663 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
12664 intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
12665 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
12666 pipe_config->port_clock,
12667 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
12668 pipe_config->pixel_rate);
12670 if (INTEL_GEN(dev_priv) >= 9)
12671 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12673 pipe_config->scaler_state.scaler_users,
12674 pipe_config->scaler_state.scaler_id);
/* GMCH platforms have the panel fitter in the GMCH; others in the PCH. */
12676 if (HAS_GMCH(dev_priv))
12677 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12678 pipe_config->gmch_pfit.control,
12679 pipe_config->gmch_pfit.pgm_ratios,
12680 pipe_config->gmch_pfit.lvds_border_bits);
12682 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
12683 pipe_config->pch_pfit.pos,
12684 pipe_config->pch_pfit.size,
12685 enableddisabled(pipe_config->pch_pfit.enabled),
12686 yesno(pipe_config->pch_pfit.force_thru));
12688 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
12689 pipe_config->ips_enabled, pipe_config->double_wide);
12691 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
/* CHV uses the CGM unit; everything else has the CSC register set. */
12693 if (IS_CHERRYVIEW(dev_priv))
12694 DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
12695 pipe_config->cgm_mode, pipe_config->gamma_mode,
12696 pipe_config->gamma_enable, pipe_config->csc_enable);
12698 DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
12699 pipe_config->csc_mode, pipe_config->gamma_mode,
12700 pipe_config->gamma_enable, pipe_config->csc_enable);
12706 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12707 if (plane->pipe == crtc->pipe)
12708 intel_dump_plane_state(plane_state);
/*
 * check_digital_port_conflicts() - verify that the atomic state does not
 * drive the same digital port from more than one connector, and does not
 * mix MST with SST/HDMI on a single port.
 *
 * NOTE(review): this listing is a sampled extraction; braces and several
 * continue/return statements are missing relative to what the visible
 * control flow implies. Comments below describe only what is shown.
 */
12712 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12714 struct drm_device *dev = state->base.dev;
12715 struct drm_connector *connector;
12716 struct drm_connector_list_iter conn_iter;
/* Bitmasks of ports claimed by SST/HDMI and by MST respectively. */
12717 unsigned int used_ports = 0;
12718 unsigned int used_mst_ports = 0;
12722 * We're going to peek into connector->state,
12723 * hence connection_mutex must be held.
12725 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
12728 * Walk the connector list instead of the encoder
12729 * list to detect the problem on ddi platforms
12730 * where there's just one encoder per digital port.
12732 drm_connector_list_iter_begin(dev, &conn_iter);
12733 drm_for_each_connector_iter(connector, &conn_iter) {
12734 struct drm_connector_state *connector_state;
12735 struct intel_encoder *encoder;
/* Prefer the new state from this commit, fall back to current state. */
12738 drm_atomic_get_new_connector_state(&state->base,
12740 if (!connector_state)
12741 connector_state = connector->state;
12743 if (!connector_state->best_encoder)
12746 encoder = to_intel_encoder(connector_state->best_encoder);
12748 WARN_ON(!connector_state->crtc);
12750 switch (encoder->type) {
12751 unsigned int port_mask;
12752 case INTEL_OUTPUT_DDI:
/* DDI encoder type is only valid on platforms with DDI ports. */
12753 if (WARN_ON(!HAS_DDI(to_i915(dev))))
12755 /* else, fall through */
12756 case INTEL_OUTPUT_DP:
12757 case INTEL_OUTPUT_HDMI:
12758 case INTEL_OUTPUT_EDP:
12759 port_mask = 1 << encoder->port;
12761 /* the same port mustn't appear more than once */
12762 if (used_ports & port_mask)
12765 used_ports |= port_mask;
12767 case INTEL_OUTPUT_DP_MST:
12769 1 << encoder->port;
12775 drm_connector_list_iter_end(&conn_iter);
12777 /* can't mix MST and SST/HDMI on the same port */
12778 if (used_ports & used_mst_ports)
/*
 * Sync only the pieces of uapi state into hw state that may change without
 * a full modeset: currently just the color management property blobs.
 */
12785 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
12787 intel_crtc_copy_color_blobs(crtc_state);
/*
 * Copy the user-visible (uapi) crtc state into the hw state that the
 * driver actually programs: enable/active flags and both modes, then the
 * nomodeset subset (color blobs) via the helper above.
 */
12791 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
12793 crtc_state->hw.enable = crtc_state->uapi.enable;
12794 crtc_state->hw.active = crtc_state->uapi.active;
12795 crtc_state->hw.mode = crtc_state->uapi.mode;
12796 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
12797 intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
/*
 * Reverse direction of the copy above: propagate the hw state back into
 * the uapi state so userspace-visible state matches what the hardware is
 * actually doing (used e.g. after readout).
 */
12800 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
12802 crtc_state->uapi.enable = crtc_state->hw.enable;
12803 crtc_state->uapi.active = crtc_state->hw.active;
/* drm_atomic_set_mode_for_crtc() can only fail on blob allocation. */
12804 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
12806 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
12808 /* copy color blobs to uapi */
12809 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
12810 crtc_state->hw.degamma_lut);
12811 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
12812 crtc_state->hw.gamma_lut);
12813 drm_property_replace_blob(&crtc_state->uapi.ctm,
12814 crtc_state->hw.ctm);
/*
 * Reset @crtc_state to a freshly-allocated state while preserving the
 * fields that must survive across compute_config (uapi state, dpll and
 * scaler bookkeeping, etc.), then re-derive hw state from uapi state.
 *
 * NOTE(review): sampled extraction -- the allocation-failure check after
 * intel_crtc_state_alloc() is not visible here.
 */
12818 intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
12820 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12822 struct intel_crtc_state *saved_state;
12824 saved_state = intel_crtc_state_alloc(crtc);
12828 /* free the old crtc_state->hw members */
12829 intel_crtc_free_hw_state(crtc_state);
12831 /* FIXME: before the switch to atomic started, a new pipe_config was
12832 * kzalloc'd. Code that depends on any field being zero should be
12833 * fixed, so that the crtc_state can be safely duplicated. For now,
12834 * only fields that are know to not cause problems are preserved. */
12836 saved_state->uapi = crtc_state->uapi;
12837 saved_state->scaler_state = crtc_state->scaler_state;
12838 saved_state->shared_dpll = crtc_state->shared_dpll;
12839 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12840 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
12841 sizeof(saved_state->icl_port_dplls));
12842 saved_state->crc_enabled = crtc_state->crc_enabled;
/* Watermarks are tracked in crtc_state only on pre-ilk style platforms. */
12843 if (IS_G4X(dev_priv) ||
12844 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12845 saved_state->wm = crtc_state->wm;
12847 * Save the slave bitmask which gets filled for master crtc state during
12848 * slave atomic check call.
12850 if (is_trans_port_sync_master(crtc_state))
12851 saved_state->sync_mode_slaves_mask =
12852 crtc_state->sync_mode_slaves_mask;
/* Wholesale replace the state, then drop the temporary copy. */
12854 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
12855 kfree(saved_state);
12857 intel_crtc_copy_uapi_to_hw_state(crtc_state);
/*
 * Compute the full pipe configuration for a modeset: sanitize sync flags,
 * pick a baseline bpp, derive pipe source size, let every encoder on the
 * crtc adjust the config, and iterate (encoder_retry) until the CRTC and
 * encoders agree. Errors propagate out; -EDEADLK is passed through for
 * atomic lock backoff.
 *
 * NOTE(review): sampled extraction -- the encoder_retry: label, several
 * continue/return statements and error paths are missing from this view.
 */
12863 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12865 struct drm_crtc *crtc = pipe_config->uapi.crtc;
12866 struct drm_atomic_state *state = pipe_config->uapi.state;
12867 struct intel_encoder *encoder;
12868 struct drm_connector *connector;
12869 struct drm_connector_state *connector_state;
/* Default transcoder follows the pipe; encoders may override (eDP/DSI). */
12874 pipe_config->cpu_transcoder =
12875 (enum transcoder) to_intel_crtc(crtc)->pipe;
12878 * Sanitize sync polarity flags based on requested ones. If neither
12879 * positive or negative polarity is requested, treat this as meaning
12880 * negative polarity.
12882 if (!(pipe_config->hw.adjusted_mode.flags &
12883 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12884 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12886 if (!(pipe_config->hw.adjusted_mode.flags &
12887 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12888 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12890 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
/* Remember the pre-negotiation bpp for the debug print at the end. */
12895 base_bpp = pipe_config->pipe_bpp;
12898 * Determine the real pipe dimensions. Note that stereo modes can
12899 * increase the actual pipe size due to the frame doubling and
12900 * insertion of additional space for blanks between the frame. This
12901 * is stored in the crtc timings. We use the requested mode to do this
12902 * computation to clearly distinguish it from the adjusted mode, which
12903 * can be changed by the connectors in the below retry loop.
12905 drm_mode_get_hv_timing(&pipe_config->hw.mode,
12906 &pipe_config->pipe_src_w,
12907 &pipe_config->pipe_src_h);
12909 for_each_new_connector_in_state(state, connector, connector_state, i) {
12910 if (connector_state->crtc != crtc)
12913 encoder = to_intel_encoder(connector_state->best_encoder);
12915 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12916 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12921 * Determine output_types before calling the .compute_config()
12922 * hooks so that the hooks can use this information safely.
12924 if (encoder->compute_output_type)
12925 pipe_config->output_types |=
12926 BIT(encoder->compute_output_type(encoder, pipe_config,
12929 pipe_config->output_types |= BIT(encoder->type);
12933 /* Ensure the port clock defaults are reset when retrying. */
12934 pipe_config->port_clock = 0;
12935 pipe_config->pixel_multiplier = 1;
12937 /* Fill in default crtc timings, allow encoders to overwrite them. */
12938 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
12939 CRTC_STEREO_DOUBLE);
12941 /* Set the crtc_state defaults for trans_port_sync */
12942 pipe_config->master_transcoder = INVALID_TRANSCODER;
12943 ret = icl_add_sync_mode_crtcs(pipe_config);
12945 DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
12950 /* Pass our mode to the connectors and the CRTC to give them a chance to
12951 * adjust it according to limitations or connector properties, and also
12952 * a chance to reject the mode entirely.
12954 for_each_new_connector_in_state(state, connector, connector_state, i) {
12955 if (connector_state->crtc != crtc)
12958 encoder = to_intel_encoder(connector_state->best_encoder);
12959 ret = encoder->compute_config(encoder, pipe_config,
/* -EDEADLK means lock contention, not a real failure -- stay quiet. */
12962 if (ret != -EDEADLK)
12963 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12969 /* Set default port clock if not overwritten by the encoder. Needs to be
12970 * done afterwards in case the encoder adjusts the mode. */
12971 if (!pipe_config->port_clock)
12972 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
12973 * pipe_config->pixel_multiplier;
12975 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12976 if (ret == -EDEADLK)
12979 DRM_DEBUG_KMS("CRTC fixup failed\n");
/* RETRY: crtc is bandwidth constrained; loop back once with new limits. */
12983 if (ret == RETRY) {
12984 if (WARN(!retry, "loop in pipe configuration computation\n"))
12987 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12989 goto encoder_retry;
12992 /* Dithering seems to not pass-through bits correctly when it should, so
12993 * only enable it on 6bpc panels and when its not a compliance
12994 * test requesting 6bpc video pattern.
12996 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12997 !pipe_config->dither_force_disable;
12998 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12999 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
13002 * Make drm_calc_timestamping_constants in
13003 * drm_atomic_helper_update_legacy_modeset_state() happy
13005 pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
/*
 * intel_fuzzy_clock_check - compare two clock values with ~5% tolerance.
 * @clock1: first clock in kHz
 * @clock2: second clock in kHz
 *
 * Returns true if the clocks are identical, false if either is zero
 * (while the other is not), and otherwise true when they differ by less
 * than roughly 5%:  (|c1-c2| + c1 + c2) * 100 / (c1 + c2) < 105.
 *
 * Fix: the extracted listing had dropped the local declaration, braces
 * and all return statements; restored here.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}
/*
 * Compare two link m/n ratios. Exact equality short-circuits to true;
 * a zero component or exact-mode request with unequal values fails.
 * Otherwise the ratios are cross-scaled (body partially missing in this
 * extraction) and compared fuzzily via intel_fuzzy_clock_check().
 *
 * NOTE(review): sampled extraction -- the scaling branches between the
 * n/n2 comparison and the final fuzzy check are not visible here.
 */
13029 intel_compare_m_n(unsigned int m, unsigned int n,
13030 unsigned int m2, unsigned int n2,
13033 if (m == m2 && n == n2)
13036 if (exact || !m || !n || !m2 || !n2)
/* DATA_LINK_M_N_MASK must fit in int for the scaling math below. */
13039 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13046 } else if (n < n2) {
13056 return intel_fuzzy_clock_check(m, m2);
/*
 * Compare two intel_link_m_n structures: the TU must match exactly, and
 * both the gmch and link m/n ratios must match per intel_compare_m_n()
 * (exactly or fuzzily depending on the exact flag).
 */
13060 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13061 const struct intel_link_m_n *m2_n2,
13064 return m_n->tu == m2_n2->tu &&
13065 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13066 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13067 intel_compare_m_n(m_n->link_m, m_n->link_n,
13068 m2_n2->link_m, m2_n2->link_n, exact);
13072 intel_compare_infoframe(const union hdmi_infoframe *a,
13073 const union hdmi_infoframe *b)
13075 return memcmp(a, b, sizeof(*a)) == 0;
/*
 * Report an infoframe mismatch between sw and hw state. During a fastset
 * it is only a debug message (and skipped entirely unless KMS debugging
 * is enabled, since hdmi_infoframe_log() is expensive); otherwise it is
 * a hard error with both infoframes dumped.
 */
13079 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
13080 bool fastset, const char *name,
13081 const union hdmi_infoframe *a,
13082 const union hdmi_infoframe *b)
/* Skip the costly dump when KMS debug output is off. */
13085 if ((drm_debug & DRM_UT_KMS) == 0)
13088 DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
13089 DRM_DEBUG_KMS("expected:\n");
13090 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
13091 DRM_DEBUG_KMS("found:\n");
13092 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
13094 DRM_ERROR("mismatch in %s infoframe\n", name);
13095 DRM_ERROR("expected:\n");
13096 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
13097 DRM_ERROR("found:\n");
13098 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * printf-style helper for reporting a pipe-config field mismatch on a
 * crtc: debug level for fastset checks, error level otherwise. The
 * __printf(4, 5) attribute lets the compiler type-check the varargs.
 */
13102 static void __printf(4, 5)
13103 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
13104 const char *name, const char *format, ...)
13106 struct va_format vaf;
13109 va_start(args, format);
13114 DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
13115 crtc->base.base.id, crtc->base.name, name, &vaf);
13117 DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
13118 crtc->base.base.id, crtc->base.name, name, &vaf);
13123 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13125 if (i915_modparams.fastboot != -1)
13126 return i915_modparams.fastboot;
13128 /* Enable fastboot by default on Skylake and newer */
13129 if (INTEL_GEN(dev_priv) >= 9)
13132 /* Enable fastboot by default on VLV and CHV */
13133 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13136 /* Disabled by default on all others */
/*
 * intel_pipe_config_compare - compare a sw-computed crtc state against
 * another (typically hw readout). @fastset selects tolerant comparison
 * (debug messages) vs. strict verification (errors). Comparison is
 * driven by the PIPE_CONF_CHECK_* macro family defined (and #undef'd)
 * locally below.
 *
 * Fix: five occurrences of the mojibake token "¤t_config" (an HTML
 * entity corruption of "&current_config") are restored to
 * "&current_config" in PIPE_CONF_CHECK_M_N, PIPE_CONF_CHECK_M_N_ALT and
 * PIPE_CONF_CHECK_INFOFRAME. All other visible lines are unchanged.
 */
13141 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13142 const struct intel_crtc_state *pipe_config,
13145 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13146 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
/* First modeset after boot inherits hw state; may need forced modeset. */
13149 bool fixup_inherited = fastset &&
13150 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
13151 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
13153 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13154 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
13158 #define PIPE_CONF_CHECK_X(name) do { \
13159 if (current_config->name != pipe_config->name) { \
13160 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13161 "(expected 0x%08x, found 0x%08x)", \
13162 current_config->name, \
13163 pipe_config->name); \
13168 #define PIPE_CONF_CHECK_I(name) do { \
13169 if (current_config->name != pipe_config->name) { \
13170 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13171 "(expected %i, found %i)", \
13172 current_config->name, \
13173 pipe_config->name); \
13178 #define PIPE_CONF_CHECK_BOOL(name) do { \
13179 if (current_config->name != pipe_config->name) { \
13180 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13181 "(expected %s, found %s)", \
13182 yesno(current_config->name), \
13183 yesno(pipe_config->name)); \
13189 * Checks state where we only read out the enabling, but not the entire
13190 * state itself (like full infoframes or ELD for audio). These states
13191 * require a full modeset on bootup to fix up.
13193 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13194 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13195 PIPE_CONF_CHECK_BOOL(name); \
13197 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13198 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13199 yesno(current_config->name), \
13200 yesno(pipe_config->name)); \
13205 #define PIPE_CONF_CHECK_P(name) do { \
13206 if (current_config->name != pipe_config->name) { \
13207 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13208 "(expected %p, found %p)", \
13209 current_config->name, \
13210 pipe_config->name); \
13215 #define PIPE_CONF_CHECK_M_N(name) do { \
13216 if (!intel_compare_link_m_n(&current_config->name, \
13217 &pipe_config->name,\
13219 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13220 "(expected tu %i gmch %i/%i link %i/%i, " \
13221 "found tu %i, gmch %i/%i link %i/%i)", \
13222 current_config->name.tu, \
13223 current_config->name.gmch_m, \
13224 current_config->name.gmch_n, \
13225 current_config->name.link_m, \
13226 current_config->name.link_n, \
13227 pipe_config->name.tu, \
13228 pipe_config->name.gmch_m, \
13229 pipe_config->name.gmch_n, \
13230 pipe_config->name.link_m, \
13231 pipe_config->name.link_n); \
13236 /* This is required for BDW+ where there is only one set of registers for
13237 * switching between high and low RR.
13238 * This macro can be used whenever a comparison has to be made between one
13239 * hw state and multiple sw state variables.
13241 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13242 if (!intel_compare_link_m_n(&current_config->name, \
13243 &pipe_config->name, !fastset) && \
13244 !intel_compare_link_m_n(&current_config->alt_name, \
13245 &pipe_config->name, !fastset)) { \
13246 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13247 "(expected tu %i gmch %i/%i link %i/%i, " \
13248 "or tu %i gmch %i/%i link %i/%i, " \
13249 "found tu %i, gmch %i/%i link %i/%i)", \
13250 current_config->name.tu, \
13251 current_config->name.gmch_m, \
13252 current_config->name.gmch_n, \
13253 current_config->name.link_m, \
13254 current_config->name.link_n, \
13255 current_config->alt_name.tu, \
13256 current_config->alt_name.gmch_m, \
13257 current_config->alt_name.gmch_n, \
13258 current_config->alt_name.link_m, \
13259 current_config->alt_name.link_n, \
13260 pipe_config->name.tu, \
13261 pipe_config->name.gmch_m, \
13262 pipe_config->name.gmch_n, \
13263 pipe_config->name.link_m, \
13264 pipe_config->name.link_n); \
13269 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13270 if ((current_config->name ^ pipe_config->name) & (mask)) { \
13271 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13272 "(%x) (expected %i, found %i)", \
13274 current_config->name & (mask), \
13275 pipe_config->name & (mask)); \
13280 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13281 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13282 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13283 "(expected %i, found %i)", \
13284 current_config->name, \
13285 pipe_config->name); \
13290 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13291 if (!intel_compare_infoframe(&current_config->infoframes.name, \
13292 &pipe_config->infoframes.name)) { \
13293 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13294 &current_config->infoframes.name, \
13295 &pipe_config->infoframes.name); \
13300 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13301 if (current_config->name1 != pipe_config->name1) { \
13302 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13303 "(expected %i, found %i, won't compare lut values)", \
13304 current_config->name1, \
13305 pipe_config->name1); \
13308 if (!intel_color_lut_equal(current_config->name2, \
13309 pipe_config->name2, pipe_config->name1, \
13310 bit_precision)) { \
13311 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13312 "hw_state doesn't match sw_state"); \
13318 #define PIPE_CONF_QUIRK(quirk) \
13319 ((current_config->quirks | pipe_config->quirks) & (quirk))
13321 PIPE_CONF_CHECK_I(cpu_transcoder);
13323 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13324 PIPE_CONF_CHECK_I(fdi_lanes);
13325 PIPE_CONF_CHECK_M_N(fdi_m_n);
13327 PIPE_CONF_CHECK_I(lane_count);
13328 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13330 if (INTEL_GEN(dev_priv) < 8) {
13331 PIPE_CONF_CHECK_M_N(dp_m_n);
13333 if (current_config->has_drrs)
13334 PIPE_CONF_CHECK_M_N(dp_m2_n2);
13336 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13338 PIPE_CONF_CHECK_X(output_types);
13340 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13341 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13342 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13343 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13344 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13345 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13347 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13348 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13349 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13350 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13351 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13352 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13354 PIPE_CONF_CHECK_I(pixel_multiplier);
13355 PIPE_CONF_CHECK_I(output_format);
13356 PIPE_CONF_CHECK_I(dc3co_exitline);
13357 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13358 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13359 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13360 PIPE_CONF_CHECK_BOOL(limited_color_range);
13362 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13363 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13364 PIPE_CONF_CHECK_BOOL(has_infoframe);
13365 PIPE_CONF_CHECK_BOOL(fec_enable);
13367 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13369 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13370 DRM_MODE_FLAG_INTERLACE);
13372 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13373 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13374 DRM_MODE_FLAG_PHSYNC);
13375 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13376 DRM_MODE_FLAG_NHSYNC);
13377 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13378 DRM_MODE_FLAG_PVSYNC);
13379 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13380 DRM_MODE_FLAG_NVSYNC);
13383 PIPE_CONF_CHECK_X(gmch_pfit.control);
13384 /* pfit ratios are autocomputed by the hw on gen4+ */
13385 if (INTEL_GEN(dev_priv) < 4)
13386 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13387 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13390 * Changing the EDP transcoder input mux
13391 * (A_ONOFF vs. A_ON) requires a full modeset.
13393 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13396 PIPE_CONF_CHECK_I(pipe_src_w);
13397 PIPE_CONF_CHECK_I(pipe_src_h);
13399 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13400 if (current_config->pch_pfit.enabled) {
13401 PIPE_CONF_CHECK_X(pch_pfit.pos);
13402 PIPE_CONF_CHECK_X(pch_pfit.size);
13405 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13406 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13408 PIPE_CONF_CHECK_X(gamma_mode);
13409 if (IS_CHERRYVIEW(dev_priv))
13410 PIPE_CONF_CHECK_X(cgm_mode);
13412 PIPE_CONF_CHECK_X(csc_mode);
13413 PIPE_CONF_CHECK_BOOL(gamma_enable);
13414 PIPE_CONF_CHECK_BOOL(csc_enable);
13416 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13418 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13422 PIPE_CONF_CHECK_BOOL(double_wide);
13424 PIPE_CONF_CHECK_P(shared_dpll);
13425 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13426 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13427 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13428 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13429 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13430 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13431 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13432 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13433 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13434 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13435 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13436 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13437 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13438 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13439 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13440 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13441 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13442 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13443 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13444 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13445 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13446 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13447 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13448 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13449 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13450 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13451 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13452 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13453 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13454 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13455 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13457 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13458 PIPE_CONF_CHECK_X(dsi_pll.div);
13460 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13461 PIPE_CONF_CHECK_I(pipe_bpp);
13463 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13464 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13466 PIPE_CONF_CHECK_I(min_voltage_level);
13468 PIPE_CONF_CHECK_X(infoframes.enable);
13469 PIPE_CONF_CHECK_X(infoframes.gcp);
13470 PIPE_CONF_CHECK_INFOFRAME(avi);
13471 PIPE_CONF_CHECK_INFOFRAME(spd);
13472 PIPE_CONF_CHECK_INFOFRAME(hdmi);
13473 PIPE_CONF_CHECK_INFOFRAME(drm);
13475 PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
13476 PIPE_CONF_CHECK_I(master_transcoder);
13478 PIPE_CONF_CHECK_I(dsc.compression_enable);
13479 PIPE_CONF_CHECK_I(dsc.dsc_split);
13480 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
13482 #undef PIPE_CONF_CHECK_X
13483 #undef PIPE_CONF_CHECK_I
13484 #undef PIPE_CONF_CHECK_BOOL
13485 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13486 #undef PIPE_CONF_CHECK_P
13487 #undef PIPE_CONF_CHECK_FLAGS
13488 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13489 #undef PIPE_CONF_CHECK_COLOR_LUT
13490 #undef PIPE_CONF_QUIRK
/*
 * Cross-check: for PCH-encoder configs, the dotclock implied by the FDI
 * link parameters must fuzzily match the dotclock the encoder chose.
 */
13495 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13496 const struct intel_crtc_state *pipe_config)
13498 if (pipe_config->has_pch_encoder) {
13499 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13500 &pipe_config->fdi_m_n);
13501 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13504 * FDI already provided one idea for the dotclock.
13505 * Yell if the encoder disagrees.
13507 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13508 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13509 fdi_dotclock, dotclock);
/*
 * verify_wm_state - on gen9+ active pipes, read back the hardware
 * watermark and DDB allocation state and compare it against the
 * sw-computed state, logging an error for every mismatching plane WM
 * level, transition WM or DDB entry. The cursor plane is handled
 * separately from the universal planes.
 *
 * NOTE(review): sampled extraction -- the hw-struct declaration line,
 * some closing braces and continue statements are missing from this view.
 */
13513 static void verify_wm_state(struct intel_crtc *crtc,
13514 struct intel_crtc_state *new_crtc_state)
13516 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Heap-allocated scratch for the hw readout (too big for the stack). */
13517 struct skl_hw_state {
13518 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
13519 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
13520 struct skl_ddb_allocation ddb;
13521 struct skl_pipe_wm wm;
13523 struct skl_ddb_allocation *sw_ddb;
13524 struct skl_pipe_wm *sw_wm;
13525 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
13526 const enum pipe pipe = crtc->pipe;
13527 int plane, level, max_level = ilk_wm_max_level(dev_priv);
/* SKL-style WM verification only applies to gen9+ and active pipes. */
13529 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
13532 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
13536 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
13537 sw_wm = &new_crtc_state->wm.skl.optimal;
13539 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
13541 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
13542 sw_ddb = &dev_priv->wm.skl_hw.ddb;
13544 if (INTEL_GEN(dev_priv) >= 11 &&
13545 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
13546 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
13547 sw_ddb->enabled_slices,
13548 hw->ddb.enabled_slices);
/* Per-plane WM levels, transition WM, and DDB entries. */
13551 for_each_universal_plane(dev_priv, pipe, plane) {
13552 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13554 hw_plane_wm = &hw->wm.planes[plane];
13555 sw_plane_wm = &sw_wm->planes[plane];
13558 for (level = 0; level <= max_level; level++) {
13559 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13560 &sw_plane_wm->wm[level]))
13563 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13564 pipe_name(pipe), plane + 1, level,
13565 sw_plane_wm->wm[level].plane_en,
13566 sw_plane_wm->wm[level].plane_res_b,
13567 sw_plane_wm->wm[level].plane_res_l,
13568 hw_plane_wm->wm[level].plane_en,
13569 hw_plane_wm->wm[level].plane_res_b,
13570 hw_plane_wm->wm[level].plane_res_l);
13573 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13574 &sw_plane_wm->trans_wm)) {
13575 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13576 pipe_name(pipe), plane + 1,
13577 sw_plane_wm->trans_wm.plane_en,
13578 sw_plane_wm->trans_wm.plane_res_b,
13579 sw_plane_wm->trans_wm.plane_res_l,
13580 hw_plane_wm->trans_wm.plane_en,
13581 hw_plane_wm->trans_wm.plane_res_b,
13582 hw_plane_wm->trans_wm.plane_res_l);
13586 hw_ddb_entry = &hw->ddb_y[plane];
13587 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
13589 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13590 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
13591 pipe_name(pipe), plane + 1,
13592 sw_ddb_entry->start, sw_ddb_entry->end,
13593 hw_ddb_entry->start, hw_ddb_entry->end);
13599 * If the cursor plane isn't active, we may not have updated it's ddb
13600 * allocation. In that case since the ddb allocation will be updated
13601 * once the plane becomes visible, we can skip this check
13604 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13606 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
13607 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
13610 for (level = 0; level <= max_level; level++) {
13611 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13612 &sw_plane_wm->wm[level]))
13615 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13616 pipe_name(pipe), level,
13617 sw_plane_wm->wm[level].plane_en,
13618 sw_plane_wm->wm[level].plane_res_b,
13619 sw_plane_wm->wm[level].plane_res_l,
13620 hw_plane_wm->wm[level].plane_en,
13621 hw_plane_wm->wm[level].plane_res_b,
13622 hw_plane_wm->wm[level].plane_res_l);
13625 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13626 &sw_plane_wm->trans_wm)) {
13627 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13629 sw_plane_wm->trans_wm.plane_en,
13630 sw_plane_wm->trans_wm.plane_res_b,
13631 sw_plane_wm->trans_wm.plane_res_l,
13632 hw_plane_wm->trans_wm.plane_en,
13633 hw_plane_wm->trans_wm.plane_res_b,
13634 hw_plane_wm->trans_wm.plane_res_l);
13638 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13639 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13641 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13642 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13644 sw_ddb_entry->start, sw_ddb_entry->end,
13645 hw_ddb_entry->start, hw_ddb_entry->end);
/*
 * For every connector in the atomic state attached to @crtc, verify the
 * connector state against the new crtc state and check that the atomic
 * best_encoder agrees with the legacy connector->encoder pointer.
 */
13653 verify_connector_state(struct intel_atomic_state *state,
13654 struct intel_crtc *crtc)
13656 struct drm_connector *connector;
13657 struct drm_connector_state *new_conn_state;
13660 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13661 struct drm_encoder *encoder = connector->encoder;
13662 struct intel_crtc_state *crtc_state = NULL;
/* Only connectors routed to this crtc are of interest. */
13664 if (new_conn_state->crtc != &crtc->base)
13668 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13670 intel_connector_verify_state(crtc_state, new_conn_state);
13672 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13673 "connector's atomic encoder doesn't match legacy encoder\n");
/*
 * For every encoder, cross-check the sw enabled/attached bookkeeping
 * against the connectors in the atomic state and against the hardware:
 * an encoder with no crtc must read back as inactive.
 */
13678 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
13680 struct intel_encoder *encoder;
13681 struct drm_connector *connector;
13682 struct drm_connector_state *old_conn_state, *new_conn_state;
13685 for_each_intel_encoder(&dev_priv->drm, encoder) {
13686 bool enabled = false, found = false;
13689 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13690 encoder->base.base.id,
13691 encoder->base.name);
13693 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
13694 new_conn_state, i) {
13695 if (old_conn_state->best_encoder == &encoder->base)
13698 if (new_conn_state->best_encoder != &encoder->base)
13700 found = enabled = true;
13702 I915_STATE_WARN(new_conn_state->crtc !=
13703 encoder->base.crtc,
13704 "connector's crtc doesn't match encoder crtc\n");
13710 I915_STATE_WARN(!!encoder->base.crtc != enabled,
13711 "encoder's enabled state mismatch "
13712 "(expected %i, found %i)\n",
13713 !!encoder->base.crtc, enabled);
/* Detached encoders must not still be active in hardware. */
13715 if (!encoder->base.crtc) {
13718 active = encoder->get_hw_state(encoder, &pipe);
13719 I915_STATE_WARN(active,
13720 "encoder detached but still enabled on pipe %c.\n",
/*
 * verify_crtc_state - read the pipe config back from hardware into
 * @old_crtc_state (recycled as scratch) and compare it, strictly, against
 * the sw-computed @new_crtc_state; also cross-check per-encoder active
 * state. Mismatches dump both configs.
 */
13727 verify_crtc_state(struct intel_crtc *crtc,
13728 struct intel_crtc_state *old_crtc_state,
13729 struct intel_crtc_state *new_crtc_state)
13731 struct drm_device *dev = crtc->base.dev;
13732 struct drm_i915_private *dev_priv = to_i915(dev);
13733 struct intel_encoder *encoder;
/* old_crtc_state is reset and reused as the hw-readout buffer. */
13734 struct intel_crtc_state *pipe_config = old_crtc_state;
13735 struct drm_atomic_state *state = old_crtc_state->uapi.state;
13738 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
13739 intel_crtc_free_hw_state(old_crtc_state);
13740 intel_crtc_state_reset(old_crtc_state, crtc);
13741 old_crtc_state->uapi.state = state;
13743 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
13745 active = dev_priv->display.get_pipe_config(crtc, pipe_config);
13747 /* we keep both pipes enabled on 830 */
13748 if (IS_I830(dev_priv))
13749 active = new_crtc_state->hw.active;
13751 I915_STATE_WARN(new_crtc_state->hw.active != active,
13752 "crtc active state doesn't match with hw state "
13753 "(expected %i, found %i)\n",
13754 new_crtc_state->hw.active, active);
13756 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
13757 "transitional active state does not match atomic hw state "
13758 "(expected %i, found %i)\n",
13759 new_crtc_state->hw.active, crtc->active);
13761 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13764 active = encoder->get_hw_state(encoder, &pipe);
13765 I915_STATE_WARN(active != new_crtc_state->hw.active,
13766 "[ENCODER:%i] active %i with crtc active %i\n",
13767 encoder->base.base.id, active,
13768 new_crtc_state->hw.active);
13770 I915_STATE_WARN(active && crtc->pipe != pipe,
13771 "Encoder connected to wrong pipe %c\n",
/* Let each active encoder fill in its part of the hw readout. */
13775 encoder->get_config(encoder, pipe_config);
13778 intel_crtc_compute_pixel_rate(pipe_config);
13780 if (!new_crtc_state->hw.active)
13783 intel_pipe_config_sanity_check(dev_priv, pipe_config);
/* fastset=false: strict comparison, mismatches are errors. */
13785 if (!intel_pipe_config_compare(new_crtc_state,
13786 pipe_config, false)) {
13787 I915_STATE_WARN(1, "pipe state doesn't match!\n");
13788 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
13789 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
/*
 * Sanity-check every plane in the atomic state: a plane may only be
 * enabled in hardware if it is either visible or acting as a planar
 * (NV12 UV) slave plane.
 */
13794 intel_verify_planes(struct intel_atomic_state *state)
13796 struct intel_plane *plane;
13797 const struct intel_plane_state *plane_state;
13800 for_each_new_intel_plane_in_state(state, plane,
13802 assert_plane(plane, plane_state->planar_slave ||
13803 plane_state->uapi.visible);
/*
 * Verify one shared DPLL's software tracking (on/active_mask/crtc_mask
 * and cached hw_state) against the state read back from hardware.
 * crtc/new_crtc_state may be NULL (see verify_disabled_dpll_state), in
 * which case only the global pll bookkeeping is checked.
 * NOTE(review): interior lines are missing from this extract; code kept
 * byte-identical.
 */
13807 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13808 struct intel_shared_dpll *pll,
13809 struct intel_crtc *crtc,
13810 struct intel_crtc_state *new_crtc_state)
13812 struct intel_dpll_hw_state dpll_hw_state;
13813 unsigned int crtc_mask;
13816 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13818 DRM_DEBUG_KMS("%s\n", pll->info->name);
13820 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
/* Always-on PLLs legitimately stay enabled with no users. */
13822 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13823 I915_STATE_WARN(!pll->on && pll->active_mask,
13824 "pll in active use but not on in sw tracking\n");
13825 I915_STATE_WARN(pll->on && !pll->active_mask,
13826 "pll is on but not used by any active crtc\n");
13827 I915_STATE_WARN(pll->on != active,
13828 "pll on state mismatch (expected %i, found %i)\n",
/* active_mask must be a subset of the reference mask. */
13833 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13834 "more active pll users than references: %x vs %x\n",
13835 pll->active_mask, pll->state.crtc_mask);
13840 crtc_mask = drm_crtc_mask(&crtc->base);
/* Per-crtc checks: this crtc's bit must (not) be set depending on
 * whether the new state has the crtc active. */
13842 if (new_crtc_state->hw.active)
13843 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13844 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13845 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13847 I915_STATE_WARN(pll->active_mask & crtc_mask,
13848 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13849 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13851 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13852 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13853 crtc_mask, pll->state.crtc_mask);
/* When the pll is on, the cached hw_state must match the readback. */
13855 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13857 sizeof(dpll_hw_state)),
13858 "pll hw state mismatch\n");
/*
 * Verify the DPLL assigned to this crtc in the new state, and — when the
 * crtc switched PLLs during the modeset — also verify the crtc has been
 * fully removed from its old PLL's masks.
 */
13862 verify_shared_dpll_state(struct intel_crtc *crtc,
13863 struct intel_crtc_state *old_crtc_state,
13864 struct intel_crtc_state *new_crtc_state)
13866 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13868 if (new_crtc_state->shared_dpll)
13869 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13871 if (old_crtc_state->shared_dpll &&
13872 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13873 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13874 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13876 I915_STATE_WARN(pll->active_mask & crtc_mask,
13877 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13878 pipe_name(drm_crtc_index(&crtc->base)));
/* NOTE(review): format says "found %x" but the argument is
 * pipe_name() (a character) — specifier/argument mismatch; should
 * be %c (or pass crtc_mask for %x). Left as-is in this extract. */
13879 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13880 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13881 pipe_name(drm_crtc_index(&crtc->base)));
/*
 * Run the full post-commit state checker (watermarks, connectors, crtc,
 * shared DPLLs) for one crtc. Skipped entirely unless the crtc went
 * through a modeset or a fastset (update_pipe).
 */
13886 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13887 struct intel_atomic_state *state,
13888 struct intel_crtc_state *old_crtc_state,
13889 struct intel_crtc_state *new_crtc_state)
13891 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13894 verify_wm_state(crtc, new_crtc_state);
13895 verify_connector_state(state, crtc);
13896 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13897 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
/*
 * Verify every shared DPLL's global bookkeeping with no particular crtc
 * in mind (crtc/new_crtc_state passed as NULL).
 */
13901 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13905 for (i = 0; i < dev_priv->num_shared_dpll; i++)
13906 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
/*
 * State-checker pass for the fully-disabled case: encoders, connectors
 * with no crtc, and all shared DPLLs.
 */
13910 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
13911 struct intel_atomic_state *state)
13913 verify_encoder_state(dev_priv, state);
13914 verify_connector_state(state, NULL);
13915 verify_disabled_dpll_state(dev_priv);
/*
 * Refresh vblank timestamping constants and the per-platform
 * scanline_offset fixup for a crtc that is (becoming) active. The long
 * comment below documents the hardware scanline-counter quirks that the
 * offset compensates for.
 */
13919 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
13921 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13922 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13923 const struct drm_display_mode *adjusted_mode =
13924 &crtc_state->hw.adjusted_mode;
13926 drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
13929 * The scanline counter increments at the leading edge of hsync.
13931 * On most platforms it starts counting from vtotal-1 on the
13932 * first active line. That means the scanline counter value is
13933 * always one less than what we would expect. Ie. just after
13934 * start of vblank, which also occurs at start of hsync (on the
13935 * last active line), the scanline counter will read vblank_start-1.
13937 * On gen2 the scanline counter starts counting from 1 instead
13938 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13939 * to keep the value positive), instead of adding one.
13941 * On HSW+ the behaviour of the scanline counter depends on the output
13942 * type. For DP ports it behaves like most other platforms, but on HDMI
13943 * there's an extra 1 line difference. So we need to add two instead of
13944 * one to the value.
13946 * On VLV/CHV DSI the scanline counter would appear to increment
13947 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13948 * that means we can't tell whether we're in vblank or not while
13949 * we're on that particular line. We must still set scanline_offset
13950 * to 1 so that the vblank timestamps come out correct when we query
13951 * the scanline counter from within the vblank interrupt handler.
13952 * However if queried just before the start of vblank we'll get an
13953 * answer that's slightly in the future.
13955 if (IS_GEN(dev_priv, 2)) {
13958 vtotal = adjusted_mode->crtc_vtotal;
13959 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13962 crtc->scanline_offset = vtotal - 1;
13963 } else if (HAS_DDI(dev_priv) &&
13964 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13965 crtc->scanline_offset = 2;
13967 crtc->scanline_offset = 1;
/*
 * Release shared-DPLL references for every crtc undergoing a full
 * modeset. No-op on platforms without a crtc_compute_clock hook (i.e.
 * platforms that don't use shared DPLLs this way).
 */
13971 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13973 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13974 struct intel_crtc_state *new_crtc_state;
13975 struct intel_crtc *crtc;
13978 if (!dev_priv->display.crtc_compute_clock)
13981 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13982 if (!needs_modeset(new_crtc_state))
13985 intel_release_shared_dplls(state, crtc);
13990 * This implements the workaround described in the "notes" section of the mode
13991 * set sequence documentation. When going from no pipes or single pipe to
13992 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13993 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13995 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13997 struct intel_crtc_state *crtc_state;
13998 struct intel_crtc *crtc;
13999 struct intel_crtc_state *first_crtc_state = NULL;
14000 struct intel_crtc_state *other_crtc_state = NULL;
14001 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
14004 /* look at all crtc's that are going to be enabled during the modeset */
14005 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14006 if (!crtc_state->hw.active ||
14007 !needs_modeset(crtc_state))
/* Remember the first two crtcs being enabled; more than two
 * enabled crtcs means the w/a will be applied between them. */
14010 if (first_crtc_state) {
14011 other_crtc_state = crtc_state;
14014 first_crtc_state = crtc_state;
14015 first_pipe = crtc->pipe;
14019 /* No workaround needed? */
14020 if (!first_crtc_state)
14023 /* w/a possibly needed, check how many crtc's are already enabled. */
14024 for_each_intel_crtc(state->base.dev, crtc) {
/* Pulls every crtc into the state; may return -EDEADLK etc. */
14025 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
14026 if (IS_ERR(crtc_state))
14027 return PTR_ERR(crtc_state);
14029 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
14031 if (!crtc_state->hw.active ||
14032 needs_modeset(crtc_state))
14035 /* 2 or more enabled crtcs means no need for w/a */
14036 if (enabled_pipe != INVALID_PIPE)
14039 enabled_pipe = crtc->pipe;
/* Record which pipe the newly-enabled crtc must wait on. */
14042 if (enabled_pipe != INVALID_PIPE)
14043 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
14044 else if (other_crtc_state)
14045 other_crtc_state->hsw_workaround_pipe = first_pipe;
/*
 * Global checks run once per atomic commit that contains a modeset:
 * recompute active_pipes, track which pipes changed active state, take
 * the global-state lock when needed, recompute cdclk, drop stale PLL
 * references, and apply the HSW planes workaround.
 */
14050 static int intel_modeset_checks(struct intel_atomic_state *state)
14052 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14053 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14054 struct intel_crtc *crtc;
14057 /* keep the current setting */
14058 if (!state->cdclk.force_min_cdclk_changed)
14059 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
14061 state->modeset = true;
14062 state->active_pipes = dev_priv->active_pipes;
14063 state->cdclk.logical = dev_priv->cdclk.logical;
14064 state->cdclk.actual = dev_priv->cdclk.actual;
14066 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14067 new_crtc_state, i) {
14068 if (new_crtc_state->hw.active)
14069 state->active_pipes |= BIT(crtc->pipe);
14071 state->active_pipes &= ~BIT(crtc->pipe);
14073 if (old_crtc_state->hw.active != new_crtc_state->hw.active)
14074 state->active_pipe_changes |= BIT(crtc->pipe);
/* Changing the set of active pipes affects global resources. */
14077 if (state->active_pipe_changes) {
14078 ret = intel_atomic_lock_global_state(state);
14083 ret = intel_modeset_calc_cdclk(state);
14087 intel_modeset_clear_plls(state);
14089 if (IS_HASWELL(dev_priv))
14090 return haswell_mode_set_planes_workaround(state);
14096 * Handle calculation of various watermark data at the end of the atomic check
14097 * phase. The code here should be run after the per-crtc and per-plane 'check'
14098 * handlers to ensure that all derived state has been updated.
14100 static int calc_watermark_data(struct intel_atomic_state *state)
14102 struct drm_device *dev = state->base.dev;
14103 struct drm_i915_private *dev_priv = to_i915(dev);
14105 /* Is there platform-specific watermark information to calculate? */
14106 if (dev_priv->display.compute_global_watermarks)
14107 return dev_priv->display.compute_global_watermarks(state);
/*
 * Downgrade a requested full modeset to a fastset when the old and new
 * crtc states are equivalent under the fuzzy comparison. On a fastset
 * the current M/N and DRRS values are carried over (see comment below).
 */
14112 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14113 struct intel_crtc_state *new_crtc_state)
14115 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14118 new_crtc_state->uapi.mode_changed = false;
14119 new_crtc_state->update_pipe = true;
14122 * If we're not doing the full modeset we want to
14123 * keep the current M/N values as they may be
14124 * sufficiently different to the computed values
14125 * to cause problems.
14127 * FIXME: should really copy more fuzzy state here
14129 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
14130 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
14131 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
14132 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
/*
 * Pull the plane states named in plane_ids_mask into the atomic state
 * for this crtc. Returns 0 or a -errno from
 * intel_atomic_get_plane_state() (e.g. -EDEADLK).
 */
14135 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14136 struct intel_crtc *crtc,
14139 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14140 struct intel_plane *plane;
14142 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14143 struct intel_plane_state *plane_state;
14145 if ((plane_ids_mask & BIT(plane->id)) == 0)
14148 plane_state = intel_atomic_get_plane_state(state, plane);
14149 if (IS_ERR(plane_state))
14150 return PTR_ERR(plane_state);
/*
 * True on platforms where the number of active planes changes the
 * planes' minimum cdclk requirement (HSW/BDW, VLV/CHV, IVB).
 */
14156 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14158 /* See {hsw,vlv,ivb}_plane_ratio() */
14159 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14160 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14161 IS_IVYBRIDGE(dev_priv);
/*
 * Per-plane atomic checks: link NV12 Y/UV planes, run each plane's
 * check hook, pull in extra planes where the active-plane count affects
 * min cdclk, then compute per-plane min cdclk. *need_modeset is OR-ed
 * with true when a cdclk change forces a modeset.
 */
14164 static int intel_atomic_check_planes(struct intel_atomic_state *state,
14165 bool *need_modeset)
14167 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14168 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14169 struct intel_plane_state *plane_state;
14170 struct intel_plane *plane;
14171 struct intel_crtc *crtc;
14174 ret = icl_add_linked_planes(state);
14178 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
14179 ret = intel_plane_atomic_check(state, plane);
14181 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
14182 plane->base.base.id, plane->base.name);
14187 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14188 new_crtc_state, i) {
14189 u8 old_active_planes, new_active_planes;
14191 ret = icl_check_nv12_planes(new_crtc_state);
14196 * On some platforms the number of active planes affects
14197 * the planes' minimum cdclk calculation. Add such planes
14198 * to the state before we compute the minimum cdclk.
14200 if (!active_planes_affects_min_cdclk(dev_priv))
/* Cursor doesn't count toward the plane-ratio calculation. */
14203 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
14204 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
14206 if (hweight8(old_active_planes) == hweight8(new_active_planes))
14209 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
14215 * active_planes bitmask has been updated, and potentially
14216 * affected planes are part of the state. We can now
14217 * compute the minimum cdclk for each plane.
14219 for_each_new_intel_plane_in_state(state, plane, plane_state, i)
14220 *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
/*
 * Run the per-crtc atomic check hook for every new crtc state, logging
 * which crtc failed.
 */
14225 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14227 struct intel_crtc_state *crtc_state;
14228 struct intel_crtc *crtc;
14231 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14232 int ret = intel_crtc_atomic_check(state, crtc);
14234 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
14235 crtc->base.base.id, crtc->base.name);
14244 * intel_atomic_check - validate state object
14246 * @_state: state to validate
/*
 * Top-level i915 atomic_check implementation: propagates INHERITED
 * flags, runs the DRM helper modeset check, computes/fastset-downgrades
 * pipe configs, then performs global (cdclk/plane/crtc/watermark/bw)
 * checks, dumping pipe configs on success and on failure.
 */
14248 static int intel_atomic_check(struct drm_device *dev,
14249 struct drm_atomic_state *_state)
14251 struct drm_i915_private *dev_priv = to_i915(dev);
14252 struct intel_atomic_state *state = to_intel_atomic_state(_state);
14253 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14254 struct intel_crtc *crtc;
14256 bool any_ms = false;
14258 /* Catch I915_MODE_FLAG_INHERITED */
14259 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14260 new_crtc_state, i) {
14261 if (new_crtc_state->hw.mode.private_flags !=
14262 old_crtc_state->hw.mode.private_flags)
14263 new_crtc_state->uapi.mode_changed = true;
14266 ret = drm_atomic_helper_check_modeset(dev, &state->base);
14270 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14271 new_crtc_state, i) {
14272 if (!needs_modeset(new_crtc_state)) {
14274 intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14279 if (!new_crtc_state->uapi.enable) {
14280 intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
/* Full modeset: recompute the pipe config from scratch, then
 * see if it can be downgraded to a fastset. */
14286 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14290 ret = intel_modeset_pipe_config(new_crtc_state);
14294 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14296 if (needs_modeset(new_crtc_state))
14300 if (any_ms && !check_digital_port_conflicts(state)) {
14301 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14306 ret = drm_dp_mst_atomic_check(&state->base);
14310 any_ms |= state->cdclk.force_min_cdclk_changed;
14312 ret = intel_atomic_check_planes(state, &any_ms);
14317 ret = intel_modeset_checks(state);
14321 state->cdclk.logical = dev_priv->cdclk.logical;
14324 ret = intel_atomic_check_crtcs(state);
14328 intel_fbc_choose_crtc(dev_priv, state);
14329 ret = calc_watermark_data(state);
14333 ret = intel_bw_atomic_check(state);
/* Dump the final config of every crtc that will change. */
14337 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14338 new_crtc_state, i) {
14339 if (!needs_modeset(new_crtc_state) &&
14340 !new_crtc_state->update_pipe)
14343 intel_dump_pipe_config(new_crtc_state, state,
14344 needs_modeset(new_crtc_state) ?
14345 "[modeset]" : "[fastset]");
/* -EDEADLK is normal ww-mutex backoff, not worth dumping. */
14351 if (ret == -EDEADLK)
14355 * FIXME would probably be nice to know which crtc specifically
14356 * caused the failure, in cases where we can pinpoint it.
14358 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14360 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
/* Thin wrapper: pin/prepare all planes' framebuffers for commit. */
14365 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14367 return drm_atomic_helper_prepare_planes(state->base.dev,
/*
 * Return the current vblank count for the crtc, falling back to the
 * software-timestamp-based count when the hardware counter is unusable
 * (max_vblank_count == 0).
 */
14371 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14373 struct drm_device *dev = crtc->base.dev;
14374 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14376 if (!vblank->max_vblank_count)
14377 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14379 return crtc->base.funcs->get_vblank_counter(&crtc->base);
/*
 * Enable CPU (and, for PCH encoders, PCH transcoder) FIFO underrun
 * reporting for the crtc. On gen2 this is skipped while no planes are
 * active.
 */
14382 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14383 struct intel_crtc_state *crtc_state)
14385 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14387 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
14388 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14390 if (crtc_state->has_pch_encoder) {
14391 enum pipe pch_transcoder =
14392 intel_crtc_pch_transcoder(crtc);
14394 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
/*
 * Apply the parts of a fastset that a flip alone doesn't cover: pipe
 * source size, panel fitter state, and (gen11+) the pipe chicken
 * register.
 */
14398 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
14399 const struct intel_crtc_state *new_crtc_state)
14401 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
14402 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14405 * Update pipe size and adjust fitter if needed: the reason for this is
14406 * that in compute_mode_changes we check the native mode (not the pfit
14407 * mode) to see if we can flip rather than do a full mode set. In the
14408 * fastboot case, we'll flip, but if we don't update the pipesrc and
14409 * pfit state, we'll end up with a big fb scanned out into the wrong
14412 intel_set_pipe_src_size(new_crtc_state);
14414 /* on skylake this is done by detaching scalers */
14415 if (INTEL_GEN(dev_priv) >= 9) {
14416 skl_detach_scalers(new_crtc_state);
14418 if (new_crtc_state->pch_pfit.enabled)
14419 skylake_pfit_enable(new_crtc_state);
14420 } else if (HAS_PCH_SPLIT(dev_priv)) {
14421 if (new_crtc_state->pch_pfit.enabled)
14422 ironlake_pfit_enable(new_crtc_state);
14423 else if (old_crtc_state->pch_pfit.enabled)
14424 ironlake_pfit_disable(old_crtc_state);
14427 if (INTEL_GEN(dev_priv) >= 11)
14428 icl_set_pipe_chicken(crtc);
/*
 * Program pipe-level state during the vblank-evade critical section:
 * color management, scaler detach, PIPEMISC, fastset updates and
 * watermarks. During a full modeset most of this was already done when
 * the crtc was enabled.
 */
14431 static void commit_pipe_config(struct intel_atomic_state *state,
14432 struct intel_crtc_state *old_crtc_state,
14433 struct intel_crtc_state *new_crtc_state)
14435 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
14436 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14437 bool modeset = needs_modeset(new_crtc_state);
14440 * During modesets pipe configuration was programmed as the
14441 * CRTC was enabled.
14444 if (new_crtc_state->uapi.color_mgmt_changed ||
14445 new_crtc_state->update_pipe)
14446 intel_color_commit(new_crtc_state);
14448 if (INTEL_GEN(dev_priv) >= 9)
14449 skl_detach_scalers(new_crtc_state);
14451 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14452 bdw_set_pipemisc(new_crtc_state);
14454 if (new_crtc_state->update_pipe)
14455 intel_pipe_fastset(old_crtc_state, new_crtc_state);
14458 if (dev_priv->display.atomic_update_watermarks)
14459 dev_priv->display.atomic_update_watermarks(state, crtc);
/*
 * Commit one crtc: enable it on a modeset (or run the fastset path),
 * update FBC, then perform the plane/pipe updates inside the vblank
 * evasion window. Also arms FIFO underrun reporting on the first
 * fastset of a BIOS-inherited config (see comment below).
 */
14462 static void intel_update_crtc(struct intel_crtc *crtc,
14463 struct intel_atomic_state *state,
14464 struct intel_crtc_state *old_crtc_state,
14465 struct intel_crtc_state *new_crtc_state)
14467 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14468 bool modeset = needs_modeset(new_crtc_state);
14469 struct intel_plane_state *new_plane_state =
14470 intel_atomic_get_new_plane_state(state,
14471 to_intel_plane(crtc->base.primary));
14474 intel_crtc_update_active_timings(new_crtc_state);
14476 dev_priv->display.crtc_enable(state, crtc);
14478 /* vblanks work again, re-enable pipe CRC. */
14479 intel_crtc_enable_pipe_crc(crtc);
14481 if (new_crtc_state->preload_luts &&
14482 (new_crtc_state->uapi.color_mgmt_changed ||
14483 new_crtc_state->update_pipe))
14484 intel_color_load_luts(new_crtc_state);
14486 intel_pre_plane_update(state, crtc);
14488 if (new_crtc_state->update_pipe)
14489 intel_encoders_update_pipe(state, crtc);
14492 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
14493 intel_fbc_disable(crtc);
14494 else if (new_plane_state)
14495 intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
14497 /* Perform vblank evasion around commit operation */
14498 intel_pipe_update_start(new_crtc_state);
14500 commit_pipe_config(state, old_crtc_state, new_crtc_state);
14502 if (INTEL_GEN(dev_priv) >= 9)
14503 skl_update_planes_on_crtc(state, crtc);
14505 i9xx_update_planes_on_crtc(state, crtc);
14507 intel_pipe_update_end(new_crtc_state);
14510 * We usually enable FIFO underrun interrupts as part of the
14511 * CRTC enable sequence during modesets. But when we inherit a
14512 * valid pipe configuration from the BIOS we need to take care
14513 * of enabling them on the CRTC's first fastset.
14515 if (new_crtc_state->update_pipe && !modeset &&
14516 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
14517 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * For a transcoder port sync master, return the crtc driving the slave
 * transcoder. Exactly one bit must be set in sync_mode_slaves_mask;
 * the transcoder number is mapped 1:1 onto a pipe here.
 */
14520 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
14522 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
14523 enum transcoder slave_transcoder;
14525 WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
14527 slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
14528 return intel_get_crtc_for_pipe(dev_priv,
14529 (enum pipe)slave_transcoder);
/*
 * Tear down one crtc for a modeset: planes off, pipe CRC off (before
 * vblank goes away), crtc disable, FBC off, shared DPLL released, and
 * initial watermarks reprogrammed on non-GMCH platforms if the crtc
 * stays off.
 */
14532 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
14533 struct intel_crtc_state *old_crtc_state,
14534 struct intel_crtc_state *new_crtc_state,
14535 struct intel_crtc *crtc)
14537 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14539 intel_crtc_disable_planes(state, crtc);
14542 * We need to disable pipe CRC before disabling the pipe,
14543 * or we race against vblank off.
14545 intel_crtc_disable_pipe_crc(crtc);
14547 dev_priv->display.crtc_disable(state, crtc);
14548 crtc->active = false;
14549 intel_fbc_disable(crtc);
14550 intel_disable_shared_dpll(old_crtc_state);
14552 /* FIXME unify this for all platforms */
14553 if (!new_crtc_state->hw.active &&
14554 !HAS_GMCH(dev_priv) &&
14555 dev_priv->display.initial_watermarks)
14556 dev_priv->display.initial_watermarks(state, crtc);
/*
 * Disable all crtcs that need it, in two passes: transcoder port sync
 * slaves first (their vblanks are gated by the master), then everything
 * else. `handled` tracks pipes disabled in pass one.
 */
14559 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
14561 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
14562 struct intel_crtc *crtc;
14566 /* Only disable port sync slaves */
14567 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14568 new_crtc_state, i) {
14569 if (!needs_modeset(new_crtc_state))
14572 if (!old_crtc_state->hw.active)
14575 /* In case of Transcoder port Sync master slave CRTCs can be
14576 * assigned in any order and we need to make sure that
14577 * slave CRTCs are disabled first and then master CRTC since
14578 * Slave vblanks are masked till Master Vblanks.
14580 if (!is_trans_port_sync_slave(old_crtc_state))
14583 intel_pre_plane_update(state, crtc);
14584 intel_old_crtc_state_disables(state, old_crtc_state,
14585 new_crtc_state, crtc);
14586 handled |= BIT(crtc->pipe);
14589 /* Disable everything else left on */
14590 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14591 new_crtc_state, i) {
14592 if (!needs_modeset(new_crtc_state) ||
14593 (handled & BIT(crtc->pipe)))
14596 intel_pre_plane_update(state, crtc);
14597 if (old_crtc_state->hw.active)
14598 intel_old_crtc_state_disables(state, old_crtc_state,
14599 new_crtc_state, crtc);
/*
 * Default (non-skl) enable path: update every crtc that will be active
 * in the new state, in state order.
 */
14603 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14605 struct intel_crtc *crtc;
14606 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14609 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14610 if (!new_crtc_state->hw.active)
14613 intel_update_crtc(crtc, state, old_crtc_state,
/*
 * Minimal crtc enable used by the transcoder port sync sequence:
 * timings, crtc_enable hook, and pipe CRC back on. Plane/pipe updates
 * happen later in intel_post_crtc_enable_updates().
 */
14618 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
14619 struct intel_atomic_state *state,
14620 struct intel_crtc_state *new_crtc_state)
14622 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14624 intel_crtc_update_active_timings(new_crtc_state);
14625 dev_priv->display.crtc_enable(state, crtc);
14626 intel_crtc_enable_pipe_crc(crtc);
/*
 * Find the DP connector driven by this crtc and switch its DP_TP_CTL
 * out of link-training (to Normal) by stopping link training.
 */
14629 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14630 struct intel_atomic_state *state)
14632 struct drm_connector *uninitialized_var(conn);
14633 struct drm_connector_state *conn_state;
14634 struct intel_dp *intel_dp;
14637 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14638 if (conn_state->crtc == &crtc->base)
14641 intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14642 intel_dp_stop_link_train(intel_dp);
/*
 * Post-enable tail of the port sync sequence: FBC, vblank-evaded pipe
 * config and plane programming, and FIFO underrun arming on the first
 * fastset of a BIOS-inherited config. Mirrors the tail of
 * intel_update_crtc().
 */
14645 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
14646 struct intel_atomic_state *state)
14648 struct intel_crtc_state *new_crtc_state =
14649 intel_atomic_get_new_crtc_state(state, crtc);
14650 struct intel_crtc_state *old_crtc_state =
14651 intel_atomic_get_old_crtc_state(state, crtc);
14652 struct intel_plane_state *new_plane_state =
14653 intel_atomic_get_new_plane_state(state,
14654 to_intel_plane(crtc->base.primary));
14655 bool modeset = needs_modeset(new_crtc_state);
14657 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
14658 intel_fbc_disable(crtc);
14659 else if (new_plane_state)
14660 intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
14662 /* Perform vblank evasion around commit operation */
14663 intel_pipe_update_start(new_crtc_state);
14664 commit_pipe_config(state, old_crtc_state, new_crtc_state);
14665 skl_update_planes_on_crtc(state, crtc);
14666 intel_pipe_update_end(new_crtc_state);
14669 * We usually enable FIFO underrun interrupts as part of the
14670 * CRTC enable sequence during modesets. But when we inherit a
14671 * valid pipe configuration from the BIOS we need to take care
14672 * of enabling them on the CRTC's first fastset.
14674 if (new_crtc_state->update_pipe && !modeset &&
14675 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
14676 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * Enable a transcoder port sync master/slave pair in the required
 * order: enable slave then master with DP_TP_CTL left in Idle, flip
 * both to Normal (slave first), then run the post-enable updates for
 * both crtcs.
 */
14679 static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
14680 struct intel_atomic_state *state,
14681 struct intel_crtc_state *old_crtc_state,
14682 struct intel_crtc_state *new_crtc_state)
14684 struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
14685 struct intel_crtc_state *new_slave_crtc_state =
14686 intel_atomic_get_new_crtc_state(state, slave_crtc);
14687 struct intel_crtc_state *old_slave_crtc_state =
14688 intel_atomic_get_old_crtc_state(state, slave_crtc);
14690 WARN_ON(!slave_crtc || !new_slave_crtc_state ||
14691 !old_slave_crtc_state);
14693 DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
14694 crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
14695 slave_crtc->base.name);
14697 /* Enable seq for slave with DP_TP_CTL left Idle until the
14700 intel_crtc_enable_trans_port_sync(slave_crtc,
14702 new_slave_crtc_state);
14704 /* Enable seq for master with DP_TP_CTL left Idle */
14705 intel_crtc_enable_trans_port_sync(crtc,
14709 /* Set Slave's DP_TP_CTL to Normal */
14710 intel_set_dp_tp_ctl_normal(slave_crtc,
14713 /* Set Master's DP_TP_CTL To Normal */
14714 usleep_range(200, 400);
14715 intel_set_dp_tp_ctl_normal(crtc,
14718 /* Now do the post crtc enable for all master and slaves */
14719 intel_post_crtc_enable_updates(slave_crtc,
14721 intel_post_crtc_enable_updates(crtc,
/*
 * SKL+ enable path: orders crtc updates so that DDB (display buffer)
 * allocations never overlap between steps, enabling/disabling the
 * second DBuf slice around the update as needed. Repeatedly scans the
 * dirty set, committing any pipe whose new allocation doesn't overlap
 * the allocations still in `entries`.
 */
14725 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
14727 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14728 struct intel_crtc *crtc;
14729 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14730 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
14731 u8 required_slices = state->wm_results.ddb.enabled_slices;
14732 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
14733 u8 dirty_pipes = 0;
14736 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14737 /* ignore allocations for crtc's that have been turned off. */
14738 if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active)
14739 entries[i] = old_crtc_state->wm.skl.ddb;
14740 if (new_crtc_state->hw.active)
14741 dirty_pipes |= BIT(crtc->pipe);
14744 /* If 2nd DBuf slice required, enable it here */
14745 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
14746 icl_dbuf_slices_update(dev_priv, required_slices);
14749 * Whenever the number of active pipes changes, we need to make sure we
14750 * update the pipes in the right order so that their ddb allocations
14751 * never overlap with each other in between CRTC updates. Otherwise we'll
14752 * cause pipe underruns and other bad stuff.
14754 while (dirty_pipes) {
14755 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14756 new_crtc_state, i) {
14757 enum pipe pipe = crtc->pipe;
14758 bool modeset = needs_modeset(new_crtc_state);
14760 if ((dirty_pipes & BIT(pipe)) == 0)
/* Defer this pipe until the overlapping allocation clears. */
14763 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
14765 INTEL_NUM_PIPES(dev_priv), i))
14768 entries[i] = new_crtc_state->wm.skl.ddb;
14769 dirty_pipes &= ~BIT(pipe);
/* Port sync masters pull their slaves along; slaves are
 * handled inside intel_update_trans_port_sync_crtcs(). */
14771 if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
14772 if (is_trans_port_sync_master(new_crtc_state))
14773 intel_update_trans_port_sync_crtcs(crtc,
14780 intel_update_crtc(crtc, state, old_crtc_state,
14785 * If this is an already active pipe, its DDB changed,
14786 * and this isn't the last pipe that needs updating
14787 * then we need to wait for a vblank to pass for the
14788 * new ddb allocation to take effect.
14790 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
14791 &old_crtc_state->wm.skl.ddb) &&
14792 !modeset && dirty_pipes)
14793 intel_wait_for_vblank(dev_priv, pipe);
14797 /* If 2nd DBuf slice is no more required disable it */
14798 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
14799 icl_dbuf_slices_update(dev_priv, required_slices);
/*
 * Drop the reference on every atomic state queued on the lock-free
 * free_list (states whose cleanup was deferred to process context).
 */
14802 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14804 struct intel_atomic_state *state, *next;
14805 struct llist_node *freed;
14807 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14808 llist_for_each_entry_safe(state, next, freed, freed)
14809 drm_atomic_state_put(&state->base);
/* Workqueue entry point that drains the deferred-free state list. */
14812 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14814 struct drm_i915_private *dev_priv =
14815 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14817 intel_atomic_helper_free_state(dev_priv);
/*
 * Wait (uninterruptibly) for the commit's sw fence to signal, while
 * also waking up if a modeset-affecting GPU reset begins — whichever
 * comes first — by waiting on both waitqueues simultaneously.
 */
14820 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
14822 struct wait_queue_entry wait_fence, wait_reset;
14823 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
14825 init_wait_entry(&wait_fence, 0);
14826 init_wait_entry(&wait_reset, 0);
14828 prepare_to_wait(&intel_state->commit_ready.wait,
14829 &wait_fence, TASK_UNINTERRUPTIBLE);
14830 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
14831 I915_RESET_MODESET),
14832 &wait_reset, TASK_UNINTERRUPTIBLE);
14835 if (i915_sw_fence_done(&intel_state->commit_ready) ||
14836 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
14841 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
14842 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
14843 I915_RESET_MODESET),
14847 static void intel_atomic_cleanup_work(struct work_struct *work)
14849 struct drm_atomic_state *state =
14850 container_of(work, struct drm_atomic_state, commit_work);
14851 struct drm_i915_private *i915 = to_i915(state->dev);
14853 drm_atomic_helper_cleanup_planes(&i915->drm, state);
14854 drm_atomic_helper_commit_cleanup_done(state);
14855 drm_atomic_state_put(state);
14857 intel_atomic_helper_free_state(i915);
/*
 * Tail of an atomic commit: waits for dependencies, applies the new
 * state to the hardware (disables, modeset, plane updates, enables),
 * then performs post-commit verification and cleanup.  Runs either
 * inline (blocking commits) or from a worker (nonblocking commits).
 */
14860 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
14862 struct drm_device *dev = state->base.dev;
14863 struct drm_i915_private *dev_priv = to_i915(dev);
14864 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
14865 struct intel_crtc *crtc;
14866 u64 put_domains[I915_MAX_PIPES] = {};
14867 intel_wakeref_t wakeref = 0;
/* Block until fences signal (or a GPU reset short-circuits the wait). */
14870 intel_atomic_commit_fence_wait(state);
14872 drm_atomic_helper_wait_for_dependencies(&state->base);
/* Full modesets hold the MODESET power domain for the duration. */
14874 if (state->modeset)
14875 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
/* Grab the power domains each modified crtc needs before touching hw. */
14877 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14878 new_crtc_state, i) {
14879 if (needs_modeset(new_crtc_state) ||
14880 new_crtc_state->update_pipe) {
14882 put_domains[crtc->pipe] =
14883 modeset_get_crtc_power_domains(new_crtc_state);
14887 intel_commit_modeset_disables(state);
14889 /* FIXME: Eventually get rid of our crtc->config pointer */
14890 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
14891 crtc->config = new_crtc_state;
14893 if (state->modeset) {
14894 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
/* cdclk may need raising before any planes/pipes are enabled. */
14896 intel_set_cdclk_pre_plane_update(dev_priv,
14897 &state->cdclk.actual,
14898 &dev_priv->cdclk.actual,
14899 state->cdclk.pipe);
14902 * SKL workaround: bspec recommends we disable the SAGV when we
14903 * have more then one pipe enabled
14905 if (!intel_can_enable_sagv(state))
14906 intel_disable_sagv(dev_priv);
14908 intel_modeset_verify_disabled(dev_priv, state);
14911 /* Complete the events for pipes that have now been disabled */
14912 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14913 bool modeset = needs_modeset(new_crtc_state);
14915 /* Complete events for now disable pipes here. */
14916 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
14917 spin_lock_irq(&dev->event_lock);
14918 drm_crtc_send_vblank_event(&crtc->base,
14919 new_crtc_state->uapi.event);
14920 spin_unlock_irq(&dev->event_lock);
14922 new_crtc_state->uapi.event = NULL;
14926 if (state->modeset)
14927 intel_encoders_update_prepare(state);
14929 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
14930 dev_priv->display.commit_modeset_enables(state);
14932 if (state->modeset) {
14933 intel_encoders_update_complete(state);
/* cdclk can be lowered again once the new config is live. */
14935 intel_set_cdclk_post_plane_update(dev_priv,
14936 &state->cdclk.actual,
14937 &dev_priv->cdclk.actual,
14938 state->cdclk.pipe);
14941 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
14942 * already, but still need the state for the delayed optimization. To
14944 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
14945 * - schedule that vblank worker _before_ calling hw_done
14946 * - at the start of commit_tail, cancel it _synchrously
14947 * - switch over to the vblank wait helper in the core after that since
14948 * we don't need out special handling any more.
14950 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
/* Reload LUTs post-vblank for fastsets that changed color management. */
14952 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14953 if (new_crtc_state->hw.active &&
14954 !needs_modeset(new_crtc_state) &&
14955 !new_crtc_state->preload_luts &&
14956 (new_crtc_state->uapi.color_mgmt_changed ||
14957 new_crtc_state->update_pipe))
14958 intel_color_load_luts(new_crtc_state);
14962 * Now that the vblank has passed, we can go ahead and program the
14963 * optimal watermarks on platforms that need two-step watermark
14966 * TODO: Move this (and other cleanup) to an async worker eventually.
14968 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14969 new_crtc_state, i) {
14971 * Gen2 reports pipe underruns whenever all planes are disabled.
14972 * So re-enable underrun reporting after some planes get enabled.
14974 * We do this before .optimize_watermarks() so that we have a
14975 * chance of catching underruns with the intermediate watermarks
14976 * vs. the new plane configuration.
14978 if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
14979 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14981 if (dev_priv->display.optimize_watermarks)
14982 dev_priv->display.optimize_watermarks(state, crtc);
14985 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14986 intel_post_plane_update(state, crtc);
/* Drop the power domain references taken at the top of this function. */
14988 if (put_domains[i])
14989 modeset_put_power_domains(dev_priv, put_domains[i]);
14991 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
14994 /* Underruns don't always raise interrupts, so check manually */
14995 intel_check_cpu_fifo_underruns(dev_priv);
14996 intel_check_pch_fifo_underruns(dev_priv);
14998 if (state->modeset)
14999 intel_verify_planes(state);
/* Re-enable SAGV only once the new configuration permits it. */
15001 if (state->modeset && intel_can_enable_sagv(state))
15002 intel_enable_sagv(dev_priv);
15004 drm_atomic_helper_commit_hw_done(&state->base);
15006 if (state->modeset) {
15007 /* As one of the primary mmio accessors, KMS has a high
15008 * likelihood of triggering bugs in unclaimed access. After we
15009 * finish modesetting, see if an error has been flagged, and if
15010 * so enable debugging for the next modeset - and hope we catch
15013 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
15014 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
15016 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
15019 * Defer the cleanup of the old state to a separate worker to not
15020 * impede the current task (userspace for blocking modesets) that
15021 * are executed inline. For out-of-line asynchronous modesets/flips,
15022 * deferring to a new worker seems overkill, but we would place a
15023 * schedule point (cond_resched()) here anyway to keep latencies
15026 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
15027 queue_work(system_highpri_wq, &state->base.commit_work);
15030 static void intel_atomic_commit_work(struct work_struct *work)
15032 struct intel_atomic_state *state =
15033 container_of(work, struct intel_atomic_state, base.commit_work);
15035 intel_atomic_commit_tail(state);
/*
 * i915_sw_fence notify callback for the commit-ready fence: nothing to
 * do on FENCE_COMPLETE (blocking waits happen in the worker); on
 * release, the state is queued for deferred freeing.
 */
15038 static int __i915_sw_fence_call
15039 intel_atomic_commit_ready(struct i915_sw_fence *fence,
15040 enum i915_sw_fence_notify notify)
15042 struct intel_atomic_state *state =
15043 container_of(fence, struct intel_atomic_state, commit_ready);
15046 case FENCE_COMPLETE:
15047 /* we do blocking waits in the worker, nothing to do here */
15051 struct intel_atomic_helper *helper =
15052 &to_i915(state->base.dev)->atomic_helper;
/* Queue on the free list; kick the worker only for the first entry. */
15054 if (llist_add(&state->freed, &helper->free_list))
15055 schedule_work(&helper->free_work);
15060 return NOTIFY_DONE;
15063 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15065 struct intel_plane_state *old_plane_state, *new_plane_state;
15066 struct intel_plane *plane;
15069 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15070 new_plane_state, i)
15071 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15072 to_intel_frontbuffer(new_plane_state->hw.fb),
15073 plane->frontbuffer_bit);
15076 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
15078 struct intel_crtc *crtc;
15080 for_each_intel_crtc(&dev_priv->drm, crtc)
15081 drm_modeset_lock_assert_held(&crtc->base.mutex);
/*
 * i915's atomic_commit hook: takes runtime pm, prepares and swaps the
 * state, publishes global state, then either queues the commit tail to
 * a worker (nonblocking) or runs it inline.
 */
15084 static int intel_atomic_commit(struct drm_device *dev,
15085 struct drm_atomic_state *_state,
15088 struct intel_atomic_state *state = to_intel_atomic_state(_state);
15089 struct drm_i915_private *dev_priv = to_i915(dev);
/* Hold a runtime-pm reference for the whole life of the commit. */
15092 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
15094 drm_atomic_state_get(&state->base);
15095 i915_sw_fence_init(&state->commit_ready,
15096 intel_atomic_commit_ready);
15099 * The intel_legacy_cursor_update() fast path takes care
15100 * of avoiding the vblank waits for simple cursor
15101 * movement and flips. For cursor on/off and size changes,
15102 * we want to perform the vblank waits so that watermark
15103 * updates happen during the correct frames. Gen9+ have
15104 * double buffered watermarks and so shouldn't need this.
15106 * Unset state->legacy_cursor_update before the call to
15107 * drm_atomic_helper_setup_commit() because otherwise
15108 * drm_atomic_helper_wait_for_flip_done() is a noop and
15109 * we get FIFO underruns because we didn't wait
15112 * FIXME doing watermarks and fb cleanup from a vblank worker
15113 * (assuming we had any) would solve these problems.
15115 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
15116 struct intel_crtc_state *new_crtc_state;
15117 struct intel_crtc *crtc;
15120 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
15121 if (new_crtc_state->wm.need_postvbl_update ||
15122 new_crtc_state->update_wm_post)
15123 state->base.legacy_cursor_update = false;
15126 ret = intel_atomic_prepare_commit(state);
15128 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
15129 i915_sw_fence_commit(&state->commit_ready);
15130 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
15134 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
15136 ret = drm_atomic_helper_swap_state(&state->base, true);
/* On failure, undo the fence/plane prep and release runtime pm. */
15139 i915_sw_fence_commit(&state->commit_ready);
15141 drm_atomic_helper_cleanup_planes(dev, &state->base);
15142 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
15145 dev_priv->wm.distrust_bios_wm = false;
15146 intel_shared_dpll_swap_state(state);
15147 intel_atomic_track_fbs(state);
/* Publish the new global (non-crtc) state under the global locks. */
15149 if (state->global_state_changed) {
15150 assert_global_state_locked(dev_priv);
15152 memcpy(dev_priv->min_cdclk, state->min_cdclk,
15153 sizeof(state->min_cdclk));
15154 memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
15155 sizeof(state->min_voltage_level));
15156 dev_priv->active_pipes = state->active_pipes;
15157 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
15159 intel_cdclk_swap_state(state);
15162 drm_atomic_state_get(&state->base);
15163 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
15165 i915_sw_fence_commit(&state->commit_ready);
/* Modesets use a dedicated wq; plain flips use another to avoid stalls. */
15166 if (nonblock && state->modeset) {
15167 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
15168 } else if (nonblock) {
15169 queue_work(dev_priv->flip_wq, &state->base.commit_work);
15171 if (state->modeset)
15172 flush_workqueue(dev_priv->modeset_wq);
15173 intel_atomic_commit_tail(state);
/*
 * One-shot vblank waitqueue entry used to RPS-boost the render request
 * backing a flip if it hasn't started by the next vblank (see
 * do_rps_boost() / add_rps_boost_after_vblank() below).
 */
15179 struct wait_rps_boost {
15180 struct wait_queue_entry wait;
15182 struct drm_crtc *crtc;
15183 struct i915_request *request;
/*
 * Vblank waitqueue callback: boost the GPU clocks if the flip's render
 * request has not even started by the time the vblank fires.
 */
15186 static int do_rps_boost(struct wait_queue_entry *_wait,
15187 unsigned mode, int sync, void *key)
15189 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15190 struct i915_request *rq = wait->request;
15193 * If we missed the vblank, but the request is already running it
15194 * is reasonable to assume that it will complete before the next
15195 * vblank without our intervention, so leave RPS alone.
15197 if (!i915_request_started(rq))
15198 intel_rps_boost(rq);
/* Drop the request reference taken in add_rps_boost_after_vblank(). */
15199 i915_request_put(rq);
15201 drm_crtc_vblank_put(wait->crtc);
/* One-shot: remove ourselves from the vblank waitqueue. */
15203 list_del(&wait->wait.entry);
/*
 * Arm a one-shot vblank waitqueue entry that RPS-boosts the i915
 * request backing @fence if it hasn't started by the next vblank.
 * Quietly does nothing for foreign fences, gen < 6, or when no vblank
 * reference can be obtained.
 */
15208 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15209 struct dma_fence *fence)
15211 struct wait_rps_boost *wait;
15213 if (!dma_fence_is_i915(fence))
15216 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15219 if (drm_crtc_vblank_get(crtc))
15222 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15224 drm_crtc_vblank_put(crtc);
/* Holds a request reference until do_rps_boost() releases it. */
15228 wait->request = to_request(dma_fence_get(fence));
15231 wait->wait.func = do_rps_boost;
15232 wait->wait.flags = 0;
15234 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
/*
 * Pin @plane_state's fb for scanout.  Cursors on platforms that need a
 * physical address are attached via i915_gem_object_attach_phys();
 * everything else is pinned (and possibly fenced) into the GGTT, with
 * the resulting vma stashed in plane_state->vma for later unpinning.
 * Returns 0 or a negative error code.
 */
15237 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15239 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15240 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15241 struct drm_framebuffer *fb = plane_state->hw.fb;
15242 struct i915_vma *vma;
15244 if (plane->id == PLANE_CURSOR &&
15245 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15246 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15247 const int align = intel_cursor_alignment(dev_priv);
15250 err = i915_gem_object_attach_phys(obj, align);
15255 vma = intel_pin_and_fence_fb_obj(fb,
15256 &plane_state->view,
15257 intel_plane_uses_fence(plane_state),
15258 &plane_state->flags);
15260 return PTR_ERR(vma);
15262 plane_state->vma = vma;
15267 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15269 struct i915_vma *vma;
15271 vma = fetch_and_zero(&old_plane_state->vma);
15273 intel_unpin_fb_vma(vma, old_plane_state->flags);
/*
 * Raise the scheduling priority of work targeting a to-be-scanned-out
 * object so it completes ahead of ordinary client rendering.
 */
15276 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
15278 struct i915_sched_attr attr = {
15279 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
15282 i915_gem_object_wait_priority(obj, 0, &attr);
15286 * intel_prepare_plane_fb - Prepare fb for usage on plane
15287 * @plane: drm plane to prepare for
15288 * @_new_plane_state: the plane state being prepared
15290 * Prepares a framebuffer for usage on a display plane. Generally this
15291 * involves pinning the underlying object and updating the frontbuffer tracking
15292 * bits. Some older platforms need special physical address handling for
15295 * Returns 0 on success, negative error code on failure.
15298 intel_prepare_plane_fb(struct drm_plane *plane,
15299 struct drm_plane_state *_new_plane_state)
15301 struct intel_plane_state *new_plane_state =
15302 to_intel_plane_state(_new_plane_state);
15303 struct intel_atomic_state *intel_state =
15304 to_intel_atomic_state(new_plane_state->uapi.state);
15305 struct drm_i915_private *dev_priv = to_i915(plane->dev);
15306 struct drm_framebuffer *fb = new_plane_state->hw.fb;
15307 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15308 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
15312 struct intel_crtc_state *crtc_state =
15313 intel_atomic_get_new_crtc_state(intel_state,
15314 to_intel_crtc(plane->state->crtc));
15316 /* Big Hammer, we also need to ensure that any pending
15317 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15318 * current scanout is retired before unpinning the old
15319 * framebuffer. Note that we rely on userspace rendering
15320 * into the buffer attached to the pipe they are waiting
15321 * on. If not, userspace generates a GPU hang with IPEHR
15322 * point to the MI_WAIT_FOR_EVENT.
15324 * This should only fail upon a hung GPU, in which case we
15325 * can safely continue.
15327 if (needs_modeset(crtc_state)) {
15328 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15329 old_obj->base.resv, NULL,
/* Gate the commit on the user-supplied (explicit) fence, if any. */
15337 if (new_plane_state->uapi.fence) { /* explicit fencing */
15338 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
15339 new_plane_state->uapi.fence,
15340 I915_FENCE_TIMEOUT,
/* Pin backing pages, then pin the fb into a GGTT vma for scanout. */
15349 ret = i915_gem_object_pin_pages(obj);
15353 ret = intel_plane_pin_fb(new_plane_state);
15355 i915_gem_object_unpin_pages(obj);
15359 fb_obj_bump_render_priority(obj);
15360 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
15362 if (!new_plane_state->uapi.fence) { /* implicit fencing */
15363 struct dma_fence *fence;
15365 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15366 obj->base.resv, NULL,
15367 false, I915_FENCE_TIMEOUT,
15372 fence = dma_resv_get_excl_rcu(obj->base.resv);
15374 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15376 dma_fence_put(fence);
15379 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15380 new_plane_state->uapi.fence);
15384 * We declare pageflips to be interactive and so merit a small bias
15385 * towards upclocking to deliver the frame on time. By only changing
15386 * the RPS thresholds to sample more regularly and aim for higher
15387 * clocks we can hopefully deliver low power workloads (like kodi)
15388 * that are not quite steady state without resorting to forcing
15389 * maximum clocks following a vblank miss (see do_rps_boost()).
15391 if (!intel_state->rps_interactive) {
15392 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15393 intel_state->rps_interactive = true;
15400 * intel_cleanup_plane_fb - Cleans up an fb after plane use
15401 * @plane: drm plane to clean up for
15402 * @_old_plane_state: the state from the previous modeset
15404 * Cleans up a framebuffer that has just been removed from a plane.
15407 intel_cleanup_plane_fb(struct drm_plane *plane,
15408 struct drm_plane_state *_old_plane_state)
15410 struct intel_plane_state *old_plane_state =
15411 to_intel_plane_state(_old_plane_state);
15412 struct intel_atomic_state *intel_state =
15413 to_intel_atomic_state(old_plane_state->uapi.state);
15414 struct drm_i915_private *dev_priv = to_i915(plane->dev);
/* Pairs with the mark_interactive(true) in intel_prepare_plane_fb(). */
15416 if (intel_state->rps_interactive) {
15417 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15418 intel_state->rps_interactive = false;
15421 /* Should only be called after a successful intel_prepare_plane_fb()! */
15422 intel_plane_unpin_fb(old_plane_state);
15426 * intel_plane_destroy - destroy a plane
15427 * @plane: plane to destroy
15429 * Common destruction function for all types of planes (primary, cursor,
15432 void intel_plane_destroy(struct drm_plane *plane)
15434 drm_plane_cleanup(plane);
/* The intel_plane embeds the drm_plane, so free the whole wrapper. */
15435 kfree(to_intel_plane(plane));
15438 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15439 u32 format, u64 modifier)
15441 switch (modifier) {
15442 case DRM_FORMAT_MOD_LINEAR:
15443 case I915_FORMAT_MOD_X_TILED:
15450 case DRM_FORMAT_C8:
15451 case DRM_FORMAT_RGB565:
15452 case DRM_FORMAT_XRGB1555:
15453 case DRM_FORMAT_XRGB8888:
15454 return modifier == DRM_FORMAT_MOD_LINEAR ||
15455 modifier == I915_FORMAT_MOD_X_TILED;
15461 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15462 u32 format, u64 modifier)
15464 switch (modifier) {
15465 case DRM_FORMAT_MOD_LINEAR:
15466 case I915_FORMAT_MOD_X_TILED:
15473 case DRM_FORMAT_C8:
15474 case DRM_FORMAT_RGB565:
15475 case DRM_FORMAT_XRGB8888:
15476 case DRM_FORMAT_XBGR8888:
15477 case DRM_FORMAT_ARGB8888:
15478 case DRM_FORMAT_ABGR8888:
15479 case DRM_FORMAT_XRGB2101010:
15480 case DRM_FORMAT_XBGR2101010:
15481 case DRM_FORMAT_ARGB2101010:
15482 case DRM_FORMAT_ABGR2101010:
15483 case DRM_FORMAT_XBGR16161616F:
15484 return modifier == DRM_FORMAT_MOD_LINEAR ||
15485 modifier == I915_FORMAT_MOD_X_TILED;
15491 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15492 u32 format, u64 modifier)
15494 return modifier == DRM_FORMAT_MOD_LINEAR &&
15495 format == DRM_FORMAT_ARGB8888;
/* drm_plane_funcs for gen4+ primary planes; atomic helpers throughout. */
15498 static const struct drm_plane_funcs i965_plane_funcs = {
15499 .update_plane = drm_atomic_helper_update_plane,
15500 .disable_plane = drm_atomic_helper_disable_plane,
15501 .destroy = intel_plane_destroy,
15502 .atomic_duplicate_state = intel_plane_duplicate_state,
15503 .atomic_destroy_state = intel_plane_destroy_state,
15504 .format_mod_supported = i965_plane_format_mod_supported,
/* drm_plane_funcs for pre-gen4 primary planes; only the format check differs. */
15507 static const struct drm_plane_funcs i8xx_plane_funcs = {
15508 .update_plane = drm_atomic_helper_update_plane,
15509 .disable_plane = drm_atomic_helper_disable_plane,
15510 .destroy = intel_plane_destroy,
15511 .atomic_duplicate_state = intel_plane_duplicate_state,
15512 .atomic_destroy_state = intel_plane_destroy_state,
15513 .format_mod_supported = i8xx_plane_format_mod_supported,
/*
 * Legacy cursor fast path: update the cursor plane without a full
 * atomic commit when only the fb or position changed on an active,
 * quiescent crtc.  Anything more involved falls back to the regular
 * drm_atomic_helper_update_plane() slow path at the bottom.
 */
15517 intel_legacy_cursor_update(struct drm_plane *_plane,
15518 struct drm_crtc *_crtc,
15519 struct drm_framebuffer *fb,
15520 int crtc_x, int crtc_y,
15521 unsigned int crtc_w, unsigned int crtc_h,
15522 u32 src_x, u32 src_y,
15523 u32 src_w, u32 src_h,
15524 struct drm_modeset_acquire_ctx *ctx)
15526 struct intel_plane *plane = to_intel_plane(_plane);
15527 struct intel_crtc *crtc = to_intel_crtc(_crtc);
15528 struct intel_plane_state *old_plane_state =
15529 to_intel_plane_state(plane->base.state);
15530 struct intel_plane_state *new_plane_state;
15531 struct intel_crtc_state *crtc_state =
15532 to_intel_crtc_state(crtc->base.state);
15533 struct intel_crtc_state *new_crtc_state;
15537 * When crtc is inactive or there is a modeset pending,
15538 * wait for it to complete in the slowpath
15540 if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
15541 crtc_state->update_pipe)
15545 * Don't do an async update if there is an outstanding commit modifying
15546 * the plane. This prevents our async update's changes from getting
15547 * overridden by a previous synchronous update's state.
15549 if (old_plane_state->uapi.commit &&
15550 !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
15554 * If any parameters change that may affect watermarks,
15555 * take the slowpath. Only changing fb or position should be
15558 if (old_plane_state->uapi.crtc != &crtc->base ||
15559 old_plane_state->uapi.src_w != src_w ||
15560 old_plane_state->uapi.src_h != src_h ||
15561 old_plane_state->uapi.crtc_w != crtc_w ||
15562 old_plane_state->uapi.crtc_h != crtc_h ||
15563 !old_plane_state->uapi.fb != !fb)
/* Build throwaway plane/crtc states to run the atomic check against. */
15566 new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
15567 if (!new_plane_state)
15570 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
15571 if (!new_crtc_state) {
15576 drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
15578 new_plane_state->uapi.src_x = src_x;
15579 new_plane_state->uapi.src_y = src_y;
15580 new_plane_state->uapi.src_w = src_w;
15581 new_plane_state->uapi.src_h = src_h;
15582 new_plane_state->uapi.crtc_x = crtc_x;
15583 new_plane_state->uapi.crtc_y = crtc_y;
15584 new_plane_state->uapi.crtc_w = crtc_w;
15585 new_plane_state->uapi.crtc_h = crtc_h;
15587 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
15588 old_plane_state, new_plane_state);
15592 ret = intel_plane_pin_fb(new_plane_state);
15596 intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
15598 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15599 to_intel_frontbuffer(new_plane_state->hw.fb),
15600 plane->frontbuffer_bit);
15602 /* Swap plane state */
15603 plane->base.state = &new_plane_state->uapi;
15606 * We cannot swap crtc_state as it may be in use by an atomic commit or
15607 * page flip that's running simultaneously. If we swap crtc_state and
15608 * destroy the old state, we will cause a use-after-free there.
15610 * Only update active_planes, which is needed for our internal
15611 * bookkeeping. Either value will do the right thing when updating
15612 * planes atomically. If the cursor was part of the atomic update then
15613 * we would have taken the slowpath.
15615 crtc_state->active_planes = new_crtc_state->active_planes;
15617 if (new_plane_state->uapi.visible)
15618 intel_update_plane(plane, crtc_state, new_plane_state);
15620 intel_disable_plane(plane, crtc_state);
15622 intel_plane_unpin_fb(old_plane_state);
/* Cleanup: the throwaway crtc state and the superseded plane state. */
15625 if (new_crtc_state)
15626 intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
15628 intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
15630 intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
/* Fallback: go through the full atomic commit path. */
15634 return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
15635 crtc_x, crtc_y, crtc_w, crtc_h,
15636 src_x, src_y, src_w, src_h, ctx);
/* Cursor plane funcs: update_plane uses the legacy fast path above. */
15639 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
15640 .update_plane = intel_legacy_cursor_update,
15641 .disable_plane = drm_atomic_helper_disable_plane,
15642 .destroy = intel_plane_destroy,
15643 .atomic_duplicate_state = intel_plane_duplicate_state,
15644 .atomic_destroy_state = intel_plane_destroy_state,
15645 .format_mod_supported = intel_cursor_format_mod_supported,
15648 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15649 enum i9xx_plane_id i9xx_plane)
15651 if (!HAS_FBC(dev_priv))
15654 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15655 return i9xx_plane == PLANE_A; /* tied to pipe A */
15656 else if (IS_IVYBRIDGE(dev_priv))
15657 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15658 i9xx_plane == PLANE_C;
15659 else if (INTEL_GEN(dev_priv) >= 4)
15660 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15662 return i9xx_plane == PLANE_A;
/*
 * Create and register the primary plane for @pipe on pre-gen9 hardware
 * (gen9+ defers to skl_universal_plane_create()).  Picks formats,
 * funcs, min-cdclk and rotation support per platform.  Returns the new
 * plane or an ERR_PTR on failure.
 */
15665 static struct intel_plane *
15666 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
15668 struct intel_plane *plane;
15669 const struct drm_plane_funcs *plane_funcs;
15670 unsigned int supported_rotations;
15671 unsigned int possible_crtcs;
15672 const u32 *formats;
15676 if (INTEL_GEN(dev_priv) >= 9)
15677 return skl_universal_plane_create(dev_priv, pipe,
15680 plane = intel_plane_alloc();
15684 plane->pipe = pipe;
15686 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
15687 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
15689 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
15690 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
15692 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
15693 plane->id = PLANE_PRIMARY;
15694 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
15696 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
15697 if (plane->has_fbc) {
15698 struct intel_fbc *fbc = &dev_priv->fbc;
15700 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
/* Pick the platform-appropriate format list. */
15703 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15704 formats = vlv_primary_formats;
15705 num_formats = ARRAY_SIZE(vlv_primary_formats);
15706 } else if (INTEL_GEN(dev_priv) >= 4) {
15708 * WaFP16GammaEnabling:ivb
15709 * "Workaround : When using the 64-bit format, the plane
15710 * output on each color channel has one quarter amplitude.
15711 * It can be brought up to full amplitude by using pipe
15712 * gamma correction or pipe color space conversion to
15713 * multiply the plane output by four."
15715 * There is no dedicated plane gamma for the primary plane,
15716 * and using the pipe gamma/csc could conflict with other
15717 * planes, so we choose not to expose fp16 on IVB primary
15718 * planes. HSW primary planes no longer have this problem.
15720 if (IS_IVYBRIDGE(dev_priv)) {
15721 formats = ivb_primary_formats;
15722 num_formats = ARRAY_SIZE(ivb_primary_formats);
15724 formats = i965_primary_formats;
15725 num_formats = ARRAY_SIZE(i965_primary_formats);
15728 formats = i8xx_primary_formats;
15729 num_formats = ARRAY_SIZE(i8xx_primary_formats);
15732 if (INTEL_GEN(dev_priv) >= 4)
15733 plane_funcs = &i965_plane_funcs;
15735 plane_funcs = &i8xx_plane_funcs;
/* min-cdclk requirements also vary by platform generation. */
15737 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15738 plane->min_cdclk = vlv_plane_min_cdclk;
15739 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15740 plane->min_cdclk = hsw_plane_min_cdclk;
15741 else if (IS_IVYBRIDGE(dev_priv))
15742 plane->min_cdclk = ivb_plane_min_cdclk;
15744 plane->min_cdclk = i9xx_plane_min_cdclk;
15746 plane->max_stride = i9xx_plane_max_stride;
15747 plane->update_plane = i9xx_update_plane;
15748 plane->disable_plane = i9xx_disable_plane;
15749 plane->get_hw_state = i9xx_plane_get_hw_state;
15750 plane->check_plane = i9xx_plane_check;
15752 possible_crtcs = BIT(pipe);
/* Name the plane after the pipe (ilk+/g4x) or the hw plane id (older). */
15754 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
15755 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
15756 possible_crtcs, plane_funcs,
15757 formats, num_formats,
15758 i9xx_format_modifiers,
15759 DRM_PLANE_TYPE_PRIMARY,
15760 "primary %c", pipe_name(pipe));
15762 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
15763 possible_crtcs, plane_funcs,
15764 formats, num_formats,
15765 i9xx_format_modifiers,
15766 DRM_PLANE_TYPE_PRIMARY,
15768 plane_name(plane->i9xx_plane));
15772 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
15773 supported_rotations =
15774 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
15775 DRM_MODE_REFLECT_X;
15776 } else if (INTEL_GEN(dev_priv) >= 4) {
15777 supported_rotations =
15778 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
15780 supported_rotations = DRM_MODE_ROTATE_0;
15783 if (INTEL_GEN(dev_priv) >= 4)
15784 drm_plane_create_rotation_property(&plane->base,
15786 supported_rotations);
15789 drm_plane_create_zpos_immutable_property(&plane->base, zpos);
15791 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
15796 intel_plane_free(plane);
15798 return ERR_PTR(ret);
/*
 * Create and register the cursor plane for @pipe, selecting the i845
 * or i9xx cursor hooks by platform.  Returns ERR_PTR on failure.
 */
15801 static struct intel_plane *
15802 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
15805 unsigned int possible_crtcs;
15806 struct intel_plane *cursor;
15809 cursor = intel_plane_alloc();
15810 if (IS_ERR(cursor))
15813 cursor->pipe = pipe;
15814 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
15815 cursor->id = PLANE_CURSOR;
15816 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
15818 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15819 cursor->max_stride = i845_cursor_max_stride;
15820 cursor->update_plane = i845_update_cursor;
15821 cursor->disable_plane = i845_disable_cursor;
15822 cursor->get_hw_state = i845_cursor_get_hw_state;
15823 cursor->check_plane = i845_check_cursor;
15825 cursor->max_stride = i9xx_cursor_max_stride;
15826 cursor->update_plane = i9xx_update_cursor;
15827 cursor->disable_plane = i9xx_disable_cursor;
15828 cursor->get_hw_state = i9xx_cursor_get_hw_state;
15829 cursor->check_plane = i9xx_check_cursor;
/* NOTE(review): ~0 appears to mark the cached cursor hw state invalid
 * so the first update always programs the registers — confirm. */
15832 cursor->cursor.base = ~0;
15833 cursor->cursor.cntl = ~0;
15835 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
15836 cursor->cursor.size = ~0;
15838 possible_crtcs = BIT(pipe);
15840 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
15841 possible_crtcs, &intel_cursor_plane_funcs,
15842 intel_cursor_formats,
15843 ARRAY_SIZE(intel_cursor_formats),
15844 cursor_format_modifiers,
15845 DRM_PLANE_TYPE_CURSOR,
15846 "cursor %c", pipe_name(pipe));
15850 if (INTEL_GEN(dev_priv) >= 4)
15851 drm_plane_create_rotation_property(&cursor->base,
15853 DRM_MODE_ROTATE_0 |
15854 DRM_MODE_ROTATE_180);
/* Cursor sits above the sprites in the fixed z-order. */
15856 zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
15857 drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
15859 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
15864 intel_plane_free(cursor);
15866 return ERR_PTR(ret);
/*
 * Common drm_crtc_funcs members shared by every platform variant
 * below; each variant only adds its own vblank/frame-counter hooks.
 */
15869 #define INTEL_CRTC_FUNCS \
15870 .gamma_set = drm_atomic_helper_legacy_gamma_set, \
15871 .set_config = drm_atomic_helper_set_config, \
15872 .destroy = intel_crtc_destroy, \
15873 .page_flip = drm_atomic_helper_page_flip, \
15874 .atomic_duplicate_state = intel_crtc_duplicate_state, \
15875 .atomic_destroy_state = intel_crtc_destroy_state, \
15876 .set_crc_source = intel_crtc_set_crc_source, \
15877 .verify_crc_source = intel_crtc_verify_crc_source, \
15878 .get_crc_sources = intel_crtc_get_crc_sources
/* BDW+ crtc funcs: g4x-style frame counter, BDW vblank irq handling. */
15880 static const struct drm_crtc_funcs bdw_crtc_funcs = {
15883 .get_vblank_counter = g4x_get_vblank_counter,
15884 .enable_vblank = bdw_enable_vblank,
15885 .disable_vblank = bdw_disable_vblank,
/* ILK-era crtc funcs: g4x frame counter with ILK vblank irq handling. */
15888 static const struct drm_crtc_funcs ilk_crtc_funcs = {
15891 .get_vblank_counter = g4x_get_vblank_counter,
15892 .enable_vblank = ilk_enable_vblank,
15893 .disable_vblank = ilk_disable_vblank,
/* G4X crtc funcs: g4x frame counter, i965-style vblank irq handling. */
15896 static const struct drm_crtc_funcs g4x_crtc_funcs = {
15899 .get_vblank_counter = g4x_get_vblank_counter,
15900 .enable_vblank = i965_enable_vblank,
15901 .disable_vblank = i965_disable_vblank,
/* i965 crtc funcs: i915-style frame counter, i965 vblank irq handling. */
15904 static const struct drm_crtc_funcs i965_crtc_funcs = {
15907 .get_vblank_counter = i915_get_vblank_counter,
15908 .enable_vblank = i965_enable_vblank,
15909 .disable_vblank = i965_disable_vblank,
/* i915GM/i945GM CRTC vtable: dedicated i915gm vblank enable/disable hooks. */
15912 static const struct drm_crtc_funcs i915gm_crtc_funcs = {
15915 .get_vblank_counter = i915_get_vblank_counter,
15916 .enable_vblank = i915gm_enable_vblank,
15917 .disable_vblank = i915gm_disable_vblank,
/* Remaining gen3 parts: i915 frame counter paired with i8xx vblank hooks. */
15920 static const struct drm_crtc_funcs i915_crtc_funcs = {
15923 .get_vblank_counter = i915_get_vblank_counter,
15924 .enable_vblank = i8xx_enable_vblank,
15925 .disable_vblank = i8xx_disable_vblank,
/*
 * Gen2 CRTC vtable.  No .get_vblank_counter is set because, as noted
 * below, the hardware has no vblank counter register.
 */
15928 static const struct drm_crtc_funcs i8xx_crtc_funcs = {
15931 /* no hw vblank counter */
15932 .enable_vblank = i8xx_enable_vblank,
15933 .disable_vblank = i8xx_disable_vblank,
/*
 * Allocate an intel_crtc together with its initial crtc state and hook
 * the state's uapi part into the base drm_crtc.  Returns the new crtc
 * or ERR_PTR(-ENOMEM) when either allocation fails.
 *
 * NOTE(review): the NULL-check lines guarding kzalloc() and
 * intel_crtc_state_alloc() (and any cleanup between them) appear to
 * have been lost in extraction -- verify against the full file.
 */
15936 static struct intel_crtc *intel_crtc_alloc(void)
15938 struct intel_crtc_state *crtc_state;
15939 struct intel_crtc *crtc;
15941 crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
15943 return ERR_PTR(-ENOMEM);
15945 crtc_state = intel_crtc_state_alloc(crtc);
15948 return ERR_PTR(-ENOMEM);
15951 crtc->base.state = &crtc_state->uapi;
15952 crtc->config = crtc_state;
/*
 * Counterpart of intel_crtc_alloc(): release the crtc's state via the
 * standard destroy callback (the kfree of the crtc itself is presumably
 * on a following line dropped by extraction -- confirm in the full file).
 */
15957 static void intel_crtc_free(struct intel_crtc *crtc)
15959 intel_crtc_destroy_state(&crtc->base, crtc->base.state);
/*
 * Create and register the CRTC for @pipe: allocate the intel_crtc,
 * create its primary, sprite and cursor planes, pick the platform's
 * drm_crtc_funcs table, register with the DRM core, and record the
 * pipe->crtc (and, pre-gen9, plane->crtc) lookup mappings.
 * Returns 0 on success or a negative error code (error paths free the
 * crtc via intel_crtc_free() at the end).
 */
15963 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
15965 struct intel_plane *primary, *cursor;
15966 const struct drm_crtc_funcs *funcs;
15967 struct intel_crtc *crtc;
15970 crtc = intel_crtc_alloc();
15972 return PTR_ERR(crtc);
15975 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
/* Primary plane first; its id seeds the crtc's plane mask. */
15977 primary = intel_primary_plane_create(dev_priv, pipe);
15978 if (IS_ERR(primary)) {
15979 ret = PTR_ERR(primary);
15982 crtc->plane_ids_mask |= BIT(primary->id);
15984 for_each_sprite(dev_priv, pipe, sprite) {
15985 struct intel_plane *plane;
15987 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
15988 if (IS_ERR(plane)) {
15989 ret = PTR_ERR(plane);
15992 crtc->plane_ids_mask |= BIT(plane->id);
15995 cursor = intel_cursor_plane_create(dev_priv, pipe);
15996 if (IS_ERR(cursor)) {
15997 ret = PTR_ERR(cursor);
16000 crtc->plane_ids_mask |= BIT(cursor->id);
/* Pick the vblank-hook table matching this platform's display engine. */
16002 if (HAS_GMCH(dev_priv)) {
16003 if (IS_CHERRYVIEW(dev_priv) ||
16004 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
16005 funcs = &g4x_crtc_funcs;
16006 else if (IS_GEN(dev_priv, 4))
16007 funcs = &i965_crtc_funcs;
16008 else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
16009 funcs = &i915gm_crtc_funcs;
16010 else if (IS_GEN(dev_priv, 3))
16011 funcs = &i915_crtc_funcs;
16013 funcs = &i8xx_crtc_funcs;
16015 if (INTEL_GEN(dev_priv) >= 8)
16016 funcs = &bdw_crtc_funcs;
16018 funcs = &ilk_crtc_funcs;
16021 ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
16022 &primary->base, &cursor->base,
16023 funcs, "pipe %c", pipe_name(pipe));
/* Mappings must be unique; a duplicate indicates a driver bug. */
16027 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
16028 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
16029 dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
16031 if (INTEL_GEN(dev_priv) < 9) {
16032 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
16034 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
16035 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
16036 dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
16039 intel_color_init(crtc);
/* drm core indexes crtcs in registration order; must equal the pipe. */
16041 WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
16046 intel_crtc_free(crtc);
/*
 * DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID: translate a userspace crtc id
 * into the hardware pipe it drives.  (The -ENOENT path for a failed
 * drm_crtc_find() is presumably on a line dropped by extraction.)
 */
16051 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16052 struct drm_file *file)
16054 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16055 struct drm_crtc *drmmode_crtc;
16056 struct intel_crtc *crtc;
16058 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16062 crtc = to_intel_crtc(drmmode_crtc);
16063 pipe_from_crtc_id->pipe = crtc->pipe;
/*
 * Build the bitmask of encoders that can be active on the same crtc as
 * @encoder, as determined by encoders_cloneable().
 */
16068 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16070 struct drm_device *dev = encoder->base.dev;
16071 struct intel_encoder *source_encoder;
16072 u32 possible_clones = 0;
16074 for_each_intel_encoder(dev, source_encoder) {
16075 if (encoders_cloneable(encoder, source_encoder))
16076 possible_clones |= drm_encoder_mask(&source_encoder->base);
16079 return possible_clones;
/*
 * Convert the encoder's pipe_mask (hardware pipes it can drive) into a
 * drm crtc mask for drm_encoder::possible_crtcs.
 */
16082 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16084 struct drm_device *dev = encoder->base.dev;
16085 struct intel_crtc *crtc;
16086 u32 possible_crtcs = 0;
16088 for_each_intel_crtc(dev, crtc) {
16089 if (encoder->pipe_mask & BIT(crtc->pipe))
16090 possible_crtcs |= drm_crtc_mask(&crtc->base);
16093 return possible_crtcs;
/*
 * Whether port A eDP exists on this Ironlake-era part: mobile only,
 * DP_A must report detected, and on gen5 the fuse strap must not
 * disable eDP A.
 */
16096 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16098 if (!IS_MOBILE(dev_priv))
16101 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
16104 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
/*
 * Whether a CRT (VGA on DDI E) connector should be registered on DDI
 * platforms: ruled out on gen9+, ULT parts, when the LPT-H fuse strap
 * disables it, when DDI A claims all four lanes (DDI E unusable), or
 * when the VBT says there is no integrated CRT.
 */
16110 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16112 if (INTEL_GEN(dev_priv) >= 9)
16115 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16118 if (HAS_PCH_LPT_H(dev_priv) &&
16119 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16122 /* DDI E can't be used if DDI A requires 4 lanes */
16123 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16126 if (!dev_priv->vbt.int_crt_support)
/*
 * Workaround: unlock the panel power sequencer registers by writing the
 * unlock key into each PP_CONTROL, so later PPS programming is not
 * silently write-protected.  Skipped on DDI platforms; the VLV/CHV
 * check presumably selects a different pps count (line lost in
 * extraction -- confirm in the full file).
 */
16132 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16137 if (HAS_DDI(dev_priv))
16140 * This w/a is needed at least on CPT/PPT, but to be sure apply it
16141 * everywhere where registers can be write protected.
16143 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16148 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16149 u32 val = I915_READ(PP_CONTROL(pps_idx));
16151 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16152 I915_WRITE(PP_CONTROL(pps_idx), val);
/*
 * Select the panel power sequencer MMIO base for this platform, then
 * apply the register-unlock workaround above.
 */
16156 static void intel_pps_init(struct drm_i915_private *dev_priv)
16158 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
16159 dev_priv->pps_mmio_base = PCH_PPS_BASE;
16160 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16161 dev_priv->pps_mmio_base = VLV_PPS_BASE;
16163 dev_priv->pps_mmio_base = PPS_BASE;
16165 intel_pps_unlock_regs_wa(dev_priv);
/*
 * Probe and register all display outputs (encoders/connectors) for this
 * platform.  One big per-generation ladder: TGL+ and ICL register fixed
 * DDI/DSI sets, DDI platforms use strap registers, PCH-split and
 * VLV/CHV use DP/HDMI detect bits plus VBT hints, older parts probe
 * SDVO/LVDS/CRT/TV.  Afterwards each encoder's possible_crtcs/clones
 * masks are filled in and panel connectors are moved to the head of the
 * connector list.
 */
16168 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
16170 struct intel_encoder *encoder;
16171 bool dpd_is_edp = false;
16173 intel_pps_init(dev_priv);
16175 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
16178 if (INTEL_GEN(dev_priv) >= 12) {
16179 intel_ddi_init(dev_priv, PORT_A);
16180 intel_ddi_init(dev_priv, PORT_B);
16181 intel_ddi_init(dev_priv, PORT_D);
16182 intel_ddi_init(dev_priv, PORT_E);
16183 intel_ddi_init(dev_priv, PORT_F);
16184 intel_ddi_init(dev_priv, PORT_G);
16185 intel_ddi_init(dev_priv, PORT_H);
16186 intel_ddi_init(dev_priv, PORT_I);
16187 icl_dsi_init(dev_priv);
16188 } else if (IS_ELKHARTLAKE(dev_priv)) {
16189 intel_ddi_init(dev_priv, PORT_A);
16190 intel_ddi_init(dev_priv, PORT_B);
16191 intel_ddi_init(dev_priv, PORT_C);
16192 intel_ddi_init(dev_priv, PORT_D);
16193 icl_dsi_init(dev_priv);
16194 } else if (IS_GEN(dev_priv, 11)) {
16195 intel_ddi_init(dev_priv, PORT_A);
16196 intel_ddi_init(dev_priv, PORT_B);
16197 intel_ddi_init(dev_priv, PORT_C);
16198 intel_ddi_init(dev_priv, PORT_D);
16199 intel_ddi_init(dev_priv, PORT_E);
16201 * On some ICL SKUs port F is not present. No strap bits for
16202 * this, so rely on VBT.
16203 * Work around broken VBTs on SKUs known to have no port F.
16205 if (IS_ICL_WITH_PORT_F(dev_priv) &&
16206 intel_bios_is_port_present(dev_priv, PORT_F))
16207 intel_ddi_init(dev_priv, PORT_F);
16209 icl_dsi_init(dev_priv);
16210 } else if (IS_GEN9_LP(dev_priv)) {
16212 * FIXME: Broxton doesn't support port detection via the
16213 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
16214 * detect the ports.
16216 intel_ddi_init(dev_priv, PORT_A);
16217 intel_ddi_init(dev_priv, PORT_B);
16218 intel_ddi_init(dev_priv, PORT_C);
16220 vlv_dsi_init(dev_priv);
16221 } else if (HAS_DDI(dev_priv)) {
16224 if (intel_ddi_crt_present(dev_priv))
16225 intel_crt_init(dev_priv);
16228 * Haswell uses DDI functions to detect digital outputs.
16229 * On SKL pre-D0 the strap isn't connected, so we assume
16232 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
16233 /* WaIgnoreDDIAStrap: skl */
16234 if (found || IS_GEN9_BC(dev_priv))
16235 intel_ddi_init(dev_priv, PORT_A);
16237 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
16239 found = I915_READ(SFUSE_STRAP);
16241 if (found & SFUSE_STRAP_DDIB_DETECTED)
16242 intel_ddi_init(dev_priv, PORT_B);
16243 if (found & SFUSE_STRAP_DDIC_DETECTED)
16244 intel_ddi_init(dev_priv, PORT_C);
16245 if (found & SFUSE_STRAP_DDID_DETECTED)
16246 intel_ddi_init(dev_priv, PORT_D);
16247 if (found & SFUSE_STRAP_DDIF_DETECTED)
16248 intel_ddi_init(dev_priv, PORT_F);
16250 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
16252 if (IS_GEN9_BC(dev_priv) &&
16253 intel_bios_is_port_present(dev_priv, PORT_E))
16254 intel_ddi_init(dev_priv, PORT_E);
16256 } else if (HAS_PCH_SPLIT(dev_priv)) {
16260 * intel_edp_init_connector() depends on this completing first,
16261 * to prevent the registration of both eDP and LVDS and the
16262 * incorrect sharing of the PPS.
16264 intel_lvds_init(dev_priv);
16265 intel_crt_init(dev_priv);
16267 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
16269 if (ilk_has_edp_a(dev_priv))
16270 intel_dp_init(dev_priv, DP_A, PORT_A);
16272 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
16273 /* PCH SDVOB multiplex with HDMIB */
16274 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
16276 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
16277 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
16278 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
16281 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
16282 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
16284 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
16285 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
16287 if (I915_READ(PCH_DP_C) & DP_DETECTED)
16288 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
16290 if (I915_READ(PCH_DP_D) & DP_DETECTED)
16291 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
16292 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16293 bool has_edp, has_port;
16295 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
16296 intel_crt_init(dev_priv);
16299 * The DP_DETECTED bit is the latched state of the DDC
16300 * SDA pin at boot. However since eDP doesn't require DDC
16301 * (no way to plug in a DP->HDMI dongle) the DDC pins for
16302 * eDP ports may have been muxed to an alternate function.
16303 * Thus we can't rely on the DP_DETECTED bit alone to detect
16304 * eDP ports. Consult the VBT as well as DP_DETECTED to
16305 * detect eDP ports.
16307 * Sadly the straps seem to be missing sometimes even for HDMI
16308 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
16309 * and VBT for the presence of the port. Additionally we can't
16310 * trust the port type the VBT declares as we've seen at least
16311 * HDMI ports that the VBT claim are DP or eDP.
16313 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
16314 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
16315 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
16316 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
16317 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
16318 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
16320 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
16321 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
16322 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
16323 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
16324 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
16325 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
16327 if (IS_CHERRYVIEW(dev_priv)) {
16329 * eDP not supported on port D,
16330 * so no need to worry about it
16332 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
16333 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
16334 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
16335 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
16336 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
16339 vlv_dsi_init(dev_priv);
16340 } else if (IS_PINEVIEW(dev_priv)) {
16341 intel_lvds_init(dev_priv);
16342 intel_crt_init(dev_priv);
16343 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
16344 bool found = false;
16346 if (IS_MOBILE(dev_priv))
16347 intel_lvds_init(dev_priv);
16349 intel_crt_init(dev_priv);
16351 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
16352 DRM_DEBUG_KMS("probing SDVOB\n");
16353 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
16354 if (!found && IS_G4X(dev_priv)) {
16355 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
16356 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
16359 if (!found && IS_G4X(dev_priv))
16360 intel_dp_init(dev_priv, DP_B, PORT_B);
16363 /* Before G4X SDVOC doesn't have its own detect register */
16365 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
16366 DRM_DEBUG_KMS("probing SDVOC\n");
16367 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
16370 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
16372 if (IS_G4X(dev_priv)) {
16373 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
16374 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
16376 if (IS_G4X(dev_priv))
16377 intel_dp_init(dev_priv, DP_C, PORT_C);
16380 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
16381 intel_dp_init(dev_priv, DP_D, PORT_D);
16383 if (SUPPORTS_TV(dev_priv))
16384 intel_tv_init(dev_priv);
16385 } else if (IS_GEN(dev_priv, 2)) {
16386 if (IS_I85X(dev_priv))
16387 intel_lvds_init(dev_priv);
16389 intel_crt_init(dev_priv);
16390 intel_dvo_init(dev_priv);
16393 intel_psr_init(dev_priv);
/* Now that all encoders exist, compute their crtc and clone masks. */
16395 for_each_intel_encoder(&dev_priv->drm, encoder) {
16396 encoder->base.possible_crtcs =
16397 intel_encoder_possible_crtcs(encoder);
16398 encoder->base.possible_clones =
16399 intel_encoder_possible_clones(encoder);
16402 intel_init_pch_refclk(dev_priv);
16404 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
/*
 * drm_framebuffer_funcs.destroy: tear down the fb and drop the
 * frontbuffer reference taken in intel_framebuffer_init().
 */
16407 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16409 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16411 drm_framebuffer_cleanup(fb);
16412 intel_frontbuffer_put(intel_fb->frontbuffer);
/*
 * drm_framebuffer_funcs.create_handle: export a GEM handle for the fb's
 * backing object.  userptr-backed objects are refused (the error return
 * line after the debug message was presumably dropped in extraction).
 */
16417 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16418 struct drm_file *file,
16419 unsigned int *handle)
16421 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16423 if (obj->userptr.mm) {
16424 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
16428 return drm_gem_handle_create(file, &obj->base, handle);
/*
 * drm_framebuffer_funcs.dirty: flush CPU writes for display and notify
 * the frontbuffer tracking code (ORIGIN_DIRTYFB).  The clip rects are
 * not used -- the whole fb is flushed.
 */
16431 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16432 struct drm_file *file,
16433 unsigned flags, unsigned color,
16434 struct drm_clip_rect *clips,
16435 unsigned num_clips)
16437 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16439 i915_gem_object_flush_if_display(obj);
16440 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
/* Framebuffer vtable installed by intel_framebuffer_init(). */
16445 static const struct drm_framebuffer_funcs intel_fb_funcs = {
16446 .destroy = intel_user_framebuffer_destroy,
16447 .create_handle = intel_user_framebuffer_create_handle,
16448 .dirty = intel_user_framebuffer_dirty,
/*
 * Validate @mode_cmd against @obj and the hardware, then initialise
 * @intel_fb around them: checks modifier vs fence tiling, pixel format
 * support, stride limits and alignment, fills per-plane fb info and
 * registers the fb with the intel_fb_funcs vtable.  Takes a
 * frontbuffer reference that is dropped on the error path (and later
 * by intel_user_framebuffer_destroy()).  Returns 0 or a negative error.
 */
16451 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
16452 struct drm_i915_gem_object *obj,
16453 struct drm_mode_fb_cmd2 *mode_cmd)
16455 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
16456 struct drm_framebuffer *fb = &intel_fb->base;
16458 unsigned int tiling, stride;
16462 intel_fb->frontbuffer = intel_frontbuffer_get(obj);
16463 if (!intel_fb->frontbuffer)
/* Sample tiling/stride under the object lock; they must stay coherent. */
16466 i915_gem_object_lock(obj);
16467 tiling = i915_gem_object_get_tiling(obj);
16468 stride = i915_gem_object_get_stride(obj);
16469 i915_gem_object_unlock(obj);
16471 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
16473 * If there's a fence, enforce that
16474 * the fb modifier and tiling mode match.
16476 if (tiling != I915_TILING_NONE &&
16477 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16478 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
16482 if (tiling == I915_TILING_X) {
16483 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
16484 } else if (tiling == I915_TILING_Y) {
16485 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
16490 if (!drm_any_plane_has_format(&dev_priv->drm,
16491 mode_cmd->pixel_format,
16492 mode_cmd->modifier[0])) {
16493 struct drm_format_name_buf format_name;
16495 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
16496 drm_get_format_name(mode_cmd->pixel_format,
16498 mode_cmd->modifier[0]);
16503 * gen2/3 display engine uses the fence if present,
16504 * so the tiling mode must match the fb modifier exactly.
16506 if (INTEL_GEN(dev_priv) < 4 &&
16507 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16508 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
16512 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
16513 mode_cmd->modifier[0]);
16514 if (mode_cmd->pitches[0] > max_stride) {
16515 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
16516 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
16517 "tiled" : "linear",
16518 mode_cmd->pitches[0], max_stride);
16523 * If there's a fence, enforce that
16524 * the fb pitch and fence stride match.
16526 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
16527 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
16528 mode_cmd->pitches[0], stride);
16532 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
16533 if (mode_cmd->offsets[0] != 0)
16536 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
/* All color planes must share one GEM object and meet stride alignment. */
16538 for (i = 0; i < fb->format->num_planes; i++) {
16539 u32 stride_alignment;
16541 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
16542 DRM_DEBUG_KMS("bad plane %d handle\n", i);
16546 stride_alignment = intel_fb_stride_alignment(fb, i);
16547 if (fb->pitches[i] & (stride_alignment - 1)) {
16548 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
16549 i, fb->pitches[i], stride_alignment);
16553 fb->obj[i] = &obj->base;
16556 ret = intel_fill_fb_info(dev_priv, fb);
16560 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
16562 DRM_ERROR("framebuffer init failed %d\n", ret);
16569 intel_frontbuffer_put(intel_fb->frontbuffer);
/*
 * drm_mode_config_funcs.fb_create: look up the GEM object named by
 * userspace and build an intel framebuffer around it.  The lookup
 * reference is dropped once the fb (which holds its own reference)
 * has been created; returns an ERR_PTR on failure.
 */
16573 static struct drm_framebuffer *
16574 intel_user_framebuffer_create(struct drm_device *dev,
16575 struct drm_file *filp,
16576 const struct drm_mode_fb_cmd2 *user_mode_cmd)
16578 struct drm_framebuffer *fb;
16579 struct drm_i915_gem_object *obj;
16580 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16582 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16584 return ERR_PTR(-ENOENT);
16586 fb = intel_framebuffer_create(obj, &mode_cmd);
16587 i915_gem_object_put(obj);
/*
 * drm_mode_config_funcs.atomic_state_free: release the default atomic
 * state contents plus the i915-specific commit_ready fence.
 */
16592 static void intel_atomic_state_free(struct drm_atomic_state *state)
16594 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16596 drm_atomic_state_default_release(state);
16598 i915_sw_fence_fini(&intel_state->commit_ready);
/*
 * drm_mode_config_funcs.mode_valid: global mode filtering applied to
 * every mode regardless of connector.  Rejects mode flags the hardware
 * never supports, then checks the per-generation transcoder timing
 * limits (h/v display and total maxima, and minimum blanking periods).
 */
16603 static enum drm_mode_status
16604 intel_mode_valid(struct drm_device *dev,
16605 const struct drm_display_mode *mode)
16607 struct drm_i915_private *dev_priv = to_i915(dev);
16608 int hdisplay_max, htotal_max;
16609 int vdisplay_max, vtotal_max;
16612 * Can't reject DBLSCAN here because Xorg ddxen can add piles
16613 * of DBLSCAN modes to the output's mode list when they detect
16614 * the scaling mode property on the connector. And they don't
16615 * ask the kernel to validate those modes in any way until
16616 * modeset time at which point the client gets a protocol error.
16617 * So in order to not upset those clients we silently ignore the
16618 * DBLSCAN flag on such connectors. For other connectors we will
16619 * reject modes with the DBLSCAN flag in encoder->compute_config().
16620 * And we always reject DBLSCAN modes in connector->mode_valid()
16621 * as we never want such modes on the connector's mode list.
16624 if (mode->vscan > 1)
16625 return MODE_NO_VSCAN;
16627 if (mode->flags & DRM_MODE_FLAG_HSKEW)
16628 return MODE_H_ILLEGAL;
16630 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16631 DRM_MODE_FLAG_NCSYNC |
16632 DRM_MODE_FLAG_PCSYNC))
16635 if (mode->flags & (DRM_MODE_FLAG_BCAST |
16636 DRM_MODE_FLAG_PIXMUX |
16637 DRM_MODE_FLAG_CLKDIV2))
16640 /* Transcoder timing limits */
16641 if (INTEL_GEN(dev_priv) >= 11) {
16642 hdisplay_max = 16384;
16643 vdisplay_max = 8192;
16644 htotal_max = 16384;
16646 } else if (INTEL_GEN(dev_priv) >= 9 ||
16647 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16648 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16649 vdisplay_max = 4096;
16652 } else if (INTEL_GEN(dev_priv) >= 3) {
16653 hdisplay_max = 4096;
16654 vdisplay_max = 4096;
16658 hdisplay_max = 2048;
16659 vdisplay_max = 2048;
16664 if (mode->hdisplay > hdisplay_max ||
16665 mode->hsync_start > htotal_max ||
16666 mode->hsync_end > htotal_max ||
16667 mode->htotal > htotal_max)
16668 return MODE_H_ILLEGAL;
16670 if (mode->vdisplay > vdisplay_max ||
16671 mode->vsync_start > vtotal_max ||
16672 mode->vsync_end > vtotal_max ||
16673 mode->vtotal > vtotal_max)
16674 return MODE_V_ILLEGAL;
/* Gen5+ additionally requires minimum active/blanking sizes. */
16676 if (INTEL_GEN(dev_priv) >= 5) {
16677 if (mode->hdisplay < 64 ||
16678 mode->htotal - mode->hdisplay < 32)
16679 return MODE_H_ILLEGAL;
16681 if (mode->vtotal - mode->vdisplay < 5)
16682 return MODE_V_ILLEGAL;
16684 if (mode->htotal - mode->hdisplay < 32)
16685 return MODE_H_ILLEGAL;
16687 if (mode->vtotal - mode->vdisplay < 3)
16688 return MODE_V_ILLEGAL;
/*
 * Reject modes larger than the biggest plane the hardware supports, so
 * we never advertise a mode that a fullscreen plane could not cover.
 * Only meaningful on gen9+; older platforms are fully covered by
 * intel_mode_valid().
 */
16694 enum drm_mode_status
16695 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16696 const struct drm_display_mode *mode)
16698 int plane_width_max, plane_height_max;
16701 * intel_mode_valid() should be
16702 * sufficient on older platforms.
16704 if (INTEL_GEN(dev_priv) < 9)
16708 * Most people will probably want a fullscreen
16709 * plane so let's not advertize modes that are
16710 * too big for that.
16712 if (INTEL_GEN(dev_priv) >= 11) {
16713 plane_width_max = 5120;
16714 plane_height_max = 4320;
16716 plane_width_max = 5120;
16717 plane_height_max = 4096;
16720 if (mode->hdisplay > plane_width_max)
16721 return MODE_H_ILLEGAL;
16723 if (mode->vdisplay > plane_height_max)
16724 return MODE_V_ILLEGAL;
/* mode_config vtable: fb creation, mode validation and atomic entry points. */
16729 static const struct drm_mode_config_funcs intel_mode_funcs = {
16730 .fb_create = intel_user_framebuffer_create,
16731 .get_format_info = intel_get_format_info,
16732 .output_poll_changed = intel_fbdev_output_poll_changed,
16733 .mode_valid = intel_mode_valid,
16734 .atomic_check = intel_atomic_check,
16735 .atomic_commit = intel_atomic_commit,
16736 .atomic_state_alloc = intel_atomic_state_alloc,
16737 .atomic_state_clear = intel_atomic_state_clear,
16738 .atomic_state_free = intel_atomic_state_free,
16742 * intel_init_display_hooks - initialize the display modesetting hooks
16743 * @dev_priv: device private
/*
 * Populate dev_priv->display with the per-platform implementations of
 * pipe-config readout, initial plane readout, clock computation and
 * crtc enable/disable, plus the FDI link-training and modeset-enable
 * hooks.  Ordered from newest platform to oldest; the first matching
 * branch wins.
 */
16745 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16747 intel_init_cdclk_hooks(dev_priv);
16749 if (INTEL_GEN(dev_priv) >= 9) {
16750 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16751 dev_priv->display.get_initial_plane_config =
16752 skylake_get_initial_plane_config;
16753 dev_priv->display.crtc_compute_clock =
16754 haswell_crtc_compute_clock;
16755 dev_priv->display.crtc_enable = haswell_crtc_enable;
16756 dev_priv->display.crtc_disable = haswell_crtc_disable;
16757 } else if (HAS_DDI(dev_priv)) {
16758 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16759 dev_priv->display.get_initial_plane_config =
16760 i9xx_get_initial_plane_config;
16761 dev_priv->display.crtc_compute_clock =
16762 haswell_crtc_compute_clock;
16763 dev_priv->display.crtc_enable = haswell_crtc_enable;
16764 dev_priv->display.crtc_disable = haswell_crtc_disable;
16765 } else if (HAS_PCH_SPLIT(dev_priv)) {
16766 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
16767 dev_priv->display.get_initial_plane_config =
16768 i9xx_get_initial_plane_config;
16769 dev_priv->display.crtc_compute_clock =
16770 ironlake_crtc_compute_clock;
16771 dev_priv->display.crtc_enable = ironlake_crtc_enable;
16772 dev_priv->display.crtc_disable = ironlake_crtc_disable;
16773 } else if (IS_CHERRYVIEW(dev_priv)) {
16774 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16775 dev_priv->display.get_initial_plane_config =
16776 i9xx_get_initial_plane_config;
16777 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16778 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16779 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16780 } else if (IS_VALLEYVIEW(dev_priv)) {
16781 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16782 dev_priv->display.get_initial_plane_config =
16783 i9xx_get_initial_plane_config;
16784 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16785 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16786 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16787 } else if (IS_G4X(dev_priv)) {
16788 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16789 dev_priv->display.get_initial_plane_config =
16790 i9xx_get_initial_plane_config;
16791 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16792 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16793 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16794 } else if (IS_PINEVIEW(dev_priv)) {
16795 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16796 dev_priv->display.get_initial_plane_config =
16797 i9xx_get_initial_plane_config;
16798 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16799 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16800 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16801 } else if (!IS_GEN(dev_priv, 2)) {
16802 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16803 dev_priv->display.get_initial_plane_config =
16804 i9xx_get_initial_plane_config;
16805 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16806 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16807 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16809 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16810 dev_priv->display.get_initial_plane_config =
16811 i9xx_get_initial_plane_config;
16812 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16813 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16814 dev_priv->display.crtc_disable = i9xx_crtc_disable;
/* FDI link training only exists on ILK/SNB/IVB (PCH-split with FDI). */
16817 if (IS_GEN(dev_priv, 5)) {
16818 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16819 } else if (IS_GEN(dev_priv, 6)) {
16820 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16821 } else if (IS_IVYBRIDGE(dev_priv)) {
16822 /* FIXME: detect B0+ stepping and use auto training */
16823 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16826 if (INTEL_GEN(dev_priv) >= 9)
16827 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16829 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
/*
 * Read back the current CDCLK state from hardware and seed the logical
 * and actual software copies from it.
 */
16833 void intel_modeset_init_hw(struct drm_i915_private *i915)
16835 intel_update_cdclk(i915);
16836 intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
16837 i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
16841 * Calculate what we think the watermarks should be for the state we've read
16842 * out of the hardware and then immediately program those watermarks so that
16843 * we ensure the hardware settings match our internal state.
16845 * We can calculate what we think WM's should be by creating a duplicate of the
16846 * current state (which was constructed during hardware readout) and running it
16847 * through the atomic check code to calculate new watermark values in the
/*
 * See the block comment above: duplicate the state read out of hardware,
 * run it through atomic check to compute trustworthy watermarks, and
 * program them immediately so software and hardware state agree.
 */
16850 static void sanitize_watermarks(struct drm_device *dev)
16852 struct drm_i915_private *dev_priv = to_i915(dev);
16853 struct drm_atomic_state *state;
16854 struct intel_atomic_state *intel_state;
16855 struct intel_crtc *crtc;
16856 struct intel_crtc_state *crtc_state;
16857 struct drm_modeset_acquire_ctx ctx;
16861 /* Only supported on platforms that use atomic watermark design */
16862 if (!dev_priv->display.optimize_watermarks)
16866 * We need to hold connection_mutex before calling duplicate_state so
16867 * that the connector loop is protected.
16869 drm_modeset_acquire_init(&ctx, 0);
16871 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16872 if (ret == -EDEADLK) {
16873 drm_modeset_backoff(&ctx);
16875 } else if (WARN_ON(ret)) {
16879 state = drm_atomic_helper_duplicate_state(dev, &ctx);
16880 if (WARN_ON(IS_ERR(state)))
16883 intel_state = to_intel_atomic_state(state);
16886 * Hardware readout is the only time we don't want to calculate
16887 * intermediate watermarks (since we don't trust the current
16890 if (!HAS_GMCH(dev_priv))
16891 intel_state->skip_intermediate_wm = true;
16893 ret = intel_atomic_check(dev, state);
16896 * If we fail here, it means that the hardware appears to be
16897 * programmed in a way that shouldn't be possible, given our
16898 * understanding of watermark requirements. This might mean a
16899 * mistake in the hardware readout code or a mistake in the
16900 * watermark calculations for a given platform. Raise a WARN
16901 * so that this is noticeable.
16903 * If this actually happens, we'll have to just leave the
16904 * BIOS-programmed watermarks untouched and hope for the best.
16906 WARN(true, "Could not determine valid watermarks for inherited state\n");
16910 /* Write calculated watermark values back */
16911 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
16912 crtc_state->wm.need_postvbl_update = true;
16913 dev_priv->display.optimize_watermarks(intel_state, crtc);
16915 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
16919 drm_atomic_state_put(state);
16921 drm_modeset_drop_locks(&ctx);
16922 drm_modeset_acquire_fini(&ctx);
/*
 * Cache the FDI PLL frequency: read from FDI_PLL_BIOS_0 on gen5,
 * fixed 270 MHz on gen6/IVB (other platforms presumably get 0 via an
 * else branch lost in extraction -- confirm in the full file).
 */
16925 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16927 if (IS_GEN(dev_priv, 5)) {
16929 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16931 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16932 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16933 dev_priv->fdi_pll_freq = 270000;
16938 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
/*
 * Commit the state inherited from the BIOS once at init: add every
 * crtc (and, for active ones, all their planes) to a fresh atomic
 * state and commit it, retrying on -EDEADLK via the acquire context.
 * Returns 0 on success or a negative error code.
 */
16941 static int intel_initial_commit(struct drm_device *dev)
16943 struct drm_atomic_state *state = NULL;
16944 struct drm_modeset_acquire_ctx ctx;
16945 struct intel_crtc *crtc;
16948 state = drm_atomic_state_alloc(dev);
16952 drm_modeset_acquire_init(&ctx, 0);
16955 state->acquire_ctx = &ctx;
16957 for_each_intel_crtc(dev, crtc) {
16958 struct intel_crtc_state *crtc_state =
16959 intel_atomic_get_crtc_state(state, crtc);
16961 if (IS_ERR(crtc_state)) {
16962 ret = PTR_ERR(crtc_state);
16966 if (crtc_state->hw.active) {
16967 ret = drm_atomic_add_affected_planes(state, &crtc->base);
16972 * FIXME hack to force a LUT update to avoid the
16973 * plane update forcing the pipe gamma on without
16974 * having a proper LUT loaded. Remove once we
16975 * have readout for pipe gamma enable.
16977 crtc_state->uapi.color_mgmt_changed = true;
16981 ret = drm_atomic_commit(state);
16984 if (ret == -EDEADLK) {
16985 drm_atomic_state_clear(state);
16986 drm_modeset_backoff(&ctx);
16990 drm_atomic_state_put(state);
16992 drm_modeset_drop_locks(&ctx);
16993 drm_modeset_acquire_fini(&ctx);
/*
 * Initialise drm_mode_config for this device: install intel_mode_funcs
 * and set per-generation framebuffer and cursor size limits.
 */
16998 static void intel_mode_config_init(struct drm_i915_private *i915)
17000 struct drm_mode_config *mode_config = &i915->drm.mode_config;
17002 drm_mode_config_init(&i915->drm);
17004 mode_config->min_width = 0;
17005 mode_config->min_height = 0;
17007 mode_config->preferred_depth = 24;
17008 mode_config->prefer_shadow = 1;
17010 mode_config->allow_fb_modifiers = true;
17012 mode_config->funcs = &intel_mode_funcs;
17015 * Maximum framebuffer dimensions, chosen to match
17016 * the maximum render engine surface size on gen4+.
17018 if (INTEL_GEN(i915) >= 7) {
17019 mode_config->max_width = 16384;
17020 mode_config->max_height = 16384;
17021 } else if (INTEL_GEN(i915) >= 4) {
17022 mode_config->max_width = 8192;
17023 mode_config->max_height = 8192;
17024 } else if (IS_GEN(i915, 3)) {
17025 mode_config->max_width = 4096;
17026 mode_config->max_height = 4096;
17028 mode_config->max_width = 2048;
17029 mode_config->max_height = 2048;
/* Cursor limits: 845/865 have special width rules, gen2 64x64, else 256. */
17032 if (IS_I845G(i915) || IS_I865G(i915)) {
17033 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17034 mode_config->cursor_height = 1023;
17035 } else if (IS_GEN(i915, 2)) {
17036 mode_config->cursor_width = 64;
17037 mode_config->cursor_height = 64;
17039 mode_config->cursor_width = 256;
17040 mode_config->cursor_height = 256;
/*
 * intel_modeset_init - main display-side driver load path: allocates the
 * modeset/flip workqueues, initializes mode_config, bandwidth/FBC/PM/GMBUS
 * state, creates the crtcs, sets up outputs, reads out the BIOS hw state,
 * reserves the BIOS framebuffer and finishes with an initial commit.
 *
 * NOTE(review): lossy extraction — error-handling lines and the final
 * return are missing from this view.
 */
17044 int intel_modeset_init(struct drm_i915_private *i915)
17046 struct drm_device *dev = &i915->drm;
17048 struct intel_crtc *crtc;
17051 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
17052 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
17053 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
17055 intel_mode_config_init(i915);
17057 ret = intel_bw_init(i915);
17061 init_llist_head(&i915->atomic_helper.free_list);
17062 INIT_WORK(&i915->atomic_helper.free_work,
17063 intel_atomic_helper_free_state_worker);
17065 intel_init_quirks(i915);
17067 intel_fbc_init(i915);
17069 intel_init_pm(i915);
17071 intel_panel_sanitize_ssc(i915);
17073 intel_gmbus_setup(i915);
17075 DRM_DEBUG_KMS("%d display pipe%s available.\n",
17076 INTEL_NUM_PIPES(i915),
17077 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
/* Create one crtc per hardware pipe, but only if the display is usable. */
17079 if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
17080 for_each_pipe(i915, pipe) {
17081 ret = intel_crtc_init(i915, pipe);
17083 drm_mode_config_cleanup(dev);
17089 intel_shared_dpll_init(dev);
17090 intel_update_fdi_pll_freq(i915);
17092 intel_update_czclk(i915);
17093 intel_modeset_init_hw(i915);
17095 intel_hdcp_component_init(i915);
17097 if (i915->max_cdclk_freq == 0)
17098 intel_update_max_cdclk(i915);
17100 /* Just disable it once at startup */
17101 intel_vga_disable(i915);
17102 intel_setup_outputs(i915);
/* Read out and sanitize whatever the BIOS left enabled. */
17104 drm_modeset_lock_all(dev);
17105 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
17106 drm_modeset_unlock_all(dev);
17108 for_each_intel_crtc(dev, crtc) {
17109 struct intel_initial_plane_config plane_config = {};
17115 * Note that reserving the BIOS fb up front prevents us
17116 * from stuffing other stolen allocations like the ring
17117 * on top. This prevents some ugliness at boot time, and
17118 * can even allow for smooth boot transitions if the BIOS
17119 * fb is large enough for the active pipe configuration.
17121 i915->display.get_initial_plane_config(crtc, &plane_config);
17124 * If the fb is shared between multiple heads, we'll
17125 * just get the first one.
17127 intel_find_initial_plane_obj(crtc, &plane_config);
17131 * Make sure hardware watermarks really match the state we read out.
17132 * Note that we need to do this after reconstructing the BIOS fb's
17133 * since the watermark calculation done here will use pstate->fb.
17135 if (!HAS_GMCH(i915))
17136 sanitize_watermarks(dev);
17139 * Force all active planes to recompute their states. So that on
17140 * mode_setcrtc after probe, all the intel_plane_state variables
17141 * are already calculated and there is no assert_plane warnings
17144 ret = intel_initial_commit(dev);
17146 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
/*
 * i830_enable_pipe - force-enable a pipe on i830 with a fixed 640x480@60
 * VGA-style timing (force-quirk workaround: the chip apparently needs both
 * pipes running).  Programs the DPLL dividers, fixed mode timings and
 * finally PIPECONF, then waits for the scanline to start moving.
 *
 * NOTE(review): lossy extraction — some declarations and lines of the dpll
 * struct initializer are missing from this view.
 */
17151 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
17153 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17154 /* 640x480@60Hz, ~25175 kHz */
17155 struct dpll clock = {
/* Sanity-check the divider settings reproduce the expected dotclock. */
17165 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
17167 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
17168 pipe_name(pipe), clock.vco, clock.dot);
17170 fp = i9xx_dpll_compute_fp(&clock);
17171 dpll = DPLL_DVO_2X_MODE |
17172 DPLL_VGA_MODE_DIS |
17173 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
17174 PLL_P2_DIVIDE_BY_4 |
17175 PLL_REF_INPUT_DREFCLK |
17178 I915_WRITE(FP0(pipe), fp);
17179 I915_WRITE(FP1(pipe), fp);
/* Hard-coded 640x480@60 (800x525 total) timings. */
17181 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
17182 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
17183 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
17184 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
17185 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
17186 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
17187 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
17190 * Apparently we need to have VGA mode enabled prior to changing
17191 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
17192 * dividers, even though the register value does change.
17194 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
17195 I915_WRITE(DPLL(pipe), dpll);
17197 /* Wait for the clocks to stabilize. */
17198 POSTING_READ(DPLL(pipe));
17201 /* The pixel multiplier can only be updated once the
17202 * DPLL is enabled and the clocks are stable.
17204 * So write it again.
17206 I915_WRITE(DPLL(pipe), dpll);
17208 /* We do this three times for luck */
17209 for (i = 0; i < 3 ; i++) {
17210 I915_WRITE(DPLL(pipe), dpll);
17211 POSTING_READ(DPLL(pipe));
17212 udelay(150); /* wait for warmup */
17215 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
17216 POSTING_READ(PIPECONF(pipe));
17218 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * i830_disable_pipe - counterpart of i830_enable_pipe(): turn the
 * force-quirk pipe back off.  Asserts that no plane or cursor is still
 * scanning out of it, then disables PIPECONF, waits for the scanline to
 * stop, and shuts the DPLL down (keeping VGA mode disabled).
 */
17221 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
17223 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17225 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
/* Nothing may still be using the pipe we're about to kill. */
17228 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
17229 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
17230 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
17231 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
17232 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
17234 I915_WRITE(PIPECONF(pipe), 0);
17235 POSTING_READ(PIPECONF(pipe));
17237 intel_wait_for_pipe_scanline_stopped(crtc);
17239 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
17240 POSTING_READ(DPLL(pipe));
/*
 * intel_sanitize_plane_mapping - on pre-gen4 hardware the BIOS may attach
 * a primary plane to the wrong pipe; detect that via the plane's hw-state
 * readout and disable any misrouted plane.  Gen4+ fixes plane->pipe
 * mapping in hardware, so nothing to do there.
 */
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17246 struct intel_crtc *crtc;
17248 if (INTEL_GEN(dev_priv) >= 4)
17251 for_each_intel_crtc(&dev_priv->drm, crtc) {
17252 struct intel_plane *plane =
17253 to_intel_plane(crtc->base.primary);
17254 struct intel_crtc *plane_crtc;
17257 if (!plane->get_hw_state(plane, &pipe))
/* Plane already points at its own pipe: nothing to fix. */
17260 if (pipe == crtc->pipe)
17263 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17264 plane->base.base.id, plane->base.name);
17266 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17267 intel_plane_disable_noatomic(plane_crtc, plane);
/*
 * intel_crtc_has_encoders - report whether any encoder is attached to this
 * crtc.  NOTE(review): the return statements are missing from this lossy
 * extraction; presumably returns true on the first match, false otherwise.
 */
17271 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17273 struct drm_device *dev = crtc->base.dev;
17274 struct intel_encoder *encoder;
17276 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
/*
 * intel_encoder_find_connector - find a connector currently using this
 * encoder.  NOTE(review): return statements are missing from this lossy
 * extraction; presumably returns the first match or NULL.
 */
17282 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17284 struct drm_device *dev = encoder->base.dev;
17285 struct intel_connector *connector;
17287 for_each_connector_on_encoder(dev, &encoder->base, connector)
/*
 * has_pch_trancoder - true when a PCH transcoder exists for the given
 * pipe: IBX/CPT have one per pipe, LPT-H only has transcoder A.
 * (The "trancoder" spelling matches the existing callers; renaming would
 * be a tree-wide change.)
 */
17293 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17294 enum pipe pch_transcoder)
17296 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17297 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
/*
 * intel_sanitize_frame_start_delay - clear any non-zero frame start delay
 * the BIOS may have left programmed (used for debugging) on both the CPU
 * transcoder and, when a PCH encoder is present, the PCH transcoder.
 * Register layout differs per platform: CHICKEN_TRANS on HSW/BDW/gen9+,
 * PIPECONF otherwise; PCH_TRANSCONF on IBX, TRANS_CHICKEN2 on later PCHs.
 */
17300 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
17302 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
17303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
17304 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
17306 if (INTEL_GEN(dev_priv) >= 9 ||
17307 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17308 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
/* DSI transcoders have no CHICKEN_TRANS register; skip them. */
17311 if (transcoder_is_dsi(cpu_transcoder))
17314 val = I915_READ(reg);
17315 val &= ~HSW_FRAME_START_DELAY_MASK;
17316 val |= HSW_FRAME_START_DELAY(0);
17317 I915_WRITE(reg, val);
17319 i915_reg_t reg = PIPECONF(cpu_transcoder);
17322 val = I915_READ(reg);
17323 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
17324 val |= PIPECONF_FRAME_START_DELAY(0);
17325 I915_WRITE(reg, val);
/* The PCH transcoder only needs sanitizing when actually in use. */
17328 if (!crtc_state->has_pch_encoder)
17331 if (HAS_PCH_IBX(dev_priv)) {
17332 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
17335 val = I915_READ(reg);
17336 val &= ~TRANS_FRAME_START_DELAY_MASK;
17337 val |= TRANS_FRAME_START_DELAY(0);
17338 I915_WRITE(reg, val);
17340 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
17341 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
17344 val = I915_READ(reg);
17345 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
17346 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
17347 I915_WRITE(reg, val);
/*
 * intel_sanitize_crtc - bring one crtc's BIOS-inherited state in line with
 * what the driver expects: clear frame start delays, disable all
 * non-primary planes and any BIOS background color, turn the pipe off if
 * nothing drives it, and initialize FIFO-underrun bookkeeping.
 */
17351 static void intel_sanitize_crtc(struct intel_crtc *crtc,
17352 struct drm_modeset_acquire_ctx *ctx)
17354 struct drm_device *dev = crtc->base.dev;
17355 struct drm_i915_private *dev_priv = to_i915(dev);
17356 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
17358 if (crtc_state->hw.active) {
17359 struct intel_plane *plane;
17361 /* Clear any frame start delays used for debugging left by the BIOS */
17362 intel_sanitize_frame_start_delay(crtc_state);
17364 /* Disable everything but the primary plane */
17365 for_each_intel_plane_on_crtc(dev, crtc, plane) {
17366 const struct intel_plane_state *plane_state =
17367 to_intel_plane_state(plane->base.state);
17369 if (plane_state->uapi.visible &&
17370 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
17371 intel_plane_disable_noatomic(crtc, plane);
17375 * Disable any background color set by the BIOS, but enable the
17376 * gamma and CSC to match how we program our planes.
17378 if (INTEL_GEN(dev_priv) >= 9)
17379 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
17380 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
17381 SKL_BOTTOM_COLOR_CSC_ENABLE);
17384 /* Adjust the state of the output pipe according to whether we
17385 * have active connectors/encoders. */
17386 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
17387 intel_crtc_disable_noatomic(crtc, ctx);
17389 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
17391 * We start out with underrun reporting disabled to avoid races.
17392 * For correct bookkeeping mark this on active crtcs.
17394 * Also on gmch platforms we dont have any hardware bits to
17395 * disable the underrun reporting. Which means we need to start
17396 * out with underrun reporting disabled also on inactive pipes,
17397 * since otherwise we'll complain about the garbage we read when
17398 * e.g. coming up after runtime pm.
17400 * No protection against concurrent access is required - at
17401 * worst a fifo underrun happens which also sets this to false.
17403 crtc->cpu_fifo_underrun_disabled = true;
17405 * We track the PCH trancoder underrun reporting state
17406 * within the crtc. With crtc for pipe A housing the underrun
17407 * reporting state for PCH transcoder A, crtc for pipe B housing
17408 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
17409 * and marking underrun reporting as disabled for the non-existing
17410 * PCH transcoders B and C would prevent enabling the south
17411 * error interrupt (see cpt_can_enable_serr_int()).
17413 if (has_pch_trancoder(dev_priv, crtc->pipe))
17414 crtc->pch_fifo_underrun_disabled = true;
/*
 * has_bogus_dpll_config - detect the known SNB BIOS misprogramming
 * (port_clock == 0 with a shared DPLL attached on an active crtc) so the
 * caller can simply shut everything down instead of warning endlessly.
 */
17418 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17420 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17423 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17424 * the hardware when a high res displays plugged in. DPLL P
17425 * divider is zero, and the pipe timings are bonkers. We'll
17426 * try to disable everything in that case.
17428 * FIXME would be nice to be able to sanitize this state
17429 * without several WARNs, but for now let's take the easy
17432 return IS_GEN(dev_priv, 6) &&
17433 crtc_state->hw.active &&
17434 crtc_state->shared_dpll &&
17435 crtc_state->port_clock == 0;
/*
 * intel_sanitize_encoder - fix up an encoder whose read-out state is
 * inconsistent: an encoder claimed by a connector but without an active
 * pipe gets manually disabled, and connector/encoder links are clamped to
 * a sane "off" default.  Also notifies opregion and, on gen11+, sanitizes
 * the encoder's PLL mapping.
 */
17438 static void intel_sanitize_encoder(struct intel_encoder *encoder)
17440 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
17441 struct intel_connector *connector;
17442 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
17443 struct intel_crtc_state *crtc_state = crtc ?
17444 to_intel_crtc_state(crtc->base.state) : NULL;
17446 /* We need to check both for a crtc link (meaning that the
17447 * encoder is active and trying to read from a pipe) and the
17448 * pipe itself being active. */
17449 bool has_active_crtc = crtc_state &&
17450 crtc_state->hw.active;
/* Treat the known-bogus SNB BIOS DPLL state as "no active crtc". */
17452 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
17453 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
17454 pipe_name(crtc->pipe));
17455 has_active_crtc = false;
17458 connector = intel_encoder_find_connector(encoder);
17459 if (connector && !has_active_crtc) {
17460 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
17461 encoder->base.base.id,
17462 encoder->base.name);
17464 /* Connector is active, but has no active pipe. This is
17465 * fallout from our resume register restoring. Disable
17466 * the encoder manually again. */
17468 struct drm_encoder *best_encoder;
17470 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
17471 encoder->base.base.id,
17472 encoder->base.name);
17474 /* avoid oopsing in case the hooks consult best_encoder */
17475 best_encoder = connector->base.state->best_encoder;
17476 connector->base.state->best_encoder = &encoder->base;
17478 if (encoder->disable)
17479 encoder->disable(encoder, crtc_state,
17480 connector->base.state);
17481 if (encoder->post_disable)
17482 encoder->post_disable(encoder, crtc_state,
17483 connector->base.state);
/* Restore the saved best_encoder once the hooks are done. */
17485 connector->base.state->best_encoder = best_encoder;
17487 encoder->base.crtc = NULL;
17489 /* Inconsistent output/port/pipe state happens presumably due to
17490 * a bug in one of the get_hw_state functions. Or someplace else
17491 * in our code, like the register restore mess on resume. Clamp
17492 * things to off as a safer default. */
17494 connector->base.dpms = DRM_MODE_DPMS_OFF;
17495 connector->base.encoder = NULL;
17498 /* notify opregion of the sanitized encoder state */
17499 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
17501 if (INTEL_GEN(dev_priv) >= 11)
17502 icl_sanitize_encoder_pll_mapping(encoder);
/* FIXME read out full plane state for all planes */
/*
 * readout_plane_state - query each plane's hw enable state and which pipe
 * it scans out from, record the visibility in the corresponding crtc
 * state, then fix up each crtc's active-planes bookkeeping.
 */
17506 static void readout_plane_state(struct drm_i915_private *dev_priv)
17508 struct intel_plane *plane;
17509 struct intel_crtc *crtc;
17511 for_each_intel_plane(&dev_priv->drm, plane) {
17512 struct intel_plane_state *plane_state =
17513 to_intel_plane_state(plane->base.state);
17514 struct intel_crtc_state *crtc_state;
17515 enum pipe pipe = PIPE_A;
17518 visible = plane->get_hw_state(plane, &pipe);
17520 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17521 crtc_state = to_intel_crtc_state(crtc->base.state);
17523 intel_set_plane_visible(crtc_state, plane_state, visible);
17525 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
17526 plane->base.base.id, plane->base.name,
17527 enableddisabled(visible), pipe_name(pipe));
17530 for_each_intel_crtc(&dev_priv->drm, crtc) {
17531 struct intel_crtc_state *crtc_state =
17532 to_intel_crtc_state(crtc->base.state);
17534 fixup_active_planes(crtc_state);
/*
 * intel_modeset_readout_hw_state - reconstruct the driver's software state
 * from the hardware registers at load/resume time.  Reads out, in order:
 * pipe configs, plane state, shared DPLLs, encoders, connectors, then
 * derives modes, pixel rates, per-plane data rates, min cdclk/voltage and
 * bandwidth state for each active crtc.
 *
 * NOTE(review): lossy extraction — some original lines (braces, a few
 * statements) are missing from this view; verify against the full source.
 */
17538 static void intel_modeset_readout_hw_state(struct drm_device *dev)
17540 struct drm_i915_private *dev_priv = to_i915(dev);
17542 struct intel_crtc *crtc;
17543 struct intel_encoder *encoder;
17544 struct intel_connector *connector;
17545 struct drm_connector_list_iter conn_iter;
17548 dev_priv->active_pipes = 0;
/* Pipe/crtc readout: reset each crtc state and query the hardware. */
17550 for_each_intel_crtc(dev, crtc) {
17551 struct intel_crtc_state *crtc_state =
17552 to_intel_crtc_state(crtc->base.state);
17554 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
17555 intel_crtc_free_hw_state(crtc_state);
17556 intel_crtc_state_reset(crtc_state, crtc);
17558 crtc_state->hw.active = crtc_state->hw.enable =
17559 dev_priv->display.get_pipe_config(crtc, crtc_state);
17561 crtc->base.enabled = crtc_state->hw.enable;
17562 crtc->active = crtc_state->hw.active;
17564 if (crtc_state->hw.active)
17565 dev_priv->active_pipes |= BIT(crtc->pipe);
17567 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
17568 crtc->base.base.id, crtc->base.name,
17569 enableddisabled(crtc_state->hw.active));
17572 readout_plane_state(dev_priv);
/* Shared DPLL readout: on-state, per-pll crtc masks. */
17574 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
17575 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
17577 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
17578 &pll->state.hw_state);
17580 if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
17581 pll->info->id == DPLL_ID_EHL_DPLL4) {
17582 pll->wakeref = intel_display_power_get(dev_priv,
17583 POWER_DOMAIN_DPLL_DC_OFF);
17586 pll->state.crtc_mask = 0;
17587 for_each_intel_crtc(dev, crtc) {
17588 struct intel_crtc_state *crtc_state =
17589 to_intel_crtc_state(crtc->base.state);
17591 if (crtc_state->hw.active &&
17592 crtc_state->shared_dpll == pll)
17593 pll->state.crtc_mask |= 1 << crtc->pipe;
17595 pll->active_mask = pll->state.crtc_mask;
17597 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
17598 pll->info->name, pll->state.crtc_mask, pll->on);
/* Encoder readout: link each active encoder to its crtc. */
17601 for_each_intel_encoder(dev, encoder) {
17604 if (encoder->get_hw_state(encoder, &pipe)) {
17605 struct intel_crtc_state *crtc_state;
17607 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17608 crtc_state = to_intel_crtc_state(crtc->base.state);
17610 encoder->base.crtc = &crtc->base;
17611 encoder->get_config(encoder, crtc_state);
17613 encoder->base.crtc = NULL;
17616 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
17617 encoder->base.base.id, encoder->base.name,
17618 enableddisabled(encoder->base.crtc),
/* Connector readout: dpms state plus connector/encoder masks. */
17622 drm_connector_list_iter_begin(dev, &conn_iter);
17623 for_each_intel_connector_iter(connector, &conn_iter) {
17624 if (connector->get_hw_state(connector)) {
17625 struct intel_crtc_state *crtc_state;
17626 struct intel_crtc *crtc;
17628 connector->base.dpms = DRM_MODE_DPMS_ON;
17630 encoder = connector->encoder;
17631 connector->base.encoder = &encoder->base;
17633 crtc = to_intel_crtc(encoder->base.crtc);
17634 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
17636 if (crtc_state && crtc_state->hw.active) {
17638 * This has to be done during hardware readout
17639 * because anything calling .crtc_disable may
17640 * rely on the connector_mask being accurate.
17642 crtc_state->uapi.connector_mask |=
17643 drm_connector_mask(&connector->base);
17644 crtc_state->uapi.encoder_mask |=
17645 drm_encoder_mask(&encoder->base);
17648 connector->base.dpms = DRM_MODE_DPMS_OFF;
17649 connector->base.encoder = NULL;
17651 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
17652 connector->base.base.id, connector->base.name,
17653 enableddisabled(connector->base.encoder));
17655 drm_connector_list_iter_end(&conn_iter);
/* Derived state: modes, pixel rate, data rate, min cdclk, bandwidth. */
17657 for_each_intel_crtc(dev, crtc) {
17658 struct intel_bw_state *bw_state =
17659 to_intel_bw_state(dev_priv->bw_obj.state);
17660 struct intel_crtc_state *crtc_state =
17661 to_intel_crtc_state(crtc->base.state);
17662 struct intel_plane *plane;
17665 if (crtc_state->hw.active) {
17666 struct drm_display_mode *mode = &crtc_state->hw.mode;
17668 intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
17671 *mode = crtc_state->hw.adjusted_mode;
17672 mode->hdisplay = crtc_state->pipe_src_w;
17673 mode->vdisplay = crtc_state->pipe_src_h;
17676 * The initial mode needs to be set in order to keep
17677 * the atomic core happy. It wants a valid mode if the
17678 * crtc's enabled, so we do the above call.
17680 * But we don't set all the derived state fully, hence
17681 * set a flag to indicate that a full recalculation is
17682 * needed on the next commit.
17684 mode->private_flags = I915_MODE_FLAG_INHERITED;
17686 intel_crtc_compute_pixel_rate(crtc_state);
17688 intel_crtc_update_active_timings(crtc_state);
17690 intel_crtc_copy_hw_to_uapi_state(crtc_state);
17693 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
17694 const struct intel_plane_state *plane_state =
17695 to_intel_plane_state(plane->base.state);
17698 * FIXME don't have the fb yet, so can't
17699 * use intel_plane_data_rate() :(
17701 if (plane_state->uapi.visible)
17702 crtc_state->data_rate[plane->id] =
17703 4 * crtc_state->pixel_rate;
17705 * FIXME don't have the fb yet, so can't
17706 * use plane->min_cdclk() :(
17708 if (plane_state->uapi.visible && plane->min_cdclk) {
17709 if (crtc_state->double_wide ||
17710 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
17711 crtc_state->min_cdclk[plane->id] =
17712 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
17714 crtc_state->min_cdclk[plane->id] =
17715 crtc_state->pixel_rate;
17717 DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
17718 plane->base.base.id, plane->base.name,
17719 crtc_state->min_cdclk[plane->id]);
17722 if (crtc_state->hw.active) {
17723 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
17724 if (WARN_ON(min_cdclk < 0))
17728 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
17729 dev_priv->min_voltage_level[crtc->pipe] =
17730 crtc_state->min_voltage_level;
17732 intel_bw_crtc_update(bw_state, crtc_state);
17734 intel_pipe_config_sanity_check(dev_priv, crtc_state);
/*
 * get_encoder_power_domains - let each active encoder that implements
 * ->get_power_domains() grab the power-domain references matching its
 * read-out state.  MST-primary and inactive encoders are skipped since
 * they have no crtc state and need no references.
 */
get_encoder_power_domains(struct drm_i915_private *dev_priv)
17741 struct intel_encoder *encoder;
17743 for_each_intel_encoder(&dev_priv->drm, encoder) {
17744 struct intel_crtc_state *crtc_state;
17746 if (!encoder->get_power_domains)
17750 * MST-primary and inactive encoders don't have a crtc state
17751 * and neither of these require any power domain references.
17753 if (!encoder->base.crtc)
17756 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17757 encoder->get_power_domains(encoder, crtc_state);
/*
 * intel_early_display_was - apply display workarounds that must be in
 * place before any other display register access during state setup.
 */
17761 static void intel_early_display_was(struct drm_i915_private *dev_priv)
17763 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
17764 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
17765 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
17768 if (IS_HASWELL(dev_priv)) {
17770 * WaRsPkgCStateDisplayPMReq:hsw
17771 * System hang if this isn't done before disabling all planes!
17773 I915_WRITE(CHICKEN_PAR1_1,
17774 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
/*
 * ibx_sanitize_pch_hdmi_port - if a disabled PCH HDMI port has a non-A
 * transcoder selected by the BIOS, force the select bits back to
 * transcoder A so later assertions don't trip.  Enabled ports and ports
 * already on transcoder A are left alone.
 */
17778 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17779 enum port port, i915_reg_t hdmi_reg)
17781 u32 val = I915_READ(hdmi_reg);
17783 if (val & SDVO_ENABLE ||
17784 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17787 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17790 val &= ~SDVO_PIPE_SEL_MASK;
17791 val |= SDVO_PIPE_SEL(PIPE_A);
17793 I915_WRITE(hdmi_reg, val);
/*
 * ibx_sanitize_pch_dp_port - DP twin of ibx_sanitize_pch_hdmi_port():
 * reset a disabled PCH DP port's pipe-select bits to transcoder A.
 */
17796 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17797 enum port port, i915_reg_t dp_reg)
17799 u32 val = I915_READ(dp_reg);
17801 if (val & DP_PORT_EN ||
17802 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17805 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17808 val &= ~DP_PIPE_SEL_MASK;
17809 val |= DP_PIPE_SEL(PIPE_A);
17811 I915_WRITE(dp_reg, val);
/*
 * ibx_sanitize_pch_ports - sweep all PCH DP and HDMI/SDVO ports and reset
 * any stale BIOS transcoder-select bits (see the per-port helpers).
 */
17814 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
17817 * The BIOS may select transcoder B on some of the PCH
17818 * ports even it doesn't enable the port. This would trip
17819 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
17820 * Sanitize the transcoder select bits to prevent that. We
17821 * assume that the BIOS never actually enabled the port,
17822 * because if it did we'd actually have to toggle the port
17823 * on and back off to make the transcoder A select stick
17824 * (see. intel_dp_link_down(), intel_disable_hdmi(),
17825 * intel_disable_sdvo()).
17827 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
17828 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
17829 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
17831 /* PCH SDVOB multiplex with HDMIB */
17832 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
17833 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
17834 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
17837 /* Scan out the current hw modeset state,
17838 * and sanitizes it to the current state
/*
 * Top-level hw-state takeover: applies early workarounds, reads out the
 * full hardware state, then sanitizes TypeC ports, PCH ports, planes,
 * encoders and crtcs, disables unused DPLLs, reads out watermarks, and
 * finally rebalances the crtc power-domain references.
 *
 * NOTE(review): lossy extraction — some lines are missing from this view.
 */
intel_modeset_setup_hw_state(struct drm_device *dev,
17842 struct drm_modeset_acquire_ctx *ctx)
17844 struct drm_i915_private *dev_priv = to_i915(dev);
17845 struct intel_encoder *encoder;
17846 struct intel_crtc *crtc;
17847 intel_wakeref_t wakeref;
/* Hold POWER_DOMAIN_INIT for the whole readout/sanitize sequence. */
17850 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
17852 intel_early_display_was(dev_priv);
17853 intel_modeset_readout_hw_state(dev);
17855 /* HW state is read out, now we need to sanitize this mess. */
17857 /* Sanitize the TypeC port mode upfront, encoders depend on this */
17858 for_each_intel_encoder(dev, encoder) {
17859 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
17861 /* We need to sanitize only the MST primary port. */
17862 if (encoder->type != INTEL_OUTPUT_DP_MST &&
17863 intel_phy_is_tc(dev_priv, phy))
17864 intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
17867 get_encoder_power_domains(dev_priv);
17869 if (HAS_PCH_IBX(dev_priv))
17870 ibx_sanitize_pch_ports(dev_priv);
17873 * intel_sanitize_plane_mapping() may need to do vblank
17874 * waits, so we need vblank interrupts restored beforehand.
17876 for_each_intel_crtc(&dev_priv->drm, crtc) {
17877 struct intel_crtc_state *crtc_state =
17878 to_intel_crtc_state(crtc->base.state);
17880 drm_crtc_vblank_reset(&crtc->base);
17882 if (crtc_state->hw.active)
17883 intel_crtc_vblank_on(crtc_state);
17886 intel_sanitize_plane_mapping(dev_priv);
17888 for_each_intel_encoder(dev, encoder)
17889 intel_sanitize_encoder(encoder);
17891 for_each_intel_crtc(&dev_priv->drm, crtc) {
17892 struct intel_crtc_state *crtc_state =
17893 to_intel_crtc_state(crtc->base.state);
17895 intel_sanitize_crtc(crtc, ctx);
17896 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
17899 intel_modeset_update_connector_atomic_state(dev);
/* Shut down DPLLs that the BIOS left running with no user. */
17901 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
17902 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
17904 if (!pll->on || pll->active_mask)
17907 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
17910 pll->info->funcs->disable(dev_priv, pll);
/* Per-platform watermark readout (and sanitize where supported). */
17914 if (IS_G4X(dev_priv)) {
17915 g4x_wm_get_hw_state(dev_priv);
17916 g4x_wm_sanitize(dev_priv);
17917 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17918 vlv_wm_get_hw_state(dev_priv);
17919 vlv_wm_sanitize(dev_priv);
17920 } else if (INTEL_GEN(dev_priv) >= 9) {
17921 skl_wm_get_hw_state(dev_priv);
17922 } else if (HAS_PCH_SPLIT(dev_priv)) {
17923 ilk_wm_get_hw_state(dev_priv);
17926 for_each_intel_crtc(dev, crtc) {
17927 struct intel_crtc_state *crtc_state =
17928 to_intel_crtc_state(crtc->base.state);
17931 put_domains = modeset_get_crtc_power_domains(crtc_state);
17932 if (WARN_ON(put_domains))
17933 modeset_put_power_domains(dev_priv, put_domains);
17936 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
/*
 * intel_display_resume - restore the modeset state saved at suspend time
 * (dev_priv->modeset_restore_state), retrying the lock acquisition on
 * -EDEADLK, then re-enable IPC and release the state reference.
 *
 * NOTE(review): lossy extraction — some control-flow lines are missing.
 */
17939 void intel_display_resume(struct drm_device *dev)
17941 struct drm_i915_private *dev_priv = to_i915(dev);
17942 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17943 struct drm_modeset_acquire_ctx ctx;
17946 dev_priv->modeset_restore_state = NULL;
17948 state->acquire_ctx = &ctx;
17950 drm_modeset_acquire_init(&ctx, 0);
/* Deadlock-backoff loop around taking all modeset locks. */
17953 ret = drm_modeset_lock_all_ctx(dev, &ctx);
17954 if (ret != -EDEADLK)
17957 drm_modeset_backoff(&ctx);
17961 ret = __intel_display_resume(dev, state, &ctx);
17963 intel_enable_ipc(dev_priv);
17964 drm_modeset_drop_locks(&ctx);
17965 drm_modeset_acquire_fini(&ctx);
17968 DRM_ERROR("Restoring old state failed with %i\n", ret);
17970 drm_atomic_state_put(state);
/*
 * intel_hpd_poll_fini - cancel all per-connector work that hotplug
 * handling may have queued (modeset retry and HDCP check/prop work)
 * so nothing runs after teardown has begun.
 */
17973 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17975 struct intel_connector *connector;
17976 struct drm_connector_list_iter conn_iter;
17978 /* Kill all the work that may have been queued by hpd. */
17979 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17980 for_each_intel_connector_iter(connector, &conn_iter) {
17981 if (connector->modeset_retry_work.func)
17982 cancel_work_sync(&connector->modeset_retry_work);
17983 if (connector->hdcp.shim) {
17984 cancel_delayed_work_sync(&connector->hdcp.check_work);
17985 cancel_work_sync(&connector->hdcp.prop_work);
17988 drm_connector_list_iter_end(&conn_iter);
/*
 * intel_modeset_driver_remove - display-side driver unload.  Order matters
 * throughout: flush pending work first, kill interrupts and hpd polling
 * before touching connectors, suspend MST before fbdev teardown, and only
 * then clean up mode_config, overlays, GMBUS, workqueues and FBC.
 */
17991 void intel_modeset_driver_remove(struct drm_i915_private *i915)
17993 flush_workqueue(i915->flip_wq);
17994 flush_workqueue(i915->modeset_wq);
17996 flush_work(&i915->atomic_helper.free_work);
17997 WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
18000 * Interrupts and polling as the first thing to avoid creating havoc.
18001 * Too much stuff here (turning of connectors, ...) would
18002 * experience fancy races otherwise.
18004 intel_irq_uninstall(i915);
18007 * Due to the hpd irq storm handling the hotplug work can re-arm the
18008 * poll handlers. Hence disable polling after hpd handling is shut down.
18010 intel_hpd_poll_fini(i915);
18013 * MST topology needs to be suspended so we don't have any calls to
18014 * fbdev after it's finalized. MST will be destroyed later as part of
18015 * drm_mode_config_cleanup()
18017 intel_dp_mst_suspend(i915);
18019 /* poll work can call into fbdev, hence clean that up afterwards */
18020 intel_fbdev_fini(i915);
18022 intel_unregister_dsm_handler();
18024 intel_fbc_global_disable(i915);
18026 /* flush any delayed tasks or pending work */
18027 flush_scheduled_work();
18029 intel_hdcp_component_fini(i915);
18031 drm_mode_config_cleanup(&i915->drm);
18033 intel_overlay_cleanup(i915);
18035 intel_gmbus_teardown(i915);
18037 destroy_workqueue(i915->flip_wq);
18038 destroy_workqueue(i915->modeset_wq);
18040 intel_fbc_cleanup_cfb(i915);
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * Snapshot of display register state captured at GPU-error time for the
 * error-state dump: per-pipe cursor, pipe, plane and transcoder registers.
 * NOTE(review): lossy extraction — most register field declarations inside
 * the nested structs are missing from this view.
 */
18045 struct intel_display_error_state {
18047 u32 power_well_driver;
18049 struct intel_cursor_error_state {
18054 } cursor[I915_MAX_PIPES];
18056 struct intel_pipe_error_state {
18057 bool power_domain_on;
18060 } pipe[I915_MAX_PIPES];
18062 struct intel_plane_error_state {
18070 } plane[I915_MAX_PIPES];
18072 struct intel_transcoder_error_state {
18074 bool power_domain_on;
18075 enum transcoder cpu_transcoder;
/*
 * intel_display_capture_error_state - allocate (GFP_ATOMIC, may be called
 * from error context) and fill an intel_display_error_state snapshot.
 * Registers are only read when the owning power domain is up; returns NULL
 * when the device has no usable display or allocation fails.
 *
 * NOTE(review): lossy extraction — the transcoders[] initializer entries
 * and some early returns are missing from this view.
 */
18088 struct intel_display_error_state *
18089 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
18091 struct intel_display_error_state *error;
18092 int transcoders[] = {
18101 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
18103 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
18106 error = kzalloc(sizeof(*error), GFP_ATOMIC);
18110 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18111 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
/* Per-pipe capture: skip pipes whose power domain is off. */
18113 for_each_pipe(dev_priv, i) {
18114 error->pipe[i].power_domain_on =
18115 __intel_display_power_is_enabled(dev_priv,
18116 POWER_DOMAIN_PIPE(i));
18117 if (!error->pipe[i].power_domain_on)
18120 error->cursor[i].control = I915_READ(CURCNTR(i));
18121 error->cursor[i].position = I915_READ(CURPOS(i));
18122 error->cursor[i].base = I915_READ(CURBASE(i));
18124 error->plane[i].control = I915_READ(DSPCNTR(i));
18125 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
18126 if (INTEL_GEN(dev_priv) <= 3) {
18127 error->plane[i].size = I915_READ(DSPSIZE(i));
18128 error->plane[i].pos = I915_READ(DSPPOS(i));
18130 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18131 error->plane[i].addr = I915_READ(DSPADDR(i));
18132 if (INTEL_GEN(dev_priv) >= 4) {
18133 error->plane[i].surface = I915_READ(DSPSURF(i));
18134 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
18137 error->pipe[i].source = I915_READ(PIPESRC(i));
18139 if (HAS_GMCH(dev_priv))
18140 error->pipe[i].stat = I915_READ(PIPESTAT(i));
/* Per-transcoder capture: skip non-existent or powered-down ones. */
18143 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18144 enum transcoder cpu_transcoder = transcoders[i];
18146 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
18149 error->transcoder[i].available = true;
18150 error->transcoder[i].power_domain_on =
18151 __intel_display_power_is_enabled(dev_priv,
18152 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18153 if (!error->transcoder[i].power_domain_on)
18156 error->transcoder[i].cpu_transcoder = cpu_transcoder;
18158 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
18159 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
18160 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
18161 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
18162 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
18163 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
18164 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
18170 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18173 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
18174 struct intel_display_error_state *error)
18176 struct drm_i915_private *dev_priv = m->i915;
18182 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
18183 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18184 err_printf(m, "PWR_WELL_CTL2: %08x\n",
18185 error->power_well_driver);
18186 for_each_pipe(dev_priv, i) {
18187 err_printf(m, "Pipe [%d]:\n", i);
18188 err_printf(m, " Power: %s\n",
18189 onoff(error->pipe[i].power_domain_on));
18190 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
18191 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
18193 err_printf(m, "Plane [%d]:\n", i);
18194 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
18195 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
18196 if (INTEL_GEN(dev_priv) <= 3) {
18197 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
18198 err_printf(m, " POS: %08x\n", error->plane[i].pos);
18200 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18201 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
18202 if (INTEL_GEN(dev_priv) >= 4) {
18203 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
18204 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
18207 err_printf(m, "Cursor [%d]:\n", i);
18208 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
18209 err_printf(m, " POS: %08x\n", error->cursor[i].position);
18210 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
18213 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18214 if (!error->transcoder[i].available)
18217 err_printf(m, "CPU transcoder: %s\n",
18218 transcoder_name(error->transcoder[i].cpu_transcoder));
18219 err_printf(m, " Power: %s\n",
18220 onoff(error->transcoder[i].power_domain_on));
18221 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
18222 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
18223 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
18224 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
18225 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
18226 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
18227 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);