OSDN Git Service

drm/i915/tgl: Gen-12 render decompression
[tomoyo/tomoyo-test1.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/* Primary plane formats for gen <= 3 (fourcc codes advertised for the plane) */
static const u32 i8xx_primary_formats[] = {
        DRM_FORMAT_C8,          /* 8bpp palettized */
        DRM_FORMAT_XRGB1555,    /* 15bpp */
        DRM_FORMAT_RGB565,      /* 16bpp */
        DRM_FORMAT_XRGB8888,    /* 32bpp, X = unused */
};
93
/*
 * Primary plane formats for ivb (no fp16 due to hw issue) — i.e. the i965
 * list below minus DRM_FORMAT_XBGR16161616F.
 */
static const u32 ivb_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};
103
/* Primary plane formats for gen >= 4, except ivb (which lacks fp16) */
static const u32 i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_XBGR16161616F,       /* 64bpp half-float */
};
114
/* Primary plane formats for vlv/chv — adds per-pixel alpha (A...) variants */
static const u32 vlv_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ARGB2101010,
        DRM_FORMAT_ABGR2101010,
        DRM_FORMAT_XBGR16161616F,
};
129
/* Framebuffer modifiers supported on i9xx-class primary planes. */
static const u64 i9xx_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID  /* sentinel terminating the list */
};
135
/* Cursor formats — the cursor plane only takes 32bpp ARGB */
static const u32 intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};
140
/* Cursor planes accept only untiled (linear) buffers. */
static const u64 cursor_format_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID  /* sentinel terminating the list */
};
145
146 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
147                                 struct intel_crtc_state *pipe_config);
148 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
149                                    struct intel_crtc_state *pipe_config);
150
151 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
152                                   struct drm_i915_gem_object *obj,
153                                   struct drm_mode_fb_cmd2 *mode_cmd);
154 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
155 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
156 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
157                                          const struct intel_link_m_n *m_n,
158                                          const struct intel_link_m_n *m2_n2);
159 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
160 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
163 static void vlv_prepare_pll(struct intel_crtc *crtc,
164                             const struct intel_crtc_state *pipe_config);
165 static void chv_prepare_pll(struct intel_crtc *crtc,
166                             const struct intel_crtc_state *pipe_config);
167 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
168 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
169 static void intel_modeset_setup_hw_state(struct drm_device *dev,
170                                          struct drm_modeset_acquire_ctx *ctx);
171 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
172
/*
 * Per-platform DPLL divisor/frequency ranges, validated by
 * intel_PLL_is_valid().  dot/vco values are in kHz (see the comments on
 * the *_find_best_dpll() helpers below).
 */
struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        /*
         * p2 selection: p2_slow below dot_limit, p2_fast at or above it
         * (LVDS instead picks by single vs dual channel — see
         * i9xx_select_p2_div()).
         */
        struct {
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};
183
184 /* returns HPLL frequency in kHz */
185 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
186 {
187         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
188
189         /* Obtain SKU information */
190         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
191                 CCK_FUSE_HPLL_FREQ_MASK;
192
193         return vco_freq[hpll_freq] * 1000;
194 }
195
196 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
197                       const char *name, u32 reg, int ref_freq)
198 {
199         u32 val;
200         int divider;
201
202         val = vlv_cck_read(dev_priv, reg);
203         divider = val & CCK_FREQUENCY_VALUES;
204
205         WARN((val & CCK_FREQUENCY_STATUS) !=
206              (divider << CCK_FREQUENCY_STATUS_SHIFT),
207              "%s change in progress\n", name);
208
209         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
210 }
211
212 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
213                            const char *name, u32 reg)
214 {
215         int hpll;
216
217         vlv_cck_get(dev_priv);
218
219         if (dev_priv->hpll_freq == 0)
220                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
221
222         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
223
224         vlv_cck_put(dev_priv);
225
226         return hpll;
227 }
228
229 static void intel_update_czclk(struct drm_i915_private *dev_priv)
230 {
231         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
232                 return;
233
234         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
235                                                       CCK_CZ_CLOCK_CONTROL);
236
237         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
238 }
239
240 static inline u32 /* units of 100MHz */
241 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
242                     const struct intel_crtc_state *pipe_config)
243 {
244         if (HAS_DDI(dev_priv))
245                 return pipe_config->port_clock; /* SPLL */
246         else
247                 return dev_priv->fdi_pll_freq;
248 }
249
/* i8xx DPLL limits, DAC output; dot/vco in kHz. */
static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};
262
/* i8xx DPLL limits, DVO output; same as DAC except p2 stays at 4 when fast. */
static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};
275
/* i8xx DPLL limits, LVDS; p2 14/7 = single/dual channel (see i9xx_select_p2_div()). */
static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};
288
/* i9xx DPLL limits, SDVO output; dot/vco in kHz. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};
301
/* i9xx DPLL limits, LVDS; p2 14/7 = single/dual channel. */
static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};
314
315
/* g4x DPLL limits, SDVO output. */
static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};
330
/* g4x DPLL limits, HDMI output. */
static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};
343
/* g4x DPLL limits, single-channel LVDS; dot_limit 0 -> p2 is always 14. */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};
357
/* g4x DPLL limits, dual-channel LVDS; dot_limit 0 -> p2 is always 7. */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};
371
/* Pineview DPLL limits, SDVO output (see pnv_calc_dpll_params()). */
static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};
386
/* Pineview DPLL limits, LVDS; m1 unused (combined m divider lives in m2). */
static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};
399
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};
417
/* Ironlake DPLL limits, single-channel LVDS (120MHz refclk). */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};
430
/* Ironlake DPLL limits, dual-channel LVDS. */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};
443
/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};
457
/* Ironlake DPLL limits, dual-channel LVDS with 100MHz refclk. */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};
470
static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
          * clock and actual rate limits are more relaxed, so checking
          * them would make no difference.
          */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },  /* fast clock = 5x dot */
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
486
static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* m2 is stored in .22 fixed point (see chv_calc_dpll_params()) */
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};
502
/* Broxton DPLL limits; dot and m2 ranges are still placeholders. */
static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};
514
515 /* WA Display #0827: Gen9:all */
516 static void
517 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
518 {
519         if (enable)
520                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
521                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
522                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
523         else
524                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
525                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
526                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
527 }
528
529 /* Wa_2006604312:icl */
530 static void
531 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
532                        bool enable)
533 {
534         if (enable)
535                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
536                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
537         else
538                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
539                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
540 }
541
/* True when the atomic core flagged a full modeset for this CRTC state. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(&state->uapi);
}
547
548 bool
549 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
550 {
551         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
552                 crtc_state->sync_mode_slaves_mask);
553 }
554
555 static bool
556 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
557 {
558         return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
559                 crtc_state->sync_mode_slaves_mask);
560 }
561
/* A sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
        return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
567
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
        /* Pineview has a single combined m divider, stored in m2. */
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        /* Guard the divisions below; 0 signals "invalid" to the caller. */
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
588
/*
 * Effective m divider: 5 * (m1 + 2) + (m2 + 2), matching the clock
 * equation documented above i9xx_find_best_dpll().
 */
static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
593
/* i9xx-class PLL: n is programmed as (actual - 2), hence the "+ 2" below. */
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
        /* Guard the divisions below; 0 signals "invalid" to the caller. */
        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
605
/*
 * VLV: limits are expressed as data rates (5x dot clock — see
 * intel_limits_vlv), so divide by 5 for the pipe clock.
 */
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        /* Guard the divisions below; 0 signals "invalid" to the caller. */
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}
617
/*
 * CHV: like VLV, but m2 is in .22 fixed point (see intel_limits_chv),
 * so the n divisor is scaled by 1 << 22 and the intermediate product
 * needs 64 bits.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        /* Guard the divisions below; 0 signals "invalid" to the caller. */
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
                                           clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}
630
/*
 * Reject a candidate divisor set: returns false from the *enclosing*
 * function (intel_PLL_is_valid()); the debug message is compiled out.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
632
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.  Note: INTELPllInvalid() returns false directly from
 * this function, so each failed check bails out immediately.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        /* m1 > m2 is a hw requirement everywhere except these platforms. */
        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        /* vlv/chv/bxt have no combined m/p limits (see their limit tables). */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
673
674 static int
675 i9xx_select_p2_div(const struct intel_limit *limit,
676                    const struct intel_crtc_state *crtc_state,
677                    int target)
678 {
679         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
680
681         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
682                 /*
683                  * For LVDS just rely on its current settings for dual-channel.
684                  * We haven't figured out how to reliably set up different
685                  * single/dual channel state, if we even can.
686                  */
687                 if (intel_is_dual_link_lvds(dev_priv))
688                         return limit->p2.p2_fast;
689                 else
690                         return limit->p2.p2_slow;
691         } else {
692                 if (target < limit->p2.dot_limit)
693                         return limit->p2.p2_slow;
694                 else
695                         return limit->p2.p2_fast;
696         }
697 }
698
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->uapi.crtc->dev;
        struct dpll clock;
        /* Best deviation so far; initialized so any valid hit improves it. */
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Exhaustive search over m1/m2/n/p1, keeping the closest dot clock. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* m2 must stay below m1 (see intel_PLL_is_valid()),
                         * and m2 only grows, so stop this inner scan here. */
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* err only shrinks from its initial value when a candidate was kept. */
        return (err != target);
}
756
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * Pineview variant of i9xx_find_best_dpll(): uses pnv_calc_dpll_params()
 * and has no m2 < m1 constraint (m1 is always 0 on Pineview).
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->uapi.crtc->dev;
        struct dpll clock;
        /* Best deviation so far; initialized so any valid hit improves it. */
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Exhaustive search over m1/m2/n/p1, keeping the closest dot clock. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* err only shrinks from its initial value when a candidate was kept. */
        return (err != target);
}
812
813 /*
814  * Returns a set of divisors for the desired target clock with the given
815  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
817  *
818  * Target and reference clocks are specified in kHz.
819  *
820  * If match_clock is provided, then best_clock P divider must match the P
821  * divider from @match_clock used for LVDS downclocking.
822  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	/* note: @match_clock is not used by this variant */
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Shrink the n search space: a
						 * larger n can't beat this hit.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
871
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Returns true if it should be
 * preferred; the calculated error is returned via @error_ppm.
 */
876 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
877                                const struct dpll *calculated_clock,
878                                const struct dpll *best_clock,
879                                unsigned int best_error_ppm,
880                                unsigned int *error_ppm)
881 {
882         /*
883          * For CHV ignore the error and consider only the P value.
884          * Prefer a bigger P value based on HW requirements.
885          */
886         if (IS_CHERRYVIEW(to_i915(dev))) {
887                 *error_ppm = 0;
888
889                 return calculated_clock->p > best_clock->p;
890         }
891
892         if (WARN_ON_ONCE(!target_freq))
893                 return false;
894
895         *error_ppm = div_u64(1000000ULL *
896                                 abs(target_freq - calculated_clock->dot),
897                              target_freq);
898         /*
899          * Prefer a better P value over a better (smaller) error if the error
900          * is small. Ensure this preference for future configurations too by
901          * setting the error to 0.
902          */
903         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
904                 *error_ppm = 0;
905
906                 return true;
907         }
908
909         return *error_ppm + 10 < best_error_ppm;
910 }
911
912 /*
913  * Returns a set of divisors for the desired target clock with the given
914  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
916  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	/* note: @match_clock is not used by this variant */
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000; /* best error so far, in ppm */
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve for m2 given all the other divisors */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
971
972 /*
973  * Returns a set of divisors for the desired target clock with the given
974  * refclk, or FALSE.  The returned values represent the clock equation:
975  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
976  */
977 static bool
978 chv_find_best_dpll(const struct intel_limit *limit,
979                    struct intel_crtc_state *crtc_state,
980                    int target, int refclk, struct dpll *match_clock,
981                    struct dpll *best_clock)
982 {
983         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
984         struct drm_device *dev = crtc->base.dev;
985         unsigned int best_error_ppm;
986         struct dpll clock;
987         u64 m2;
988         int found = false;
989
990         memset(best_clock, 0, sizeof(*best_clock));
991         best_error_ppm = 1000000;
992
993         /*
994          * Based on hardware doc, the n always set to 1, and m1 always
995          * set to 2.  If requires to support 200Mhz refclk, we need to
996          * revisit this because n may not 1 anymore.
997          */
998         clock.n = 1, clock.m1 = 2;
999         target *= 5;    /* fast clock */
1000
1001         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1002                 for (clock.p2 = limit->p2.p2_fast;
1003                                 clock.p2 >= limit->p2.p2_slow;
1004                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1005                         unsigned int error_ppm;
1006
1007                         clock.p = clock.p1 * clock.p2;
1008
1009                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1010                                                    refclk * clock.m1);
1011
1012                         if (m2 > INT_MAX/clock.m1)
1013                                 continue;
1014
1015                         clock.m2 = m2;
1016
1017                         chv_calc_dpll_params(refclk, &clock);
1018
1019                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1020                                 continue;
1021
1022                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1023                                                 best_error_ppm, &error_ppm))
1024                                 continue;
1025
1026                         *best_clock = clock;
1027                         best_error_ppm = error_ppm;
1028                         found = true;
1029                 }
1030         }
1031
1032         return found;
1033 }
1034
1035 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1036                         struct dpll *best_clock)
1037 {
1038         int refclk = 100000;
1039         const struct intel_limit *limit = &intel_limits_bxt;
1040
1041         return chv_find_best_dpll(limit, crtc_state,
1042                                   crtc_state->port_clock, refclk,
1043                                   NULL, best_clock);
1044 }
1045
1046 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1047                                     enum pipe pipe)
1048 {
1049         i915_reg_t reg = PIPEDSL(pipe);
1050         u32 line1, line2;
1051         u32 line_mask;
1052
1053         if (IS_GEN(dev_priv, 2))
1054                 line_mask = DSL_LINEMASK_GEN2;
1055         else
1056                 line_mask = DSL_LINEMASK_GEN3;
1057
1058         line1 = I915_READ(reg) & line_mask;
1059         msleep(5);
1060         line2 = I915_READ(reg) & line_mask;
1061
1062         return line1 != line2;
1063 }
1064
/*
 * Wait (up to 100ms) for the pipe's scanline counter to start or stop
 * moving, depending on @state; logs an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1075
/* Wait for the pipe's scanline counter to stop advancing (pipe off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1080
/* Wait for the pipe's scanline counter to start advancing (pipe on). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1085
/*
 * Wait for a just-disabled pipe to actually turn off. Gen4+ exposes a
 * pipe state bit in PIPECONF to poll; older hardware is checked
 * indirectly by waiting for the scanline counter to stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1104
1105 /* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	/* DPLL_VCO_ENABLE reflects whether the pipe's DPLL is running */
	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1118
1119 /* XXX: the dsi pll is shared between MIPI DSI ports */
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* DSI PLL state lives in the CCK sideband; read under the CCK lock */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1134
/* Assert the FDI TX enable state for @pipe matches @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1160
/* Assert the FDI RX enable state for @pipe matches @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1175
/*
 * Assert the FDI TX PLL is enabled on platforms where the driver
 * controls it (not ILK, where it is always on, and not DDI platforms).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1192
/* Assert the FDI RX PLL enable state for @pipe matches @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1205
/*
 * Assert that the panel power sequencer registers driving @pipe are
 * writable: warns if the panel on this pipe is powered on while the PP
 * registers are still write-protected. Not applicable to DDI platforms.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the PP port select back to the pipe feeding that port */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Unlocked means panel power off, or the unlock key written to PP_CONTROL */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1262
/*
 * Assert that @cpu_transcoder's pipe enable state matches @state.
 * Takes a display power reference first so a powered-down transcoder
 * is treated as disabled instead of being read while unpowered.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* power domain off: the transcoder cannot be enabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1290
/* Assert a plane's hardware enable state (per its get_hw_state hook). */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1305
/* Assert every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1314
/*
 * Warn if vblank interrupts are still enabled on @crtc:
 * drm_crtc_vblank_get() returning 0 (success) means vblanks are on, in
 * which case drop the reference we just took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1320
/* Assert the PCH transcoder for @pipe is disabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1333
/*
 * Assert the given PCH DP port is not driving @pipe, and on IBX that a
 * disabled port no longer lingers on transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1351
/*
 * Assert the given PCH HDMI/SDVO port is not driving @pipe, and on IBX
 * that a disabled port no longer lingers on transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1369
/* Assert no PCH output port (DP, VGA, LVDS, HDMI/SDVO) is driving @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1394
/* Write the DPLL control value and wait for the VLV PLL to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150); /* let the clocks stabilize before polling for lock */

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1408
/*
 * Enable the DPLL for @crtc on VLV: checks preconditions, powers the
 * VCO up only if the state asks for it, then programs DPLL_MD.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1426
1427
/*
 * Enable the CHV PLL: first turn the 10bit DCLKP clock back on via the
 * DPIO sideband, wait the required settle time, then enable the PLL
 * and poll for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1457
/*
 * Enable the DPLL for @crtc on CHV, including the pixel repeat
 * workaround that routes the DPLL_MD value through DPLLBMD for pipes
 * B and C.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* remember the value the workaround wrote for later readout */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1494
1495 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1496 {
1497         if (IS_I830(dev_priv))
1498                 return false;
1499
1500         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1501 }
1502
/*
 * Enable the DPLL for @crtc on gen2-4: program the dividers (with the
 * VGA-mode quirk), wait for the clocks to stabilize, program the pixel
 * multiplier, and rewrite the value a few times for good measure.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1548
/* Disable the DPLL for @crtc on gen2-4, leaving VGA mode disabled. */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1565
/*
 * Disable the pipe's DPLL on Valleyview.
 * Keeps the reference clock (and on pipes B/C the CRI clock needed by
 * the eDP PHY) running, only the PLL itself is shut down.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

        val = DPLL_INTEGRATED_REF_CLK_VLV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
}
1581
/*
 * Disable the pipe's DPLL on Cherryview.
 * In addition to turning off the PLL (keeping reference/CRI clocks alive
 * like on VLV), the 10-bit display clock must be gated via the DPIO
 * sideband after the PLL register write.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        vlv_dpio_get(dev_priv);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        vlv_dpio_put(dev_priv);
}
1607
/*
 * Wait (up to 1ms) for the PHY to report the given port as ready.
 * The ready bits live in DPLL(0) for ports B/C and in DPIO_PHY_STATUS
 * for port D; port C's expected bits are shifted within the same field.
 * Logs a WARN on timeout rather than propagating an error.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
                         unsigned int expected_mask)
{
        u32 port_mask;
        i915_reg_t dpll_reg;

        switch (dport->base.port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
                break;
        case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
                dpll_reg = DPLL(0);
                expected_mask <<= 4;
                break;
        case PORT_D:
                port_mask = DPLL_PORTD_READY_MASK;
                dpll_reg = DPIO_PHY_STATUS;
                break;
        default:
                BUG();
        }

        if (intel_de_wait_for_register(dev_priv, dpll_reg,
                                       port_mask, expected_mask, 1000))
                WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
                     dport->base.base.base.id, dport->base.base.name,
                     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1639
/*
 * Enable the PCH transcoder feeding this crtc's pipe (ILK/SNB/IVB PCH).
 * Requires the shared DPLL and both FDI directions to already be up;
 * applies the CPT timing-override workaround and, on IBX, mirrors the
 * CPU pipe's BPC and frame start delay before enabling.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val, pipeconf_val;

        /* Make sure PCH DPLL is enabled */
        assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
        assert_fdi_rx_enabled(dev_priv, pipe);

        if (HAS_PCH_CPT(dev_priv)) {
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                /*
                 * Workaround: Set the timing override bit
                 * before enabling the pch transcoder.
                 */
                val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
                /* Configure frame start delay to match the CPU */
                val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
                val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
                I915_WRITE(reg, val);
        }

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        pipeconf_val = I915_READ(PIPECONF(pipe));

        if (HAS_PCH_IBX(dev_priv)) {
                /* Configure frame start delay to match the CPU */
                val &= ~TRANS_FRAME_START_DELAY_MASK;
                val |= TRANS_FRAME_START_DELAY(0);

                /*
                 * Make the BPC in transcoder be consistent with
                 * that in pipeconf reg. For HDMI we must use 8bpc
                 * here for both 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                        val |= PIPECONF_8BPC;
                else
                        val |= pipeconf_val & PIPECONF_BPC_MASK;
        }

        /* Mirror the CPU pipe's interlace mode into the PCH transcoder */
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
                if (HAS_PCH_IBX(dev_priv) &&
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
        } else {
                val |= TRANS_PROGRESSIVE;
        }

        I915_WRITE(reg, val | TRANS_ENABLE);
        if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
                DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1705
/*
 * Enable the single LPT PCH transcoder (always paired with "pipe A"
 * resources on the PCH side). Applies the timing-override workaround
 * and copies the CPU transcoder's interlace mode before enabling.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, PIPE_A);

        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        /* Workaround: set timing override bit. */
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        /* Configure frame start delay to match the CPU */
        val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
        val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
                                  TRANS_STATE_ENABLE, 100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
1737
/*
 * Disable the PCH transcoder for the given pipe (ILK/SNB/IVB PCH).
 * FDI and the PCH ports must already be off; on CPT the timing-override
 * chicken bit set at enable time is cleared again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        i915_reg_t reg;
        u32 val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
1767
/*
 * Disable the single LPT PCH transcoder and clear the timing-override
 * workaround bit that was set when it was enabled.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
                                    TRANS_STATE_ENABLE, 50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1785
1786 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1787 {
1788         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1789
1790         if (HAS_PCH_LPT(dev_priv))
1791                 return PIPE_A;
1792         else
1793                 return crtc->pipe;
1794 }
1795
1796 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1797 {
1798         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1799
1800         /*
1801          * On i965gm the hardware frame counter reads
1802          * zero when the TV encoder is enabled :(
1803          */
1804         if (IS_I965GM(dev_priv) &&
1805             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1806                 return 0;
1807
1808         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1809                 return 0xffffffff; /* full 32 bit counter */
1810         else if (INTEL_GEN(dev_priv) >= 3)
1811                 return 0xffffff; /* only 24 bits of frame count */
1812         else
1813                 return 0; /* Gen2 doesn't have a hardware frame counter */
1814 }
1815
/*
 * Hand vblank handling for this crtc over to drm_vblank.
 * The max counter must be set before drm_crtc_vblank_on() so the core
 * knows whether a hardware frame counter can be trusted.
 */
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

        assert_vblank_disabled(&crtc->base);
        drm_crtc_set_max_vblank_count(&crtc->base,
                                      intel_crtc_max_vblank_count(crtc_state));
        drm_crtc_vblank_on(&crtc->base);
}
1825
/* Shut down drm_vblank handling for this crtc (counterpart of vblank_on). */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

        drm_crtc_vblank_off(&crtc->base);
        assert_vblank_disabled(&crtc->base);
}
1833
/*
 * Enable the pipe via PIPECONF after sanity-checking its clock sources.
 * Planes must be off; on GMCH platforms the (DSI) PLL must be running,
 * on PCH platforms the FDI PLLs must be up when driving a PCH encoder.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

        assert_planes_disabled(crtc);

        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
        if (HAS_GMCH(dev_priv)) {
                if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
        } else {
                if (new_crtc_state->has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv,
                                                  intel_crtc_pch_transcoder(crtc));
                        assert_fdi_tx_pll_enabled(dev_priv,
                                                  (enum pipe) cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }

        trace_intel_pipe_enable(crtc);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if (val & PIPECONF_ENABLE) {
                /* we keep both pipes enabled on 830 */
                WARN_ON(!IS_I830(dev_priv));
                return;
        }

        I915_WRITE(reg, val | PIPECONF_ENABLE);
        POSTING_READ(reg);

        /*
         * Until the pipe starts PIPEDSL reads will return a stale value,
         * which causes an apparent vblank timestamp jump when PIPEDSL
         * resets to its proper value. That also messes up the frame count
         * when it's derived from the timestamps. So let's wait for the
         * pipe to start properly before we call drm_crtc_vblank_on()
         */
        if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
                intel_wait_for_pipe_scanline_moving(crtc);
}
1891
/*
 * Disable the pipe via PIPECONF and wait for it to actually stop.
 * Planes must already be off. On i830 the pipe is left enabled (the
 * hardware needs both pipes running), only double-wide is cleared.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(crtc);

        trace_intel_pipe_disable(crtc);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        /*
         * Double wide has implications for planes
         * so best keep it disabled when not needed.
         */
        if (old_crtc_state->double_wide)
                val &= ~PIPECONF_DOUBLE_WIDE;

        /* Don't disable pipe or pipe PLLs if needed */
        if (!IS_I830(dev_priv))
                val &= ~PIPECONF_ENABLE;

        I915_WRITE(reg, val);
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(old_crtc_state);
}
1931
/* GTT tile/page size in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
        if (IS_GEN(dev_priv, 2))
                return 2048;

        return 4096;
}
1936
1937 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1938 {
1939         if (!is_ccs_modifier(fb->modifier))
1940                 return false;
1941
1942         return plane >= fb->format->num_planes / 2;
1943 }
1944
1945 static bool is_gen12_ccs_modifier(u64 modifier)
1946 {
1947         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
1948 }
1949
1950 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1951 {
1952         return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1953 }
1954
1955 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1956 {
1957         if (is_ccs_modifier(fb->modifier))
1958                 return is_ccs_plane(fb, plane);
1959
1960         return plane == 1;
1961 }
1962
1963 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1964 {
1965         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1966                 (main_plane && main_plane >= fb->format->num_planes / 2));
1967
1968         return fb->format->num_planes / 2 + main_plane;
1969 }
1970
1971 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1972 {
1973         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1974                 ccs_plane < fb->format->num_planes / 2);
1975
1976         return ccs_plane - fb->format->num_planes / 2;
1977 }
1978
1979 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1980 static int
1981 intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1982 {
1983         if (is_ccs_modifier(fb->modifier))
1984                 return main_to_ccs_plane(fb, main_plane);
1985
1986         return 1;
1987 }
1988
/*
 * Return the tile row width in bytes for the given fb plane.
 * CCS planes have their own tile widths (128B legacy, 64B gen-12);
 * the non-CCS cases deliberately fall through to the underlying
 * main-surface tiling.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                /* "tile" here is really just the GTT page */
                return intel_tile_size(dev_priv);
        case I915_FORMAT_MOD_X_TILED:
                if (IS_GEN(dev_priv, 2))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Y_TILED_CCS:
                if (is_ccs_plane(fb, color_plane))
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
                if (is_ccs_plane(fb, color_plane))
                        return 64;
                /* fall through */
        case I915_FORMAT_MOD_Y_TILED:
                if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                if (is_ccs_plane(fb, color_plane))
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Yf_TILED:
                /* Yf tile width depends on the pixel size */
                switch (cpp) {
                case 1:
                        return 64;
                case 2:
                case 4:
                        return 128;
                case 8:
                case 16:
                        return 256;
                default:
                        MISSING_CASE(cpp);
                        return cpp;
                }
                break;
        default:
                MISSING_CASE(fb->modifier);
                return cpp;
        }
}
2040
2041 static unsigned int
2042 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2043 {
2044         if (is_gen12_ccs_plane(fb, color_plane))
2045                 return 1;
2046
2047         return intel_tile_size(to_i915(fb->dev)) /
2048                 intel_tile_width_bytes(fb, color_plane);
2049 }
2050
2051 /* Return the tile dimensions in pixel units */
2052 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2053                             unsigned int *tile_width,
2054                             unsigned int *tile_height)
2055 {
2056         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2057         unsigned int cpp = fb->format->cpp[color_plane];
2058
2059         *tile_width = tile_width_bytes / cpp;
2060         *tile_height = intel_tile_height(fb, color_plane);
2061 }
2062
/* Round @height up to a whole number of tile rows for the given plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
                      int color_plane, unsigned int height)
{
        return ALIGN(height, intel_tile_height(fb, color_plane));
}
2071
2072 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2073 {
2074         unsigned int size = 0;
2075         int i;
2076
2077         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2078                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2079
2080         return size;
2081 }
2082
2083 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2084 {
2085         unsigned int size = 0;
2086         int i;
2087
2088         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2089                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2090
2091         return size;
2092 }
2093
2094 static void
2095 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2096                         const struct drm_framebuffer *fb,
2097                         unsigned int rotation)
2098 {
2099         view->type = I915_GGTT_VIEW_NORMAL;
2100         if (drm_rotation_90_or_270(rotation)) {
2101                 view->type = I915_GGTT_VIEW_ROTATED;
2102                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2103         }
2104 }
2105
/* GGTT alignment required for a cursor surface on this platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
        unsigned int alignment;

        if (IS_I830(dev_priv))
                alignment = 16 * 1024;
        else if (IS_I85X(dev_priv))
                alignment = 256;
        else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
                alignment = 32;
        else
                alignment = 4 * 1024;

        return alignment;
}
2117
/* GGTT alignment required for a linear scanout surface on this platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 9)
                return 256 * 1024;

        if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return 128 * 1024;

        if (INTEL_GEN(dev_priv) >= 4)
                return 4 * 1024;

        /* pre-gen4 has no linear alignment requirement */
        return 0;
}
2130
/*
 * GGTT alignment required for the given fb plane's surface, based on
 * the fb's tiling modifier and whether the plane is an AUX surface.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
                                         int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);

        /* AUX_DIST needs only 4K alignment */
        if (is_aux_plane(fb, color_plane))
                return 4096;

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                return intel_linear_alignment(dev_priv);
        case I915_FORMAT_MOD_X_TILED:
                if (INTEL_GEN(dev_priv) >= 9)
                        return 256 * 1024;
                return 0;
        case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
                return 16 * 1024;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                return 1 * 1024 * 1024;
        default:
                MISSING_CASE(fb->modifier);
                return 0;
        }
}
2159
2160 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2161 {
2162         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2163         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2164
2165         return INTEL_GEN(dev_priv) < 4 ||
2166                 (plane->has_fbc &&
2167                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2168 }
2169
/*
 * Pin @fb's backing object into the GGTT for scanout and, when
 * @uses_fence and the vma is map-and-fenceable, install a fence
 * register (reported via PLANE_HAS_FENCE in @out_flags).
 *
 * Returns a referenced vma on success or an ERR_PTR; the caller owns
 * the reference and releases it with intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           const struct i915_ggtt_view *view,
                           bool uses_fence,
                           unsigned long *out_flags)
{
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        unsigned int pinctl;
        u32 alignment;

        if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
                return ERR_PTR(-EINVAL);

        alignment = intel_surf_alignment(fb, 0);

        /* Note that the w/a also requires 64 PTE of padding following the
         * bo. We currently fill all unused PTE with the shadow page and so
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
        if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;

        /*
         * Global gtt pte registers are special registers which actually forward
         * writes to a chunk of system memory. Which means that there is no risk
         * that the register values disappear as soon as we call
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

        /*
         * Valleyview is definitely limited to scanning out the first
         * 512MiB. Lets presume this behaviour was inherited from the
         * g4x display engine and that all earlier gen are similarly
         * limited. Testing suggests that it is a little more
         * complicated than this. For example, Cherryview appears quite
         * happy to scanout from anywhere within its global aperture.
         */
        pinctl = 0;
        if (HAS_GMCH(dev_priv))
                pinctl |= PIN_MAPPABLE;

        vma = i915_gem_object_pin_to_display_plane(obj,
                                                   alignment, view, pinctl);
        if (IS_ERR(vma))
                goto err;

        if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
                int ret;

                /*
                 * Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
                 * possible, install a fence as the cost is not that onerous.
                 *
                 * If we fail to fence the tiled scanout, then either the
                 * modeset will reject the change (which is highly unlikely as
                 * the affected systems, all but one, do not have unmappable
                 * space) or we will not be able to enable full powersaving
                 * techniques (also likely not to apply due to various limits
                 * FBC and the like impose on the size of the buffer, which
                 * presumably we violated anyway with this unmappable buffer).
                 * Anyway, it is presumably better to stumble onwards with
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
                ret = i915_vma_pin_fence(vma);
                if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
                        /* fence is mandatory pre-gen4: fail the pin */
                        i915_gem_object_unpin_from_display_plane(vma);
                        vma = ERR_PTR(ret);
                        goto err;
                }

                if (ret == 0 && vma->fence)
                        *out_flags |= PLANE_HAS_FENCE;
        }

        i915_vma_get(vma);
err:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        return vma;
}
2262
/*
 * Release a scanout pin taken by intel_pin_and_fence_fb_obj():
 * drop the fence (if PLANE_HAS_FENCE was set in @flags), unpin the
 * display mapping under the object lock, then drop the vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
        i915_gem_object_lock(vma->obj);
        if (flags & PLANE_HAS_FENCE)
                i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
        i915_gem_object_unlock(vma->obj);

        i915_vma_put(vma);
}
2273
2274 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2275                           unsigned int rotation)
2276 {
2277         if (drm_rotation_90_or_270(rotation))
2278                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2279         else
2280                 return fb->pitches[color_plane];
2281 }
2282
2283 /*
2284  * Convert the x/y offsets into a linear offset.
2285  * Only valid with 0/180 degree rotation, which is fine since linear
2286  * offset is only used with linear buffers on pre-hsw and tiled buffers
2287  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2288  */
2289 u32 intel_fb_xy_to_linear(int x, int y,
2290                           const struct intel_plane_state *state,
2291                           int color_plane)
2292 {
2293         const struct drm_framebuffer *fb = state->hw.fb;
2294         unsigned int cpp = fb->format->cpp[color_plane];
2295         unsigned int pitch = state->color_plane[color_plane].stride;
2296
2297         return y * pitch + x * cpp;
2298 }
2299
2300 /*
2301  * Add the x/y offsets derived from fb->offsets[] to the user
2302  * specified plane src x/y offsets. The resulting x/y offsets
2303  * specify the start of scanout from the beginning of the gtt mapping.
2304  */
2305 void intel_add_fb_offsets(int *x, int *y,
2306                           const struct intel_plane_state *state,
2307                           int color_plane)
2308
2309 {
2310         *x += state->color_plane[color_plane].x;
2311         *y += state->color_plane[color_plane].y;
2312 }
2313
/*
 * Rebase a tile-aligned offset from @old_offset to @new_offset by
 * folding the (whole tiles only) difference into the x/y coordinates.
 * Returns @new_offset. Both offsets must be tile aligned and
 * new_offset <= old_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
                                    unsigned int tile_width,
                                    unsigned int tile_height,
                                    unsigned int tile_size,
                                    unsigned int pitch_tiles,
                                    u32 old_offset,
                                    u32 new_offset)
{
        unsigned int pitch_pixels = pitch_tiles * tile_width;
        unsigned int tiles;

        WARN_ON(old_offset & (tile_size - 1));
        WARN_ON(new_offset & (tile_size - 1));
        WARN_ON(new_offset > old_offset);

        tiles = (old_offset - new_offset) / tile_size;

        /* advance y by full tile rows, x by the remaining tiles */
        *y += tiles / pitch_tiles * tile_height;
        *x += tiles % pitch_tiles * tile_width;

        /* minimize x in case it got needlessly big */
        *y += *x / pitch_pixels * tile_height;
        *x %= pitch_pixels;

        return new_offset;
}
2340
2341 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2342 {
2343         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2344                is_gen12_ccs_plane(fb, color_plane);
2345 }
2346
/*
 * Rebase an aligned surface offset from @old_offset to @new_offset,
 * folding the difference into the x/y coordinates. Handles both tiled
 * surfaces (via intel_adjust_tile_offset(), with width/height swapped
 * for 90/270 rotation) and linear surfaces (plain pitch arithmetic).
 * Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
                                       unsigned int rotation,
                                       unsigned int pitch,
                                       u32 old_offset, u32 new_offset)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        WARN_ON(new_offset > old_offset);

        if (!is_surface_linear(fb, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* rotated view: pitch is in tile rows */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         old_offset, new_offset);
        } else {
                /* linear: recompute x/y directly from the byte offset */
                old_offset += *y * pitch + *x * cpp;

                *y = (old_offset - new_offset) / pitch;
                *x = ((old_offset - new_offset) - *y * pitch) / cpp;
        }

        return new_offset;
}
2385
2386 /*
2387  * Adjust the tile offset by moving the difference into
2388  * the x/y offsets.
2389  */
2390 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2391                                              const struct intel_plane_state *state,
2392                                              int color_plane,
2393                                              u32 old_offset, u32 new_offset)
2394 {
2395         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2396                                            state->hw.rotation,
2397                                            state->color_plane[color_plane].stride,
2398                                            old_offset, new_offset);
2399 }
2400
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/*
	 * Turn the alignment into a mask; assumes alignment is zero or a
	 * power of two (the & ~alignment below relies on that) — callers
	 * pass page/surface alignments, TODO confirm for new call sites.
	 */
	if (alignment)
		alignment--;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* pitch is the tile aligned fb height here */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Reduce x/y to intra-tile coordinates ... */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* ... and fold the whole tiles into the byte offset. */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Push the below-alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* Linear: express the below-alignment remainder as x/y. */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2465
2466 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2467                                               const struct intel_plane_state *state,
2468                                               int color_plane)
2469 {
2470         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2471         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2472         const struct drm_framebuffer *fb = state->hw.fb;
2473         unsigned int rotation = state->hw.rotation;
2474         int pitch = state->color_plane[color_plane].stride;
2475         u32 alignment;
2476
2477         if (intel_plane->id == PLANE_CURSOR)
2478                 alignment = intel_cursor_alignment(dev_priv);
2479         else
2480                 alignment = intel_surf_alignment(fb, color_plane);
2481
2482         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2483                                             pitch, rotation, alignment);
2484 }
2485
/*
 * Convert the fb->offset[] into x/y offsets.
 *
 * Validates that a tiled fb's plane offset is tile size aligned and
 * that height * pitch + offset cannot overflow u32, then expresses
 * fb->offsets[color_plane] as (*x, *y) relative to offset 0.
 *
 * Returns 0 on success, -EINVAL / -ERANGE on a bad offset or pitch.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;

	/* Tiled surfaces require tile size aligned plane offsets. */
	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
			      fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
			      fb->offsets[color_plane], fb->pitches[color_plane],
			      color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Move the byte offset fb->offsets[color_plane] into x/y. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2523
2524 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2525 {
2526         switch (fb_modifier) {
2527         case I915_FORMAT_MOD_X_TILED:
2528                 return I915_TILING_X;
2529         case I915_FORMAT_MOD_Y_TILED:
2530         case I915_FORMAT_MOD_Y_TILED_CCS:
2531         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2532                 return I915_TILING_Y;
2533         default:
2534                 return I915_TILING_NONE;
2535         }
2536 }
2537
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/* hsub = 8, vsub = 16: one CCS byte per 8x16 main surface pixels. */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2562
/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
 * the main surface.
 */
/* The CCS plane's block is 2x32 main pixels, expressed via block_w/block_h. */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
2584
2585 static const struct drm_format_info *
2586 lookup_format_info(const struct drm_format_info formats[],
2587                    int num_formats, u32 format)
2588 {
2589         int i;
2590
2591         for (i = 0; i < num_formats; i++) {
2592                 if (formats[i].format == format)
2593                         return &formats[i];
2594         }
2595
2596         return NULL;
2597 }
2598
2599 static const struct drm_format_info *
2600 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2601 {
2602         switch (cmd->modifier[0]) {
2603         case I915_FORMAT_MOD_Y_TILED_CCS:
2604         case I915_FORMAT_MOD_Yf_TILED_CCS:
2605                 return lookup_format_info(skl_ccs_formats,
2606                                           ARRAY_SIZE(skl_ccs_formats),
2607                                           cmd->pixel_format);
2608         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2609                 return lookup_format_info(gen12_ccs_formats,
2610                                           ARRAY_SIZE(gen12_ccs_formats),
2611                                           cmd->pixel_format);
2612         default:
2613                 return NULL;
2614         }
2615 }
2616
2617 bool is_ccs_modifier(u64 modifier)
2618 {
2619         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2620                modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2621                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2622 }
2623
2624 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2625                               u32 pixel_format, u64 modifier)
2626 {
2627         struct intel_crtc *crtc;
2628         struct intel_plane *plane;
2629
2630         /*
2631          * We assume the primary plane for pipe A has
2632          * the highest stride limits of them all.
2633          */
2634         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2635         if (!crtc)
2636                 return 0;
2637
2638         plane = to_intel_plane(crtc->base.primary);
2639
2640         return plane->max_stride(plane, pixel_format, modifier,
2641                                  DRM_MODE_ROTATE_0);
2642 }
2643
2644 static
2645 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2646                         u32 pixel_format, u64 modifier)
2647 {
2648         /*
2649          * Arbitrary limit for gen4+ chosen to match the
2650          * render engine max stride.
2651          *
2652          * The new CCS hash mode makes remapping impossible
2653          */
2654         if (!is_ccs_modifier(modifier)) {
2655                 if (INTEL_GEN(dev_priv) >= 7)
2656                         return 256*1024;
2657                 else if (INTEL_GEN(dev_priv) >= 4)
2658                         return 128*1024;
2659         }
2660
2661         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2662 }
2663
/*
 * Required stride alignment (in bytes) for @color_plane of @fb.
 * Linear surfaces get 64 bytes, or a full page when the pitch exceeds
 * the remappable limit; tiled surfaces get one tile width, scaled up
 * for CCS workarounds/padding requirements.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier) && color_plane == 0) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
2708
2709 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2710 {
2711         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2712         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2713         const struct drm_framebuffer *fb = plane_state->hw.fb;
2714         int i;
2715
2716         /* We don't want to deal with remapping with cursors */
2717         if (plane->id == PLANE_CURSOR)
2718                 return false;
2719
2720         /*
2721          * The display engine limits already match/exceed the
2722          * render engine limits, so not much point in remapping.
2723          * Would also need to deal with the fence POT alignment
2724          * and gen2 2KiB GTT tile size.
2725          */
2726         if (INTEL_GEN(dev_priv) < 4)
2727                 return false;
2728
2729         /*
2730          * The new CCS hash mode isn't compatible with remapping as
2731          * the virtual address of the pages affects the compressed data.
2732          */
2733         if (is_ccs_modifier(fb->modifier))
2734                 return false;
2735
2736         /* Linear needs a page aligned stride for remapping */
2737         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2738                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2739
2740                 for (i = 0; i < fb->format->num_planes; i++) {
2741                         if (fb->pitches[i] & alignment)
2742                                 return false;
2743                 }
2744         }
2745
2746         return true;
2747 }
2748
2749 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2750 {
2751         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2752         const struct drm_framebuffer *fb = plane_state->hw.fb;
2753         unsigned int rotation = plane_state->hw.rotation;
2754         u32 stride, max_stride;
2755
2756         /*
2757          * No remapping for invisible planes since we don't have
2758          * an actual source viewport to remap.
2759          */
2760         if (!plane_state->uapi.visible)
2761                 return false;
2762
2763         if (!intel_plane_can_remap(plane_state))
2764                 return false;
2765
2766         /*
2767          * FIXME: aux plane limits on gen9+ are
2768          * unclear in Bspec, for now no checking.
2769          */
2770         stride = intel_fb_pitch(fb, 0, rotation);
2771         max_stride = plane->max_stride(plane, fb->format->format,
2772                                        fb->modifier, rotation);
2773
2774         return stride > max_stride;
2775 }
2776
/*
 * Return the horizontal/vertical subsampling factors of @color_plane
 * relative to the fb's main surface. The main plane (0) is never
 * subsampled; non-CCS aux planes use format->hsub/vsub; gen12 CCS
 * planes derive the factors from the format's block dimensions.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* One CCS byte covers 2x32 main pixels (see gen12_ccs_formats). */
	*vsub = 32;
}
/*
 * Validate that the x/y offsets of CCS plane @ccs_plane match the
 * intra-tile x/y offsets of the corresponding main surface plane.
 *
 * Returns 0 on success (or for non-CCS planes), -EINVAL on mismatch.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	/* Nothing to check for non-CCS planes. */
	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the CCS tile dimensions up to main surface pixels. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
2861
/*
 * Return the dimensions of @color_plane in pixels, i.e. the fb
 * dimensions divided by the plane's subsampling factors.
 */
static void
intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
{
	int hsub, vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
	*w = fb->width / hsub;
	*h = fb->height / vsub;
}
2871
2872 static int
2873 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2874                    struct drm_framebuffer *fb)
2875 {
2876         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2877         struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2878         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2879         u32 gtt_offset_rotated = 0;
2880         unsigned int max_size = 0;
2881         int i, num_planes = fb->format->num_planes;
2882         unsigned int tile_size = intel_tile_size(dev_priv);
2883
2884         for (i = 0; i < num_planes; i++) {
2885                 unsigned int width, height;
2886                 unsigned int cpp, size;
2887                 u32 offset;
2888                 int x, y;
2889                 int ret;
2890
2891                 cpp = fb->format->cpp[i];
2892                 intel_fb_plane_dims(&width, &height, fb, i);
2893
2894                 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2895                 if (ret) {
2896                         DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2897                                       i, fb->offsets[i]);
2898                         return ret;
2899                 }
2900
2901                 ret = intel_fb_check_ccs_xy(fb, i, x, y);
2902                 if (ret)
2903                         return ret;
2904
2905                 /*
2906                  * The fence (if used) is aligned to the start of the object
2907                  * so having the framebuffer wrap around across the edge of the
2908                  * fenced region doesn't really work. We have no API to configure
2909                  * the fence start offset within the object (nor could we probably
2910                  * on gen2/3). So it's just easier if we just require that the
2911                  * fb layout agrees with the fence layout. We already check that the
2912                  * fb stride matches the fence stride elsewhere.
2913                  */
2914                 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2915                     (x + width) * cpp > fb->pitches[i]) {
2916                         DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2917                                       i, fb->offsets[i]);
2918                         return -EINVAL;
2919                 }
2920
2921                 /*
2922                  * First pixel of the framebuffer from
2923                  * the start of the normal gtt mapping.
2924                  */
2925                 intel_fb->normal[i].x = x;
2926                 intel_fb->normal[i].y = y;
2927
2928                 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2929                                                       fb->pitches[i],
2930                                                       DRM_MODE_ROTATE_0,
2931                                                       tile_size);
2932                 offset /= tile_size;
2933
2934                 if (!is_surface_linear(fb, i)) {
2935                         unsigned int tile_width, tile_height;
2936                         unsigned int pitch_tiles;
2937                         struct drm_rect r;
2938
2939                         intel_tile_dims(fb, i, &tile_width, &tile_height);
2940
2941                         rot_info->plane[i].offset = offset;
2942                         rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2943                         rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2944                         rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2945
2946                         intel_fb->rotated[i].pitch =
2947                                 rot_info->plane[i].height * tile_height;
2948
2949                         /* how many tiles does this plane need */
2950                         size = rot_info->plane[i].stride * rot_info->plane[i].height;
2951                         /*
2952                          * If the plane isn't horizontally tile aligned,
2953                          * we need one more tile.
2954                          */
2955                         if (x != 0)
2956                                 size++;
2957
2958                         /* rotate the x/y offsets to match the GTT view */
2959                         drm_rect_init(&r, x, y, width, height);
2960                         drm_rect_rotate(&r,
2961                                         rot_info->plane[i].width * tile_width,
2962                                         rot_info->plane[i].height * tile_height,
2963                                         DRM_MODE_ROTATE_270);
2964                         x = r.x1;
2965                         y = r.y1;
2966
2967                         /* rotate the tile dimensions to match the GTT view */
2968                         pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2969                         swap(tile_width, tile_height);
2970
2971                         /*
2972                          * We only keep the x/y offsets, so push all of the
2973                          * gtt offset into the x/y offsets.
2974                          */
2975                         intel_adjust_tile_offset(&x, &y,
2976                                                  tile_width, tile_height,
2977                                                  tile_size, pitch_tiles,
2978                                                  gtt_offset_rotated * tile_size, 0);
2979
2980                         gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2981
2982                         /*
2983                          * First pixel of the framebuffer from
2984                          * the start of the rotated gtt mapping.
2985                          */
2986                         intel_fb->rotated[i].x = x;
2987                         intel_fb->rotated[i].y = y;
2988                 } else {
2989                         size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2990                                             x * cpp, tile_size);
2991                 }
2992
2993                 /* how many tiles in total needed in the bo */
2994                 max_size = max(max_size, offset + size);
2995         }
2996
2997         if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2998                 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2999                               mul_u32_u32(max_size, tile_size), obj->base.size);
3000                 return -EINVAL;
3001         }
3002
3003         return 0;
3004 }
3005
/*
 * Build a remapped/rotated GTT view for the plane's source viewport
 * and recompute the per-plane stride and x/y offsets to match it.
 * Only called for non-CCS fbs (see the WARN_ON below).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* Source viewport in whole pixels (src is 16.16 fixed point). */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS fbs are never remapped (see intel_plane_can_remap()). */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		/* Viewport origin/size in this plane's subsampled units. */
		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3115
3116 static int
3117 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
3118 {
3119         const struct intel_framebuffer *fb =
3120                 to_intel_framebuffer(plane_state->hw.fb);
3121         unsigned int rotation = plane_state->hw.rotation;
3122         int i, num_planes;
3123
3124         if (!fb)
3125                 return 0;
3126
3127         num_planes = fb->base.format->num_planes;
3128
3129         if (intel_plane_needs_remap(plane_state)) {
3130                 intel_plane_remap_gtt(plane_state);
3131
3132                 /*
3133                  * Sometimes even remapping can't overcome
3134                  * the stride limitations :( Can happen with
3135                  * big plane sizes and suitably misaligned
3136                  * offsets.
3137                  */
3138                 return intel_plane_check_stride(plane_state);
3139         }
3140
3141         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
3142
3143         for (i = 0; i < num_planes; i++) {
3144                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
3145                 plane_state->color_plane[i].offset = 0;
3146
3147                 if (drm_rotation_90_or_270(rotation)) {
3148                         plane_state->color_plane[i].x = fb->rotated[i].x;
3149                         plane_state->color_plane[i].y = fb->rotated[i].y;
3150                 } else {
3151                         plane_state->color_plane[i].x = fb->normal[i].x;
3152                         plane_state->color_plane[i].y = fb->normal[i].y;
3153                 }
3154         }
3155
3156         /* Rotate src coordinates to match rotated GTT view */
3157         if (drm_rotation_90_or_270(rotation))
3158                 drm_rect_rotate(&plane_state->uapi.src,
3159                                 fb->base.width << 16, fb->base.height << 16,
3160                                 DRM_MODE_ROTATE_270);
3161
3162         return intel_plane_check_stride(plane_state);
3163 }
3164
/*
 * Translate a pre-SKL DISPPLANE_* pixel format register field into the
 * corresponding DRM fourcc code.
 *
 * Note the position of the "default" label: an unrecognized register
 * value deliberately falls through into the DISPPLANE_BGRX888 case and
 * is reported as XRGB8888.
 */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRA555:
		return DRM_FORMAT_ARGB1555;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRA888:
		return DRM_FORMAT_ARGB8888;
	case DISPPLANE_RGBA888:
		return DRM_FORMAT_ABGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	case DISPPLANE_BGRA101010:
		return DRM_FORMAT_ARGB2101010;
	case DISPPLANE_RGBA101010:
		return DRM_FORMAT_ABGR2101010;
	case DISPPLANE_RGBX161616:
		return DRM_FORMAT_XBGR16161616F;
	}
}
3197
3198 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3199 {
3200         switch (format) {
3201         case PLANE_CTL_FORMAT_RGB_565:
3202                 return DRM_FORMAT_RGB565;
3203         case PLANE_CTL_FORMAT_NV12:
3204                 return DRM_FORMAT_NV12;
3205         case PLANE_CTL_FORMAT_P010:
3206                 return DRM_FORMAT_P010;
3207         case PLANE_CTL_FORMAT_P012:
3208                 return DRM_FORMAT_P012;
3209         case PLANE_CTL_FORMAT_P016:
3210                 return DRM_FORMAT_P016;
3211         case PLANE_CTL_FORMAT_Y210:
3212                 return DRM_FORMAT_Y210;
3213         case PLANE_CTL_FORMAT_Y212:
3214                 return DRM_FORMAT_Y212;
3215         case PLANE_CTL_FORMAT_Y216:
3216                 return DRM_FORMAT_Y216;
3217         case PLANE_CTL_FORMAT_Y410:
3218                 return DRM_FORMAT_XVYU2101010;
3219         case PLANE_CTL_FORMAT_Y412:
3220                 return DRM_FORMAT_XVYU12_16161616;
3221         case PLANE_CTL_FORMAT_Y416:
3222                 return DRM_FORMAT_XVYU16161616;
3223         default:
3224         case PLANE_CTL_FORMAT_XRGB_8888:
3225                 if (rgb_order) {
3226                         if (alpha)
3227                                 return DRM_FORMAT_ABGR8888;
3228                         else
3229                                 return DRM_FORMAT_XBGR8888;
3230                 } else {
3231                         if (alpha)
3232                                 return DRM_FORMAT_ARGB8888;
3233                         else
3234                                 return DRM_FORMAT_XRGB8888;
3235                 }
3236         case PLANE_CTL_FORMAT_XRGB_2101010:
3237                 if (rgb_order) {
3238                         if (alpha)
3239                                 return DRM_FORMAT_ABGR2101010;
3240                         else
3241                                 return DRM_FORMAT_XBGR2101010;
3242                 } else {
3243                         if (alpha)
3244                                 return DRM_FORMAT_ARGB2101010;
3245                         else
3246                                 return DRM_FORMAT_XRGB2101010;
3247                 }
3248         case PLANE_CTL_FORMAT_XRGB_16161616F:
3249                 if (rgb_order) {
3250                         if (alpha)
3251                                 return DRM_FORMAT_ABGR16161616F;
3252                         else
3253                                 return DRM_FORMAT_XBGR16161616F;
3254                 } else {
3255                         if (alpha)
3256                                 return DRM_FORMAT_ARGB16161616F;
3257                         else
3258                                 return DRM_FORMAT_XRGB16161616F;
3259                 }
3260         }
3261 }
3262
/*
 * Try to wrap the firmware/BIOS-programmed scanout memory described by
 * @plane_config in a GEM object + framebuffer, so the boot image can be
 * inherited without flicker.
 *
 * Returns true if plane_config->fb now wraps a valid stolen-memory
 * object; false if the caller must fall back (share another CRTC's fb
 * or disable the plane).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* The BIOS base/size need not be page aligned; widen to page bounds. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* Only modifiers the initial-FB takeover path knows how to handle. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
				 fb->modifier);
		return false;
	}

	/* The BIOS fb lives in stolen memory; wrap it in place, don't copy. */
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (IS_ERR(obj))
		return false;

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		/* Stamp the BIOS-programmed fence stride/tiling onto the object. */
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	/*
	 * Dropped on both paths: on failure this releases our only
	 * reference; on success the framebuffer presumably holds its own
	 * reference taken by intel_framebuffer_init() -- TODO confirm.
	 */
	i915_gem_object_put(obj);
	return ret;
}
3337
3338 static void
3339 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3340                         struct intel_plane_state *plane_state,
3341                         bool visible)
3342 {
3343         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3344
3345         plane_state->uapi.visible = visible;
3346
3347         if (visible)
3348                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3349         else
3350                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3351 }
3352
3353 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3354 {
3355         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3356         struct drm_plane *plane;
3357
3358         /*
3359          * Active_planes aliases if multiple "primary" or cursor planes
3360          * have been used on the same (or wrong) pipe. plane_mask uses
3361          * unique ids, hence we can use that to reconstruct active_planes.
3362          */
3363         crtc_state->active_planes = 0;
3364
3365         drm_for_each_plane_mask(plane, &dev_priv->drm,
3366                                 crtc_state->uapi.plane_mask)
3367                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3368 }
3369
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * keeping the software crtc/plane state consistent with the hardware.
 * The order of the workarounds below (cxsr disable + vblank wait before
 * the actual plane disable) is significant.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	/* The plane no longer contributes bandwidth or cdclk demand. */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* NOTE(review): IPS appears tied to the primary plane -- confirm. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3413
3414 static struct intel_frontbuffer *
3415 to_intel_frontbuffer(struct drm_framebuffer *fb)
3416 {
3417         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3418 }
3419
/*
 * Take over the BIOS-programmed framebuffer for @intel_crtc's primary
 * plane. Tries, in order:
 *   1. wrapping the BIOS memory in a fresh fb (intel_alloc_initial_plane_obj),
 *   2. sharing an fb already reconstructed for another active CRTC that
 *      scans out from the same GGTT offset,
 *   3. disabling the primary plane entirely so software state doesn't
 *      claim a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Alloc failed; the readout-allocated fb wrapper is of no further use. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			/* Extra ref for this plane; dropped on pin failure below. */
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Pin the fb so the hardware keeps scanning out of valid memory. */
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Full-fb src viewport in 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	/* Keep the BIOS-chosen swizzle so the inherited fb stays readable. */
	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3525
3526 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3527                                int color_plane,
3528                                unsigned int rotation)
3529 {
3530         int cpp = fb->format->cpp[color_plane];
3531
3532         switch (fb->modifier) {
3533         case DRM_FORMAT_MOD_LINEAR:
3534         case I915_FORMAT_MOD_X_TILED:
3535                 /*
3536                  * Validated limit is 4k, but has 5k should
3537                  * work apart from the following features:
3538                  * - Ytile (already limited to 4k)
3539                  * - FP16 (already limited to 4k)
3540                  * - render compression (already limited to 4k)
3541                  * - KVMR sprite and cursor (don't care)
3542                  * - horizontal panning (TODO verify this)
3543                  * - pipe and plane scaling (TODO verify this)
3544                  */
3545                 if (cpp == 8)
3546                         return 4096;
3547                 else
3548                         return 5120;
3549         case I915_FORMAT_MOD_Y_TILED_CCS:
3550         case I915_FORMAT_MOD_Yf_TILED_CCS:
3551                 /* FIXME AUX plane? */
3552         case I915_FORMAT_MOD_Y_TILED:
3553         case I915_FORMAT_MOD_Yf_TILED:
3554                 if (cpp == 8)
3555                         return 2048;
3556                 else
3557                         return 4096;
3558         default:
3559                 MISSING_CASE(fb->modifier);
3560                 return 2048;
3561         }
3562 }
3563
3564 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3565                                int color_plane,
3566                                unsigned int rotation)
3567 {
3568         int cpp = fb->format->cpp[color_plane];
3569
3570         switch (fb->modifier) {
3571         case DRM_FORMAT_MOD_LINEAR:
3572         case I915_FORMAT_MOD_X_TILED:
3573                 if (cpp == 8)
3574                         return 4096;
3575                 else
3576                         return 5120;
3577         case I915_FORMAT_MOD_Y_TILED_CCS:
3578         case I915_FORMAT_MOD_Yf_TILED_CCS:
3579                 /* FIXME AUX plane? */
3580         case I915_FORMAT_MOD_Y_TILED:
3581         case I915_FORMAT_MOD_Yf_TILED:
3582                 if (cpp == 8)
3583                         return 2048;
3584                 else
3585                         return 5120;
3586         default:
3587                 MISSING_CASE(fb->modifier);
3588                 return 2048;
3589         }
3590 }
3591
/* Gen11+: 5k source width regardless of modifier, cpp or rotation. */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3598
/* Maximum plane source height on pre-gen11 (see skl_check_main_surface()). */
static int skl_max_plane_height(void)
{
	return 4096;
}
3603
/* Maximum plane source height on gen11+ (see skl_check_main_surface()). */
static int icl_max_plane_height(void)
{
	return 4320;
}
3608
/*
 * The CCS AUX surface shares its x/y offsets with the main surface, so
 * the two must end up at identical coordinates. Walk the AUX surface
 * offset backwards, one alignment step at a time, until its derived
 * x/y matches the main surface's (@main_x, @main_y) -- or until we run
 * out of room (aux_offset below main_offset, aux_y past main_y, or
 * offset 0 reached).
 *
 * Returns true (and commits the adjusted AUX offset/x/y into
 * @plane_state) if a matching position was found, false otherwise.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ccs_plane = main_to_ccs_plane(fb, 0);
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Adjust in CCS units, then restore the sub-sample remainder. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3652
/*
 * Validate and finalize the main (color plane 0) surface offset and
 * x/y coordinates for a SKL+ universal plane: enforce the per-platform
 * max source size, keep the surface offset below the AUX offset, work
 * around the X-tile x+width-vs-stride restriction, and line the main
 * surface up with the CCS AUX surface when compression is used.
 *
 * Returns 0 on success, -EINVAL when no acceptable offset exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Retry lower offsets until x fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3753
/*
 * Compute the chroma (CbCr, color plane 1) surface offset and x/y for
 * a semiplanar YUV framebuffer.
 *
 * The ">> 17" shifts fold the 16.16 fixed-point conversion (>> 16)
 * together with a divide-by-2 for chroma subsampling -- assumes
 * hsub == vsub == 2 for all semiplanar formats handled here (NV12,
 * P01x); TODO confirm.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3782
/*
 * Compute the CCS AUX (color plane 1) surface offset and x/y for a
 * render-compressed framebuffer. The offset math is done in CCS units
 * (src coords divided by the hsub/vsub subsampling factors), then the
 * sub-sample remainders are folded back into the stored x/y.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* 16.16 fixed point -> integer pixels */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	int hsub;
	int vsub;
	int x;
	int y;
	u32 offset;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1);
	x = src_x / hsub;
	y = src_y / vsub;
	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
3806
/*
 * Top-level SKL+ plane surface check: compute the GTT view, then set up
 * the AUX surface (chroma or CCS) *before* the main surface, since the
 * main surface offset search depends on the AUX offset.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing more to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (drm_format_info_is_yuv_semiplanar(fb->format)) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/*
		 * No AUX plane: ~0xfff appears to serve as a poisoned
		 * "unused" offset (all-ones above the low 12 bits) --
		 * NOTE(review): confirm how the plane programming code
		 * interprets this sentinel.
		 */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3843
3844 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3845                              const struct intel_plane_state *plane_state,
3846                              unsigned int *num, unsigned int *den)
3847 {
3848         const struct drm_framebuffer *fb = plane_state->hw.fb;
3849         unsigned int cpp = fb->format->cpp[0];
3850
3851         /*
3852          * g4x bspec says 64bpp pixel rate can't exceed 80%
3853          * of cdclk when the sprite plane is enabled on the
3854          * same pipe. ilk/snb bspec says 64bpp pixel rate is
3855          * never allowed to exceed 80% of cdclk. Let's just go
3856          * with the ilk/snb limit always.
3857          */
3858         if (cpp == 8) {
3859                 *num = 10;
3860                 *den = 8;
3861         } else {
3862                 *num = 1;
3863                 *den = 1;
3864         }
3865 }
3866
3867 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3868                                 const struct intel_plane_state *plane_state)
3869 {
3870         unsigned int pixel_rate;
3871         unsigned int num, den;
3872
3873         /*
3874          * Note that crtc_state->pixel_rate accounts for both
3875          * horizontal and vertical panel fitter downscaling factors.
3876          * Pre-HSW bspec tells us to only consider the horizontal
3877          * downscaling factor here. We ignore that and just consider
3878          * both for simplicity.
3879          */
3880         pixel_rate = crtc_state->pixel_rate;
3881
3882         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3883
3884         /* two pixels per clock with double wide pipe */
3885         if (crtc_state->double_wide)
3886                 den *= 2;
3887
3888         return DIV_ROUND_UP(pixel_rate * num, den);
3889 }
3890
3891 unsigned int
3892 i9xx_plane_max_stride(struct intel_plane *plane,
3893                       u32 pixel_format, u64 modifier,
3894                       unsigned int rotation)
3895 {
3896         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3897
3898         if (!HAS_GMCH(dev_priv)) {
3899                 return 32*1024;
3900         } else if (INTEL_GEN(dev_priv) >= 4) {
3901                 if (modifier == I915_FORMAT_MOD_X_TILED)
3902                         return 16*1024;
3903                 else
3904                         return 32*1024;
3905         } else if (INTEL_GEN(dev_priv) >= 3) {
3906                 if (modifier == I915_FORMAT_MOD_X_TILED)
3907                         return 8*1024;
3908                 else
3909                         return 16*1024;
3910         } else {
3911                 if (plane->i9xx_plane == PLANE_C)
3912                         return 4*1024;
3913                 else
3914                         return 8*1024;
3915         }
3916 }
3917
3918 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3919 {
3920         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3921         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3922         u32 dspcntr = 0;
3923
3924         if (crtc_state->gamma_enable)
3925                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3926
3927         if (crtc_state->csc_enable)
3928                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3929
3930         if (INTEL_GEN(dev_priv) < 5)
3931                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3932
3933         return dspcntr;
3934 }
3935
/*
 * Compute the plane-specific DSPCNTR bits (enable, pixel format, tiling,
 * rotation/reflection) for a pre-skl primary plane. Pipe-level bits are
 * added separately by i9xx_plane_ctl_crtc(). Returns 0 (plane disabled)
 * for an unhandled pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Translate the fourcc into the hw pixel format field */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* The tiling bit is only set on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
4008
4009 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
4010 {
4011         struct drm_i915_private *dev_priv =
4012                 to_i915(plane_state->uapi.plane->dev);
4013         const struct drm_framebuffer *fb = plane_state->hw.fb;
4014         int src_x, src_y, src_w;
4015         u32 offset;
4016         int ret;
4017
4018         ret = intel_plane_compute_gtt(plane_state);
4019         if (ret)
4020                 return ret;
4021
4022         if (!plane_state->uapi.visible)
4023                 return 0;
4024
4025         src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4026         src_x = plane_state->uapi.src.x1 >> 16;
4027         src_y = plane_state->uapi.src.y1 >> 16;
4028
4029         /* Undocumented hardware limit on i965/g4x/vlv/chv */
4030         if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
4031                 return -EINVAL;
4032
4033         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
4034
4035         if (INTEL_GEN(dev_priv) >= 4)
4036                 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
4037                                                             plane_state, 0);
4038         else
4039                 offset = 0;
4040
4041         /*
4042          * Put the final coordinates back so that the src
4043          * coordinate checks will see the right values.
4044          */
4045         drm_rect_translate_to(&plane_state->uapi.src,
4046                               src_x << 16, src_y << 16);
4047
4048         /* HSW/BDW do this automagically in hardware */
4049         if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
4050                 unsigned int rotation = plane_state->hw.rotation;
4051                 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4052                 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4053
4054                 if (rotation & DRM_MODE_ROTATE_180) {
4055                         src_x += src_w - 1;
4056                         src_y += src_h - 1;
4057                 } else if (rotation & DRM_MODE_REFLECT_X) {
4058                         src_x += src_w - 1;
4059                 }
4060         }
4061
4062         plane_state->color_plane[0].offset = offset;
4063         plane_state->color_plane[0].x = src_x;
4064         plane_state->color_plane[0].y = src_y;
4065
4066         return 0;
4067 }
4068
4069 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4070 {
4071         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4072         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4073
4074         if (IS_CHERRYVIEW(dev_priv))
4075                 return i9xx_plane == PLANE_B;
4076         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4077                 return false;
4078         else if (IS_GEN(dev_priv, 4))
4079                 return i9xx_plane == PLANE_C;
4080         else
4081                 return i9xx_plane == PLANE_B ||
4082                         i9xx_plane == PLANE_C;
4083 }
4084
/*
 * Atomic check for a pre-skl primary plane: validate rotation, run the
 * generic plane state checks (no scaling; windowing only where the hw
 * has it), compute the surface layout, and finally cache the DSPCNTR
 * value in plane_state->ctl.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* Nothing more to compute for an invisible plane */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
4120
/*
 * Program a pre-skl primary plane: stride, window position/size (where
 * the hw has a window generator), offsets, and finally the control and
 * surface registers. All writes happen under the uncore lock so the
 * update is as atomic as the hw allows.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* plane-specific bits + pipe-level bits (gamma/csc/pipe select) */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen4+ take the precomputed aligned offset, gen2/3 the linear one */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own window generator */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4191
/*
 * Disable a pre-skl primary plane while keeping the DSPCNTR bits that
 * also affect the pipe programmed; see the comment below.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Write the control register before zeroing the surface address */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4222
/*
 * Read back from hw whether this plane is enabled, and which pipe it is
 * currently assigned to (*pipe). Returns false (plane treated as off)
 * when the power well is down and the registers can't be read.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-ilk DSPCNTR carries the pipe assignment; see DISPPLANE_SEL_PIPE */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4257
4258 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4259 {
4260         struct drm_device *dev = intel_crtc->base.dev;
4261         struct drm_i915_private *dev_priv = to_i915(dev);
4262
4263         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4264         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4265         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4266 }
4267
4268 /*
4269  * This function detaches (aka. unbinds) unused scalers in hardware
4270  */
4271 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4272 {
4273         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4274         const struct intel_crtc_scaler_state *scaler_state =
4275                 &crtc_state->scaler_state;
4276         int i;
4277
4278         /* loop through and disable scalers that aren't in use */
4279         for (i = 0; i < intel_crtc->num_scalers; i++) {
4280                 if (!scaler_state->scalers[i].in_use)
4281                         skl_detach_scaler(intel_crtc, i);
4282         }
4283 }
4284
/*
 * The hw stride is either expressed as a multiple of 64 byte chunks
 * (linear buffers) or in number of tiles (tiled buffers); return the
 * divisor to convert a byte stride to the hw unit.
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
4299
4300 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4301                      int color_plane)
4302 {
4303         const struct drm_framebuffer *fb = plane_state->hw.fb;
4304         unsigned int rotation = plane_state->hw.rotation;
4305         u32 stride = plane_state->color_plane[color_plane].stride;
4306
4307         if (color_plane >= fb->format->num_planes)
4308                 return 0;
4309
4310         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4311 }
4312
/*
 * Translate a fourcc pixel format into the skl+ PLANE_CTL format (and
 * channel order) bits. Returns 0 for an unhandled format.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	/* packed YUV 4:2:2 */
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	/* semi-planar YUV */
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4372
4373 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4374 {
4375         if (!plane_state->hw.fb->format->has_alpha)
4376                 return PLANE_CTL_ALPHA_DISABLE;
4377
4378         switch (plane_state->hw.pixel_blend_mode) {
4379         case DRM_MODE_BLEND_PIXEL_NONE:
4380                 return PLANE_CTL_ALPHA_DISABLE;
4381         case DRM_MODE_BLEND_PREMULTI:
4382                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4383         case DRM_MODE_BLEND_COVERAGE:
4384                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4385         default:
4386                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4387                 return PLANE_CTL_ALPHA_DISABLE;
4388         }
4389 }
4390
4391 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4392 {
4393         if (!plane_state->hw.fb->format->has_alpha)
4394                 return PLANE_COLOR_ALPHA_DISABLE;
4395
4396         switch (plane_state->hw.pixel_blend_mode) {
4397         case DRM_MODE_BLEND_PIXEL_NONE:
4398                 return PLANE_COLOR_ALPHA_DISABLE;
4399         case DRM_MODE_BLEND_PREMULTI:
4400                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4401         case DRM_MODE_BLEND_COVERAGE:
4402                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4403         default:
4404                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4405                 return PLANE_COLOR_ALPHA_DISABLE;
4406         }
4407 }
4408
/*
 * Translate a framebuffer modifier into PLANE_CTL tiling and render
 * decompression bits. Returns 0 (linear) for an unhandled modifier.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		/* gen12 render decompression with clear color disabled */
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4434
4435 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4436 {
4437         switch (rotate) {
4438         case DRM_MODE_ROTATE_0:
4439                 break;
4440         /*
4441          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4442          * while i915 HW rotation is clockwise, thats why this swapping.
4443          */
4444         case DRM_MODE_ROTATE_90:
4445                 return PLANE_CTL_ROTATE_270;
4446         case DRM_MODE_ROTATE_180:
4447                 return PLANE_CTL_ROTATE_180;
4448         case DRM_MODE_ROTATE_270:
4449                 return PLANE_CTL_ROTATE_90;
4450         default:
4451                 MISSING_CASE(rotate);
4452         }
4453
4454         return 0;
4455 }
4456
/*
 * Translate a reflection into the cnl+ PLANE_CTL flip bit. Only
 * horizontal reflection has a hw bit here; DRM_MODE_REFLECT_Y
 * deliberately falls through to the warning.
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
		/* fall through - unhandled, warn below */
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
4471
4472 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4473 {
4474         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4475         u32 plane_ctl = 0;
4476
4477         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4478                 return plane_ctl;
4479
4480         if (crtc_state->gamma_enable)
4481                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4482
4483         if (crtc_state->csc_enable)
4484                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4485
4486         return plane_ctl;
4487 }
4488
/*
 * Compute the plane-specific PLANE_CTL value (enable, alpha, format,
 * tiling, rotation, color keying) for a skl+ plane. Pipe-level bits
 * are added separately by skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* On glk+ alpha/gamma/csc live in PLANE_COLOR_CTL instead */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* cnl+ can additionally reflect the plane horizontally */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4527
4528 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4529 {
4530         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4531         u32 plane_color_ctl = 0;
4532
4533         if (INTEL_GEN(dev_priv) >= 11)
4534                 return plane_color_ctl;
4535
4536         if (crtc_state->gamma_enable)
4537                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4538
4539         if (crtc_state->csc_enable)
4540                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4541
4542         return plane_color_ctl;
4543 }
4544
4545 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4546                         const struct intel_plane_state *plane_state)
4547 {
4548         struct drm_i915_private *dev_priv =
4549                 to_i915(plane_state->uapi.plane->dev);
4550         const struct drm_framebuffer *fb = plane_state->hw.fb;
4551         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4552         u32 plane_color_ctl = 0;
4553
4554         plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4555         plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4556
4557         if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4558                 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4559                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4560                 else
4561                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4562
4563                 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4564                         plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4565         } else if (fb->format->is_yuv) {
4566                 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4567         }
4568
4569         return plane_color_ctl;
4570 }
4571
/*
 * Re-program the display hw state and, if a duplicated atomic state was
 * saved (e.g. by intel_prepare_reset()), commit it back. Forces a full
 * recalculation on every crtc since pointers into the old state are no
 * longer valid. Returns 0 or a negative error code; -EDEADLK is not
 * expected here (hence the WARN_ON).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4610
4611 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4612 {
4613         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4614                 intel_has_gpu_reset(&dev_priv->gt));
4615 }
4616
/*
 * Called before a GPU reset. If the reset clobbers the display (or the
 * force_reset_modeset_test modparam is set), take all modeset locks,
 * duplicate the current atomic state into modeset_restore_state for
 * intel_finish_reset() to replay, and gracefully disable all crtcs.
 * The acquired locks stay held across the reset — presumably released
 * in intel_finish_reset()'s unlock path (verify against that function).
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* consumed by intel_finish_reset() via fetch_and_zero() */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4673
/*
 * Counterpart of intel_prepare_reset(): restore the display state saved
 * before the GPU reset, re-initialize display hardware if the reset
 * clobbered it, then drop the modeset locks taken by prepare.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* NULL here means prepare bailed early; just drop the locks. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts under the irq lock. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Locks were acquired in intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4724
/*
 * Apply the ICL PIPE_CHICKEN workaround bits for this crtc's pipe
 * via a read-modify-write; the bits are only ever set, never cleared.
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
4748
/*
 * Enable Transcoder Port Sync on a slave transcoder, pointing it at its
 * master.  No-op for non-slave crtc states (master == INVALID_TRANSCODER).
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	/* Master select encoding: 0 = EDP, otherwise transcoder index + 1. */
	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Transcoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		   trans_ddi_func_ctl2_val);
}
4778
/*
 * Switch the FDI TX/RX link from a training pattern to normal operation
 * (with enhanced framing) after link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/*
		 * NOTE(review): clearing FDI_LINK_TRAIN_NONE_IVB and then
		 * setting it again is redundant but kept as the historical
		 * programming sequence.
		 */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses its own train-pattern field encoding. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4819
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-phase FDI training sequence: pattern 1 until bit lock is
 * reported in FDI_RX_IIR, then pattern 2 until symbol lock.  Each phase
 * polls up to 5 times; failure is logged but not propagated.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; write the status bit back to clear it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock; write the status bit back to clear it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4913
/* Voltage-swing / pre-emphasis levels tried in order during SNB-B and
 * IVB FDI link training (FDI_TX_CTL vswing/emphasis field values). */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4920
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Two-phase training like ILK, but additionally steps through the
 * snb_b_fdi_train_param[] voltage-swing/pre-emphasis table, retrying
 * each level up to 5 times before moving to the next.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses its own train-pattern field encoding. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until bit lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Write-back clears the sticky status bit. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same table walk again, this time waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				/* Write-back clears the sticky status bit. */
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
5053
/*
 * Manual link training for Ivy Bridge A0 parts
 *
 * Unlike the ILK/SNB variants, on failure of either training phase this
 * disables TX/RX and restarts the whole sequence at the next
 * vswing/pre-emphasis level (each level tried twice, j/2 indexes the
 * snb_b_fdi_train_param[] table).
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock, double-checking the live register. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				/* Write-back clears the sticky status bit. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock, double-checking the live register. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				/* Write-back clears the sticky status bit. */
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
5173
/*
 * Enable the FDI PLLs for this pipe: PCH FDI RX PLL first (with port
 * width and BPC from PIPECONF), then switch RX to PCDclk, then the CPU
 * FDI TX PLL.  Each step is followed by a posting read plus delay.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror the pipe's BPC from PIPECONF into the FDI RX BPC field. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
5210
/*
 * Disable the FDI PLLs in the reverse order of ironlake_fdi_pll_enable():
 * RX back to Rawclk, TX PLL off, then RX PLL off, waiting for each step.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Disable PCH FDI RX PLL. */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
5240
/*
 * Disable the FDI link for this pipe: turn off CPU TX and PCH RX, then
 * leave both sides parked on training pattern 1 (ready for re-training).
 */
static void ironlake_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* Keep the FDI RX BPC field in sync with PIPECONF. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
5291
5292 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5293 {
5294         struct drm_crtc *crtc;
5295         bool cleanup_done;
5296
5297         drm_for_each_crtc(crtc, &dev_priv->drm) {
5298                 struct drm_crtc_commit *commit;
5299                 spin_lock(&crtc->commit_lock);
5300                 commit = list_first_entry_or_null(&crtc->commit_list,
5301                                                   struct drm_crtc_commit, commit_entry);
5302                 cleanup_done = commit ?
5303                         try_wait_for_completion(&commit->cleanup_done) : true;
5304                 spin_unlock(&crtc->commit_lock);
5305
5306                 if (cleanup_done)
5307                         continue;
5308
5309                 drm_crtc_wait_one_vblank(crtc);
5310
5311                 return true;
5312         }
5313
5314         return false;
5315 }
5316
5317 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5318 {
5319         u32 temp;
5320
5321         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
5322
5323         mutex_lock(&dev_priv->sb_lock);
5324
5325         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5326         temp |= SBI_SSCCTL_DISABLE;
5327         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5328
5329         mutex_unlock(&dev_priv->sb_lock);
5330 }
5331
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * Disables the clock, computes auxdiv/divsel/phaseinc for the requested
 * pixel clock, programs the dividers over sideband, re-enables the
 * modulator and finally ungates the pixel clock.  lpt_get_iclkip() is
 * the inverse computation.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* All sideband accesses below are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5410
/*
 * Read back the currently programmed iCLKIP frequency in KHz — the
 * inverse of lpt_program_iclkip().  Returns 0 when the pixel clock is
 * gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Extract divider and phase increment fields. */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reverse of: desired_divisor = divsel*range + phaseinc (+2*range). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5447
/*
 * Copy the currently programmed CPU transcoder timings (H/V total,
 * blank, sync and vsyncshift) into the corresponding PCH transcoder
 * registers so both transcoders agree on the mode.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Horizontal timings */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* Vertical timings */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5471
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 *
 * The WARNs check that the FDI receivers on pipes B and C are disabled
 * while the bit is being flipped.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* Nothing to do if the bit already matches the requested state */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	/* flush the write before continuing */
	POSTING_READ(SOUTH_CHICKEN1);
}
5491
5492 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5493 {
5494         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5495         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5496
5497         switch (crtc->pipe) {
5498         case PIPE_A:
5499                 break;
5500         case PIPE_B:
5501                 if (crtc_state->fdi_lanes > 2)
5502                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5503                 else
5504                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5505
5506                 break;
5507         case PIPE_C:
5508                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5509
5510                 break;
5511         default:
5512                 BUG();
5513         }
5514 }
5515
5516 /*
5517  * Finds the encoder associated with the given CRTC. This can only be
5518  * used when we know that the CRTC isn't feeding multiple encoders!
5519  */
5520 static struct intel_encoder *
5521 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5522                            const struct intel_crtc_state *crtc_state)
5523 {
5524         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5525         const struct drm_connector_state *connector_state;
5526         const struct drm_connector *connector;
5527         struct intel_encoder *encoder = NULL;
5528         int num_encoders = 0;
5529         int i;
5530
5531         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5532                 if (connector_state->crtc != &crtc->base)
5533                         continue;
5534
5535                 encoder = to_intel_encoder(connector_state->best_encoder);
5536                 num_encoders++;
5537         }
5538
5539         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5540              num_encoders, pipe_name(crtc->pipe));
5541
5542         return encoder;
5543 }
5544
5545 /*
5546  * Enable PCH resources required for PCH ports:
5547  *   - PCH PLLs
5548  *   - FDI training & RX/TX
5549  *   - update transcoder timings
5550  *   - DP transcoding bits
5551  *   - transcoder
5552  */
5553 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5554                                 const struct intel_crtc_state *crtc_state)
5555 {
5556         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5557         struct drm_device *dev = crtc->base.dev;
5558         struct drm_i915_private *dev_priv = to_i915(dev);
5559         enum pipe pipe = crtc->pipe;
5560         u32 temp;
5561
5562         assert_pch_transcoder_disabled(dev_priv, pipe);
5563
5564         if (IS_IVYBRIDGE(dev_priv))
5565                 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5566
5567         /* Write the TU size bits before fdi link training, so that error
5568          * detection works. */
5569         I915_WRITE(FDI_RX_TUSIZE1(pipe),
5570                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5571
5572         /* For PCH output, training FDI link */
5573         dev_priv->display.fdi_link_train(crtc, crtc_state);
5574
5575         /* We need to program the right clock selection before writing the pixel
5576          * mutliplier into the DPLL. */
5577         if (HAS_PCH_CPT(dev_priv)) {
5578                 u32 sel;
5579
5580                 temp = I915_READ(PCH_DPLL_SEL);
5581                 temp |= TRANS_DPLL_ENABLE(pipe);
5582                 sel = TRANS_DPLLB_SEL(pipe);
5583                 if (crtc_state->shared_dpll ==
5584                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5585                         temp |= sel;
5586                 else
5587                         temp &= ~sel;
5588                 I915_WRITE(PCH_DPLL_SEL, temp);
5589         }
5590
5591         /* XXX: pch pll's can be enabled any time before we enable the PCH
5592          * transcoder, and we actually should do this to not upset any PCH
5593          * transcoder that already use the clock when we share it.
5594          *
5595          * Note that enable_shared_dpll tries to do the right thing, but
5596          * get_shared_dpll unconditionally resets the pll - we need that to have
5597          * the right LVDS enable sequence. */
5598         intel_enable_shared_dpll(crtc_state);
5599
5600         /* set transcoder timing, panel must allow it */
5601         assert_panel_unlocked(dev_priv, pipe);
5602         ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5603
5604         intel_fdi_normal_train(crtc);
5605
5606         /* For PCH DP, enable TRANS_DP_CTL */
5607         if (HAS_PCH_CPT(dev_priv) &&
5608             intel_crtc_has_dp_encoder(crtc_state)) {
5609                 const struct drm_display_mode *adjusted_mode =
5610                         &crtc_state->hw.adjusted_mode;
5611                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5612                 i915_reg_t reg = TRANS_DP_CTL(pipe);
5613                 enum port port;
5614
5615                 temp = I915_READ(reg);
5616                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5617                           TRANS_DP_SYNC_MASK |
5618                           TRANS_DP_BPC_MASK);
5619                 temp |= TRANS_DP_OUTPUT_ENABLE;
5620                 temp |= bpc << 9; /* same format but at 11:9 */
5621
5622                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5623                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5624                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5625                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5626
5627                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5628                 WARN_ON(port < PORT_B || port > PORT_D);
5629                 temp |= TRANS_DP_PORT_SEL(port);
5630
5631                 I915_WRITE(reg, temp);
5632         }
5633
5634         ironlake_enable_pch_transcoder(crtc_state);
5635 }
5636
/*
 * LPT counterpart of ironlake_pch_enable(). Everything here operates on
 * PCH transcoder A: program the iclkip clock, copy the CPU transcoder
 * timings over and enable the transcoder.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5653
/*
 * After a modeset on CPT, verify that the pipe is actually running by
 * watching its scanline counter (PIPEDSL) for movement. Retries once
 * before declaring the pipe stuck.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* second chance before complaining */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
5667
5668 /*
5669  * The hardware phase 0.0 refers to the center of the pixel.
5670  * We want to start from the top/left edge which is phase
5671  * -0.5. That matches how the hardware calculates the scaling
5672  * factors (from top-left of the first pixel to bottom-right
5673  * of the last pixel, as opposed to the pixel centers).
5674  *
5675  * For 4:2:0 subsampled chroma planes we obviously have to
5676  * adjust that so that the chroma sample position lands in
5677  * the right spot.
5678  *
5679  * Note that for packed YCbCr 4:2:2 formats there is no way to
5680  * control chroma siting. The hardware simply replicates the
5681  * chroma samples for both of the luma samples, and thus we don't
5682  * actually get the expected MPEG2 chroma siting convention :(
5683  * The same behaviour is observed on pre-SKL platforms as well.
5684  *
5685  * Theory behind the formula (note that we ignore sub-pixel
5686  * source coordinates):
5687  * s = source sample position
5688  * d = destination sample position
5689  *
5690  * Downscaling 4:1:
5691  * -0.5
5692  * | 0.0
5693  * | |     1.5 (initial phase)
5694  * | |     |
5695  * v v     v
5696  * | s | s | s | s |
5697  * |       d       |
5698  *
5699  * Upscaling 1:4:
5700  * -0.5
5701  * | -0.375 (initial phase)
5702  * | |     0.0
5703  * | |     |
5704  * v v     v
5705  * |       s       |
5706  * | d | d | d | d |
5707  */
5708 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5709 {
5710         int phase = -0x8000;
5711         u16 trip = 0;
5712
5713         if (chroma_cosited)
5714                 phase += (sub - 1) * 0x8000 / sub;
5715
5716         phase += scale / (2 * sub);
5717
5718         /*
5719          * Hardware initial phase limited to [-0.5:1.5].
5720          * Since the max hardware scale factor is 3.0, we
5721          * should never actually excdeed 1.0 here.
5722          */
5723         WARN_ON(phase < -0x8000 || phase > 0x18000);
5724
5725         if (phase < 0)
5726                 phase = 0x10000 + phase;
5727         else
5728                 trip = PS_PHASE_TRIP;
5729
5730         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5731 }
5732
5733 #define SKL_MIN_SRC_W 8
5734 #define SKL_MAX_SRC_W 4096
5735 #define SKL_MIN_SRC_H 8
5736 #define SKL_MAX_SRC_H 4096
5737 #define SKL_MIN_DST_W 8
5738 #define SKL_MAX_DST_W 4096
5739 #define SKL_MIN_DST_H 8
5740 #define SKL_MAX_DST_H 4096
5741 #define ICL_MAX_SRC_W 5120
5742 #define ICL_MAX_SRC_H 4096
5743 #define ICL_MAX_DST_W 5120
5744 #define ICL_MAX_DST_H 4096
5745 #define SKL_MIN_YUV_420_SRC_W 16
5746 #define SKL_MIN_YUV_420_SRC_H 16
5747
/*
 * skl_update_scaler - stage a scaler allocate/free request in crtc_state
 * @crtc_state: crtc state whose scaler bookkeeping is updated
 * @force_detach: free the scaler regardless of whether it is needed
 * @scaler_user: index identifying the requesting user (plane index or
 *	SKL_CRTC_INDEX for pipe scaling)
 * @scaler_id: in/out scaler id currently assigned to this user
 * @src_w: source width
 * @src_h: source height
 * @dst_w: destination width
 * @dst_h: destination height
 * @format: source pixel format (NULL for pipe scaling)
 * @need_scaler: caller already determined a scaler is required
 *
 * Only updates the scaler bookkeeping in @crtc_state; the actual
 * register programming happens later in the plane/panel-fitter code.
 *
 * Returns 0 on success, -EINVAL when the request cannot be supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV sources have a larger minimum size */
	if (format && drm_format_info_is_yuv_semiplanar(format) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
5836
5837 /**
5838  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5839  *
5840  * @state: crtc's scaler state
5841  *
5842  * Return
5843  *     0 - scaler_usage updated successfully
5844  *    error - requested scaling cannot be supported or other error condition
5845  */
5846 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5847 {
5848         const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
5849         bool need_scaler = false;
5850
5851         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5852                 need_scaler = true;
5853
5854         return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
5855                                  &state->scaler_state.scaler_id,
5856                                  state->pipe_src_w, state->pipe_src_h,
5857                                  adjusted_mode->crtc_hdisplay,
5858                                  adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5859 }
5860
5861 /**
5862  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5863  * @crtc_state: crtc's scaler state
5864  * @plane_state: atomic plane state to update
5865  *
5866  * Return
5867  *     0 - scaler_usage updated successfully
5868  *    error - requested scaling cannot be supported or other error condition
5869  */
5870 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5871                                    struct intel_plane_state *plane_state)
5872 {
5873         struct intel_plane *intel_plane =
5874                 to_intel_plane(plane_state->uapi.plane);
5875         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5876         struct drm_framebuffer *fb = plane_state->hw.fb;
5877         int ret;
5878         bool force_detach = !fb || !plane_state->uapi.visible;
5879         bool need_scaler = false;
5880
5881         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5882         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5883             fb && drm_format_info_is_yuv_semiplanar(fb->format))
5884                 need_scaler = true;
5885
5886         ret = skl_update_scaler(crtc_state, force_detach,
5887                                 drm_plane_index(&intel_plane->base),
5888                                 &plane_state->scaler_id,
5889                                 drm_rect_width(&plane_state->uapi.src) >> 16,
5890                                 drm_rect_height(&plane_state->uapi.src) >> 16,
5891                                 drm_rect_width(&plane_state->uapi.dst),
5892                                 drm_rect_height(&plane_state->uapi.dst),
5893                                 fb ? fb->format : NULL, need_scaler);
5894
5895         if (ret || plane_state->scaler_id < 0)
5896                 return ret;
5897
5898         /* check colorkey */
5899         if (plane_state->ckey.flags) {
5900                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5901                               intel_plane->base.base.id,
5902                               intel_plane->base.name);
5903                 return -EINVAL;
5904         }
5905
5906         /* Check src format */
5907         switch (fb->format->format) {
5908         case DRM_FORMAT_RGB565:
5909         case DRM_FORMAT_XBGR8888:
5910         case DRM_FORMAT_XRGB8888:
5911         case DRM_FORMAT_ABGR8888:
5912         case DRM_FORMAT_ARGB8888:
5913         case DRM_FORMAT_XRGB2101010:
5914         case DRM_FORMAT_XBGR2101010:
5915         case DRM_FORMAT_ARGB2101010:
5916         case DRM_FORMAT_ABGR2101010:
5917         case DRM_FORMAT_YUYV:
5918         case DRM_FORMAT_YVYU:
5919         case DRM_FORMAT_UYVY:
5920         case DRM_FORMAT_VYUY:
5921         case DRM_FORMAT_NV12:
5922         case DRM_FORMAT_P010:
5923         case DRM_FORMAT_P012:
5924         case DRM_FORMAT_P016:
5925         case DRM_FORMAT_Y210:
5926         case DRM_FORMAT_Y212:
5927         case DRM_FORMAT_Y216:
5928         case DRM_FORMAT_XVYU2101010:
5929         case DRM_FORMAT_XVYU12_16161616:
5930         case DRM_FORMAT_XVYU16161616:
5931                 break;
5932         case DRM_FORMAT_XBGR16161616F:
5933         case DRM_FORMAT_ABGR16161616F:
5934         case DRM_FORMAT_XRGB16161616F:
5935         case DRM_FORMAT_ARGB16161616F:
5936                 if (INTEL_GEN(dev_priv) >= 11)
5937                         break;
5938                 /* fall through */
5939         default:
5940                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5941                               intel_plane->base.base.id, intel_plane->base.name,
5942                               fb->base.id, fb->format->format);
5943                 return -EINVAL;
5944         }
5945
5946         return 0;
5947 }
5948
5949 void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state)
5950 {
5951         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
5952         int i;
5953
5954         for (i = 0; i < crtc->num_scalers; i++)
5955                 skl_detach_scaler(crtc, i);
5956 }
5957
/*
 * skylake_pfit_enable - program panel fitting via a pipe scaler (SKL+)
 *
 * Uses the scaler previously reserved in crtc_state->scaler_state to
 * implement panel fitting: writes the scaler control, phase, window
 * position and window size registers.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* A scaler must have been assigned during atomic check */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in the high 16 bits, height in the low */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* .16 fixed-point scale factors from the pipe source size */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		/* sub == 1, no chroma co-siting for pipe scaling */
		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5994
5995 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5996 {
5997         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5998         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5999         enum pipe pipe = crtc->pipe;
6000
6001         if (crtc_state->pch_pfit.enabled) {
6002                 /* Force use of hard-coded filter coefficients
6003                  * as some pre-programmed values are broken,
6004                  * e.g. x201.
6005                  */
6006                 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
6007                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
6008                                                  PF_PIPE_SEL_IVB(pipe));
6009                 else
6010                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
6011                 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
6012                 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
6013         }
6014 }
6015
/*
 * hsw_enable_ips - enable IPS if the crtc state asks for it
 *
 * Broadwell enables IPS through the pcode mailbox; other platforms use
 * the IPS_CTL register directly. Must only be called after a non-cursor
 * plane is enabled and a vblank has passed (see the WARN below).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
6051
/*
 * hsw_disable_ips - disable IPS if the crtc state had it enabled
 *
 * Mirrors hsw_enable_ips(): pcode mailbox on Broadwell, IPS_CTL
 * elsewhere. Waits for a vblank afterwards, since planes can only be
 * disabled after that.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6078
6079 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6080 {
6081         if (intel_crtc->overlay)
6082                 (void) intel_overlay_switch_off(intel_crtc->overlay);
6083
6084         /* Let userspace switch the overlay on again. In most cases userspace
6085          * has to recompute where to put it anyway.
6086          */
6087 }
6088
6089 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6090                                        const struct intel_crtc_state *new_crtc_state)
6091 {
6092         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6093         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6094
6095         if (!old_crtc_state->ips_enabled)
6096                 return false;
6097
6098         if (needs_modeset(new_crtc_state))
6099                 return true;
6100
6101         /*
6102          * Workaround : Do not read or write the pipe palette/gamma data while
6103          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6104          *
6105          * Disable IPS before we program the LUT.
6106          */
6107         if (IS_HASWELL(dev_priv) &&
6108             (new_crtc_state->uapi.color_mgmt_changed ||
6109              new_crtc_state->update_pipe) &&
6110             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6111                 return true;
6112
6113         return !new_crtc_state->ips_enabled;
6114 }
6115
6116 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6117                                        const struct intel_crtc_state *new_crtc_state)
6118 {
6119         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6120         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6121
6122         if (!new_crtc_state->ips_enabled)
6123                 return false;
6124
6125         if (needs_modeset(new_crtc_state))
6126                 return true;
6127
6128         /*
6129          * Workaround : Do not read or write the pipe palette/gamma data while
6130          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6131          *
6132          * Re-enable IPS after the LUT has been programmed.
6133          */
6134         if (IS_HASWELL(dev_priv) &&
6135             (new_crtc_state->uapi.color_mgmt_changed ||
6136              new_crtc_state->update_pipe) &&
6137             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6138                 return true;
6139
6140         /*
6141          * We can't read out IPS on broadwell, assume the worst and
6142          * forcibly enable IPS on the first fastset.
6143          */
6144         if (new_crtc_state->update_pipe &&
6145             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6146                 return true;
6147
6148         return !old_crtc_state->ips_enabled;
6149 }
6150
6151 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6152 {
6153         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6154
6155         if (!crtc_state->nv12_planes)
6156                 return false;
6157
6158         /* WA Display #0827: Gen9:all */
6159         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6160                 return true;
6161
6162         return false;
6163 }
6164
6165 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6166 {
6167         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6168
6169         /* Wa_2006604312:icl */
6170         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6171                 return true;
6172
6173         return false;
6174 }
6175
6176 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6177                             const struct intel_crtc_state *new_crtc_state)
6178 {
6179         return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6180                 new_crtc_state->active_planes;
6181 }
6182
6183 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6184                              const struct intel_crtc_state *new_crtc_state)
6185 {
6186         return old_crtc_state->active_planes &&
6187                 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6188 }
6189
/*
 * intel_post_plane_update - per-crtc work after a plane update commit
 *
 * Runs after the planes for @crtc have been committed and a vblank has
 * passed: flushes frontbuffer tracking, updates watermarks, re-enables
 * IPS and FBC where applicable, and clears display workarounds the new
 * state no longer needs.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *new_primary_state =
		intel_atomic_get_new_plane_state(state, primary);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	/* FBC post-update only when the primary plane was in the state */
	if (new_primary_state)
		intel_fbc_post_update(crtc);

	/* WA Display #0827: can be dropped once no NV12 planes remain */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl: likewise, once no scaler users remain */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6222
/*
 * Per-CRTC preparation run before the plane update is committed:
 * disable IPS, arm FBC, apply display workarounds, disable memory
 * self-refresh / LP watermarks where needed and program intermediate
 * watermarks so the actual plane update is safe. The ordering of the
 * steps below matters; see the individual comments.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_plane *primary = to_intel_plane(crtc->base.primary);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        const struct intel_plane_state *new_primary_state =
                intel_atomic_get_new_plane_state(state, primary);
        enum pipe pipe = crtc->pipe;

        /* IPS must be off before the planes it depends on are touched. */
        if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
                hsw_disable_ips(old_crtc_state);

        /* FBC pre-update may require a vblank to pass before continuing. */
        if (new_primary_state &&
            intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
                intel_wait_for_vblank(dev_priv, pipe);

        /* Display WA 827 */
        if (!needs_nv12_wa(old_crtc_state) &&
            needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, true);

        /* Wa_2006604312:icl */
        if (!needs_scalerclk_wa(old_crtc_state) &&
            needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, true);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
            new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (old_crtc_state->hw.active &&
            new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * If we're doing a modeset we don't need to do any
         * pre-vblank watermark programming here.
         */
        if (!needs_modeset(new_crtc_state)) {
                /*
                 * For platforms that support atomic watermarks, program the
                 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
                 * will be the intermediate values that are safe for both pre- and
                 * post- vblank; when vblank happens, the 'active' values will be set
                 * to the final 'target' values and we'll do this again to get the
                 * optimal watermarks.  For gen9+ platforms, the values we program here
                 * will be the final target values which will get automatically latched
                 * at vblank time; no further programming will be necessary.
                 *
                 * If a platform hasn't been transitioned to atomic watermarks yet,
                 * we'll continue to update watermarks the old way, if flags tell
                 * us to.
                 */
                if (dev_priv->display.initial_watermarks)
                        dev_priv->display.initial_watermarks(state, crtc);
                else if (new_crtc_state->update_wm_pre)
                        intel_update_watermarks(crtc);
        }

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         *
         * We do this after .initial_watermarks() so that we have a
         * chance of catching underruns with the intermediate watermarks
         * vs. the old plane configuration.
         */
        if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6313
/*
 * Disable every plane of @crtc that is flagged in update_planes, and
 * signal a frontbuffer flip for the planes that were actually visible.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
                                      struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        unsigned int update_mask = new_crtc_state->update_planes;
        const struct intel_plane_state *old_plane_state;
        struct intel_plane *plane;
        unsigned fb_bits = 0;
        int i;

        intel_crtc_dpms_overlay_disable(crtc);

        for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
                /* Only planes on this pipe that are marked for update. */
                if (crtc->pipe != plane->pipe ||
                    !(update_mask & BIT(plane->id)))
                        continue;

                intel_disable_plane(plane, new_crtc_state);

                /* Accumulate frontbuffer bits only for previously visible planes. */
                if (old_plane_state->uapi.visible)
                        fb_bits |= plane->frontbuffer_bit;
        }

        intel_frontbuffer_flip(dev_priv, fb_bits);
}
6341
6342 /*
6343  * intel_connector_primary_encoder - get the primary encoder for a connector
6344  * @connector: connector for which to return the encoder
6345  *
6346  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6347  * all connectors to their encoder, except for DP-MST connectors which have
6348  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6349  * pointed to by as many DP-MST connectors as there are pipes.
6350  */
6351 static struct intel_encoder *
6352 intel_connector_primary_encoder(struct intel_connector *connector)
6353 {
6354         struct intel_encoder *encoder;
6355
6356         if (connector->mst_port)
6357                 return &dp_to_dig_port(connector->mst_port)->base;
6358
6359         encoder = intel_attached_encoder(&connector->base);
6360         WARN_ON(!encoder);
6361
6362         return encoder;
6363 }
6364
6365 static bool
6366 intel_connector_needs_modeset(struct intel_atomic_state *state,
6367                               const struct drm_connector_state *old_conn_state,
6368                               const struct drm_connector_state *new_conn_state)
6369 {
6370         struct intel_crtc *old_crtc = old_conn_state->crtc ?
6371                                       to_intel_crtc(old_conn_state->crtc) : NULL;
6372         struct intel_crtc *new_crtc = new_conn_state->crtc ?
6373                                       to_intel_crtc(new_conn_state->crtc) : NULL;
6374
6375         return new_crtc != old_crtc ||
6376                (new_crtc &&
6377                 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6378 }
6379
6380 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6381 {
6382         struct drm_connector_state *old_conn_state;
6383         struct drm_connector_state *new_conn_state;
6384         struct drm_connector *conn;
6385         int i;
6386
6387         for_each_oldnew_connector_in_state(&state->base, conn,
6388                                            old_conn_state, new_conn_state, i) {
6389                 struct intel_encoder *encoder;
6390                 struct intel_crtc *crtc;
6391
6392                 if (!intel_connector_needs_modeset(state,
6393                                                    old_conn_state,
6394                                                    new_conn_state))
6395                         continue;
6396
6397                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6398                 if (!encoder->update_prepare)
6399                         continue;
6400
6401                 crtc = new_conn_state->crtc ?
6402                         to_intel_crtc(new_conn_state->crtc) : NULL;
6403                 encoder->update_prepare(state, encoder, crtc);
6404         }
6405 }
6406
6407 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6408 {
6409         struct drm_connector_state *old_conn_state;
6410         struct drm_connector_state *new_conn_state;
6411         struct drm_connector *conn;
6412         int i;
6413
6414         for_each_oldnew_connector_in_state(&state->base, conn,
6415                                            old_conn_state, new_conn_state, i) {
6416                 struct intel_encoder *encoder;
6417                 struct intel_crtc *crtc;
6418
6419                 if (!intel_connector_needs_modeset(state,
6420                                                    old_conn_state,
6421                                                    new_conn_state))
6422                         continue;
6423
6424                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6425                 if (!encoder->update_complete)
6426                         continue;
6427
6428                 crtc = new_conn_state->crtc ?
6429                         to_intel_crtc(new_conn_state->crtc) : NULL;
6430                 encoder->update_complete(state, encoder, crtc);
6431         }
6432 }
6433
6434 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6435                                           struct intel_crtc *crtc)
6436 {
6437         const struct intel_crtc_state *crtc_state =
6438                 intel_atomic_get_new_crtc_state(state, crtc);
6439         const struct drm_connector_state *conn_state;
6440         struct drm_connector *conn;
6441         int i;
6442
6443         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6444                 struct intel_encoder *encoder =
6445                         to_intel_encoder(conn_state->best_encoder);
6446
6447                 if (conn_state->crtc != &crtc->base)
6448                         continue;
6449
6450                 if (encoder->pre_pll_enable)
6451                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6452         }
6453 }
6454
6455 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6456                                       struct intel_crtc *crtc)
6457 {
6458         const struct intel_crtc_state *crtc_state =
6459                 intel_atomic_get_new_crtc_state(state, crtc);
6460         const struct drm_connector_state *conn_state;
6461         struct drm_connector *conn;
6462         int i;
6463
6464         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6465                 struct intel_encoder *encoder =
6466                         to_intel_encoder(conn_state->best_encoder);
6467
6468                 if (conn_state->crtc != &crtc->base)
6469                         continue;
6470
6471                 if (encoder->pre_enable)
6472                         encoder->pre_enable(encoder, crtc_state, conn_state);
6473         }
6474 }
6475
6476 static void intel_encoders_enable(struct intel_atomic_state *state,
6477                                   struct intel_crtc *crtc)
6478 {
6479         const struct intel_crtc_state *crtc_state =
6480                 intel_atomic_get_new_crtc_state(state, crtc);
6481         const struct drm_connector_state *conn_state;
6482         struct drm_connector *conn;
6483         int i;
6484
6485         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6486                 struct intel_encoder *encoder =
6487                         to_intel_encoder(conn_state->best_encoder);
6488
6489                 if (conn_state->crtc != &crtc->base)
6490                         continue;
6491
6492                 if (encoder->enable)
6493                         encoder->enable(encoder, crtc_state, conn_state);
6494                 intel_opregion_notify_encoder(encoder, true);
6495         }
6496 }
6497
6498 static void intel_encoders_disable(struct intel_atomic_state *state,
6499                                    struct intel_crtc *crtc)
6500 {
6501         const struct intel_crtc_state *old_crtc_state =
6502                 intel_atomic_get_old_crtc_state(state, crtc);
6503         const struct drm_connector_state *old_conn_state;
6504         struct drm_connector *conn;
6505         int i;
6506
6507         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6508                 struct intel_encoder *encoder =
6509                         to_intel_encoder(old_conn_state->best_encoder);
6510
6511                 if (old_conn_state->crtc != &crtc->base)
6512                         continue;
6513
6514                 intel_opregion_notify_encoder(encoder, false);
6515                 if (encoder->disable)
6516                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6517         }
6518 }
6519
6520 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6521                                         struct intel_crtc *crtc)
6522 {
6523         const struct intel_crtc_state *old_crtc_state =
6524                 intel_atomic_get_old_crtc_state(state, crtc);
6525         const struct drm_connector_state *old_conn_state;
6526         struct drm_connector *conn;
6527         int i;
6528
6529         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6530                 struct intel_encoder *encoder =
6531                         to_intel_encoder(old_conn_state->best_encoder);
6532
6533                 if (old_conn_state->crtc != &crtc->base)
6534                         continue;
6535
6536                 if (encoder->post_disable)
6537                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6538         }
6539 }
6540
6541 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6542                                             struct intel_crtc *crtc)
6543 {
6544         const struct intel_crtc_state *old_crtc_state =
6545                 intel_atomic_get_old_crtc_state(state, crtc);
6546         const struct drm_connector_state *old_conn_state;
6547         struct drm_connector *conn;
6548         int i;
6549
6550         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6551                 struct intel_encoder *encoder =
6552                         to_intel_encoder(old_conn_state->best_encoder);
6553
6554                 if (old_conn_state->crtc != &crtc->base)
6555                         continue;
6556
6557                 if (encoder->post_pll_disable)
6558                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6559         }
6560 }
6561
6562 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6563                                        struct intel_crtc *crtc)
6564 {
6565         const struct intel_crtc_state *crtc_state =
6566                 intel_atomic_get_new_crtc_state(state, crtc);
6567         const struct drm_connector_state *conn_state;
6568         struct drm_connector *conn;
6569         int i;
6570
6571         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6572                 struct intel_encoder *encoder =
6573                         to_intel_encoder(conn_state->best_encoder);
6574
6575                 if (conn_state->crtc != &crtc->base)
6576                         continue;
6577
6578                 if (encoder->update_pipe)
6579                         encoder->update_pipe(encoder, crtc_state, conn_state);
6580         }
6581 }
6582
6583 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6584 {
6585         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6586         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6587
6588         plane->disable_plane(plane, crtc_state);
6589 }
6590
/*
 * Full CRTC enable sequence for ILK-style (PCH) platforms: program pipe
 * timings/M_N, bring up FDI and the PCH transcoder as needed, load the
 * LUTs and finally enable the pipe and encoders. The ordering of the
 * steps below follows hardware requirements and must not be changed
 * casually.
 */
static void ironlake_crtc_enable(struct intel_atomic_state *state,
                                 struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (WARN_ON(crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (new_crtc_state->has_pch_encoder)
                intel_prepare_shared_dpll(new_crtc_state);

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_pipe_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                intel_cpu_transcoder_set_m_n(new_crtc_state,
                                             &new_crtc_state->fdi_m_n, NULL);

        ironlake_set_pipeconf(new_crtc_state);

        crtc->active = true;

        intel_encoders_pre_enable(state, crtc);

        if (new_crtc_state->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(new_crtc_state);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
        intel_enable_pipe(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                ironlake_pch_enable(state, new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev_priv, pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (new_crtc_state->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Re-enable underrun reporting suppressed at the top of this function. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6682
6683 /* IPS only exists on ULT machines and is tied to pipe A. */
6684 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6685 {
6686         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6687 }
6688
6689 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6690                                             enum pipe pipe, bool apply)
6691 {
6692         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6693         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6694
6695         if (apply)
6696                 val |= mask;
6697         else
6698                 val &= ~mask;
6699
6700         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6701 }
6702
6703 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6704 {
6705         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6706         enum pipe pipe = crtc->pipe;
6707         u32 val;
6708
6709         val = MBUS_DBOX_A_CREDIT(2);
6710
6711         if (INTEL_GEN(dev_priv) >= 12) {
6712                 val |= MBUS_DBOX_BW_CREDIT(2);
6713                 val |= MBUS_DBOX_B_CREDIT(12);
6714         } else {
6715                 val |= MBUS_DBOX_BW_CREDIT(1);
6716                 val |= MBUS_DBOX_B_CREDIT(8);
6717         }
6718
6719         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6720 }
6721
6722 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6723 {
6724         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6725         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6726         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6727         u32 val;
6728
6729         val = I915_READ(reg);
6730         val &= ~HSW_FRAME_START_DELAY_MASK;
6731         val |= HSW_FRAME_START_DELAY(0);
6732         I915_WRITE(reg, val);
6733 }
6734
/*
 * Full CRTC enable sequence for HSW+ (DDI) platforms: PLL and encoder
 * pre-enable, transcoder/pipe programming, pfit, LUTs, watermarks and
 * finally pipe + encoder enable, with the GLK/CNL scaler clock gating
 * workaround and the HSW dual-vblank workaround applied at the end.
 * The ordering of the steps below follows hardware requirements.
 */
static void haswell_crtc_enable(struct intel_atomic_state *state,
                                struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        bool psl_clkgate_wa;

        if (WARN_ON(crtc->active))
                return;

        intel_encoders_pre_pll_enable(state, crtc);

        if (new_crtc_state->shared_dpll)
                intel_enable_shared_dpll(new_crtc_state);

        intel_encoders_pre_enable(state, crtc);

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_enable_trans_port_sync(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);

        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder))
                I915_WRITE(PIPE_MULT(cpu_transcoder),
                           new_crtc_state->pixel_multiplier - 1);

        if (new_crtc_state->has_pch_encoder)
                intel_cpu_transcoder_set_m_n(new_crtc_state,
                                             &new_crtc_state->fdi_m_n, NULL);

        if (!transcoder_is_dsi(cpu_transcoder)) {
                hsw_set_frame_start_delay(new_crtc_state);
                haswell_set_pipeconf(new_crtc_state);
        }

        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(new_crtc_state);

        crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(new_crtc_state);
        else
                ironlake_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (INTEL_GEN(dev_priv) < 9)
                intel_disable_primary_plane(new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(crtc);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(new_crtc_state);

        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                lpt_pch_enable(state, new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* WA #1180: keep clock gating disabled until one vblank has passed. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
6842
6843 void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6844 {
6845         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6846         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6847         enum pipe pipe = crtc->pipe;
6848
6849         /* To avoid upsetting the power well on haswell only disable the pfit if
6850          * it's in use. The hw state code will make sure we get this right. */
6851         if (old_crtc_state->pch_pfit.enabled) {
6852                 I915_WRITE(PF_CTL(pipe), 0);
6853                 I915_WRITE(PF_WIN_POS(pipe), 0);
6854                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6855         }
6856 }
6857
/*
 * Full CRTC disable sequence for ILK-style (PCH) platforms: encoder
 * disable, pipe off, pfit off, FDI/PCH transcoder teardown. Underrun
 * reporting is suppressed for the duration to avoid spurious warnings.
 */
static void ironlake_crtc_disable(struct intel_atomic_state *state,
                                  struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_pipe(old_crtc_state);

        ironlake_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(state, crtc);

        if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(crtc);
        }

        /* Re-enable underrun reporting suppressed at the top of this function. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6914
/*
 * HSW+ CRTC disable: only runs the encoder disable hooks here; the rest
 * of the teardown happens via the encoder/DDI code paths.
 */
static void haswell_crtc_disable(struct intel_atomic_state *state,
                                 struct intel_crtc *crtc)
{
        /*
         * FIXME collapse everything to one hook.
         * Need care with mst->ddi interactions.
         */
        intel_encoders_disable(state, crtc);
        intel_encoders_post_disable(state, crtc);
}
6925
/* Program and enable the GMCH panel fitter from the precomputed state. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* Nothing to do when the pfit is not used by this crtc. */
        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

        /* Program the scaling ratios before flipping the enable bit. */
        I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
        I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6948
6949 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6950 {
6951         if (phy == PHY_NONE)
6952                 return false;
6953
6954         if (IS_ELKHARTLAKE(dev_priv))
6955                 return phy <= PHY_C;
6956
6957         if (INTEL_GEN(dev_priv) >= 11)
6958                 return phy <= PHY_B;
6959
6960         return false;
6961 }
6962
6963 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6964 {
6965         if (INTEL_GEN(dev_priv) >= 12)
6966                 return phy >= PHY_D && phy <= PHY_I;
6967
6968         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6969                 return phy >= PHY_C && phy <= PHY_F;
6970
6971         return false;
6972 }
6973
6974 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6975 {
6976         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6977                 return PHY_A;
6978
6979         return (enum phy)port;
6980 }
6981
6982 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6983 {
6984         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6985                 return PORT_TC_NONE;
6986
6987         if (INTEL_GEN(dev_priv) >= 12)
6988                 return port - PORT_D;
6989
6990         return port - PORT_C;
6991 }
6992
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are flagged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
7015
/*
 * Return the power domain needed to use the AUX channel of @dig_port.
 * Type-C ports in thunderbolt-alt mode use the dedicated TBT AUX
 * domains; all other ports use the regular AUX domains.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        if (intel_phy_is_tc(dev_priv, phy) &&
            dig_port->tc_mode == TC_PORT_TBT_ALT) {
                switch (dig_port->aux_ch) {
                case AUX_CH_C:
                        return POWER_DOMAIN_AUX_C_TBT;
                case AUX_CH_D:
                        return POWER_DOMAIN_AUX_D_TBT;
                case AUX_CH_E:
                        return POWER_DOMAIN_AUX_E_TBT;
                case AUX_CH_F:
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
                }
        }

        switch (dig_port->aux_ch) {
        case AUX_CH_A:
                return POWER_DOMAIN_AUX_A;
        case AUX_CH_B:
                return POWER_DOMAIN_AUX_B;
        case AUX_CH_C:
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
        case AUX_CH_E:
                return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
        default:
                MISSING_CASE(dig_port->aux_ch);
                return POWER_DOMAIN_AUX_A;
        }
}
7061
/*
 * Compute the bitmask of power domains this crtc state needs: pipe,
 * transcoder, panel fitter (when used), every attached encoder's
 * domain, plus audio and display-core domains where applicable.
 * Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_encoder *encoder;
        enum pipe pipe = crtc->pipe;
        u64 mask;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (!crtc_state->hw.active)
                return 0;

        mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
        mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
                mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

        /* Each encoder driven by this crtc brings in its own domain. */
        drm_for_each_encoder_mask(encoder, &dev_priv->drm,
                                  crtc_state->uapi.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                mask |= BIT_ULL(intel_encoder->power_domain);
        }

        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
                mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

        /* A shared DPLL keeps the display core powered. */
        if (crtc_state->shared_dpll)
                mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

        return mask;
}
7095
7096 static u64
7097 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7098 {
7099         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7100         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7101         enum intel_display_power_domain domain;
7102         u64 domains, new_domains, old_domains;
7103
7104         old_domains = crtc->enabled_power_domains;
7105         crtc->enabled_power_domains = new_domains =
7106                 get_crtc_power_domains(crtc_state);
7107
7108         domains = new_domains & ~old_domains;
7109
7110         for_each_power_domain(domain, domains)
7111                 intel_display_power_get(dev_priv, domain);
7112
7113         return old_domains & ~new_domains;
7114 }
7115
/* Drop one reference on every power domain set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
                                      u64 domains)
{
        enum intel_display_power_domain domain;

        for_each_power_domain(domain, domains)
                intel_display_power_put_unchecked(dev_priv, domain);
}
7124
/*
 * Full crtc enable sequence for VLV/CHV. The ordering below (timings,
 * pipeconf, PLL, pfit, LUTs, watermarks, pipe, encoders) is the
 * hardware-mandated modeset sequence; do not reorder.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (WARN_ON(crtc->active))
                return;

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_pipe_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        /* CHV pipe B: use legacy blending and a black canvas color. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
        }

        i9xx_set_pipeconf(new_crtc_state);

        crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_pll_enable(state, crtc);

        if (IS_CHERRYVIEW(dev_priv)) {
                chv_prepare_pll(crtc, new_crtc_state);
                chv_enable_pll(crtc, new_crtc_state);
        } else {
                vlv_prepare_pll(crtc, new_crtc_state);
                vlv_enable_pll(crtc, new_crtc_state);
        }

        intel_encoders_pre_enable(state, crtc);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        dev_priv->display.initial_watermarks(state, crtc);
        intel_enable_pipe(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);
}
7179
/* Write the precomputed FP0/FP1 PLL divider values for this pipe. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
        I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
7188
/*
 * Full crtc enable sequence for pre-ILK (gen2-4) platforms. The
 * ordering (dividers, timings, pipeconf, PLL, pfit, LUTs, watermarks,
 * pipe, encoders) is the required modeset sequence; do not reorder.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (WARN_ON(crtc->active))
                return;

        i9xx_set_pll_dividers(new_crtc_state);

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_pipe_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        i9xx_set_pipeconf(new_crtc_state);

        crtc->active = true;

        /* Gen2 has no CPU FIFO underrun reporting. */
        if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(state, crtc);

        i9xx_enable_pll(crtc, new_crtc_state);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        /* Older platforms have no initial_watermarks() hook. */
        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
        else
                intel_update_watermarks(crtc);
        intel_enable_pipe(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);
}
7236
/* Disable the GMCH panel fitter; must only run with the pipe disabled. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* Nothing to do if the pfit was not in use. */
        if (!old_crtc_state->gmch_pfit.control)
                return;

        assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

        DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
                      I915_READ(PFIT_CONTROL));
        I915_WRITE(PFIT_CONTROL, 0);
}
7251
/*
 * Full crtc disable sequence for pre-ILK (gen2-4) platforms: encoders,
 * vblank, pipe, pfit, then the PLL. Ordering is hardware-mandated.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (IS_GEN(dev_priv, 2))
                intel_wait_for_vblank(dev_priv, pipe);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_pipe(old_crtc_state);

        i9xx_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(state, crtc);

        /* DSI keeps its PLL; everything else shuts the pipe PLL down. */
        if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(old_crtc_state);
        }

        intel_encoders_post_pll_disable(state, crtc);

        /* Gen2 has no CPU FIFO underrun reporting. */
        if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        if (!dev_priv->display.initial_watermarks)
                intel_update_watermarks(crtc);

        /* clock the pipe down to 640x480@60 to potentially save power */
        if (IS_I830(dev_priv))
                i830_enable_pipe(dev_priv, pipe);
}
7298
/*
 * Disable an active crtc outside of a normal atomic commit (used during
 * hw state sanitization/takeover): turn off its planes, call the
 * platform crtc_disable() hook via a throwaway atomic state, then reset
 * all software bookkeeping (state, encoders, power domains, cdclk/bw).
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder;
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_bw_state *bw_state =
                to_intel_bw_state(dev_priv->bw_obj.state);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        enum intel_display_power_domain domain;
        struct intel_plane *plane;
        struct drm_atomic_state *state;
        struct intel_crtc_state *temp_crtc_state;
        enum pipe pipe = crtc->pipe;
        u64 domains;
        int ret;

        if (!crtc_state->hw.active)
                return;

        /* Turn off every plane still visible on this crtc first. */
        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);

                if (plane_state->uapi.visible)
                        intel_plane_disable_noatomic(crtc, plane);
        }

        /*
         * Build a throwaway atomic state so the crtc_disable() hook can
         * be called with its usual (state, crtc) arguments.
         */
        state = drm_atomic_state_alloc(&dev_priv->drm);
        if (!state) {
                DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
                              crtc->base.base.id, crtc->base.name);
                return;
        }

        state->acquire_ctx = ctx;

        /* Everything's already locked, -EDEADLK can't happen. */
        temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
        ret = drm_atomic_add_affected_connectors(state, &crtc->base);

        WARN_ON(IS_ERR(temp_crtc_state) || ret);

        dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

        drm_atomic_state_put(state);

        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                      crtc->base.base.id, crtc->base.name);

        /* Bring the software state in line with the now-off hardware. */
        crtc->active = false;
        crtc->base.enabled = false;

        WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
        crtc_state->uapi.active = false;
        crtc_state->uapi.connector_mask = 0;
        crtc_state->uapi.encoder_mask = 0;
        intel_crtc_free_hw_state(crtc_state);
        memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

        /* Detach any encoders that were using this crtc. */
        for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
                encoder->base.crtc = NULL;

        intel_fbc_disable(crtc);
        intel_update_watermarks(crtc);
        intel_disable_shared_dpll(crtc_state);

        /* Drop every power domain reference this crtc was holding. */
        domains = crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
                intel_display_power_put_unchecked(dev_priv, domain);
        crtc->enabled_power_domains = 0;

        dev_priv->active_pipes &= ~BIT(pipe);
        dev_priv->min_cdclk[pipe] = 0;
        dev_priv->min_voltage_level[pipe] = 0;

        bw_state->data_rate[pipe] = 0;
        bw_state->num_active_planes[pipe] = 0;
}
7378
7379 /*
7380  * turn all crtc's off, but do not adjust state
7381  * This has to be paired with a call to intel_modeset_setup_hw_state.
7382  */
7383 int intel_display_suspend(struct drm_device *dev)
7384 {
7385         struct drm_i915_private *dev_priv = to_i915(dev);
7386         struct drm_atomic_state *state;
7387         int ret;
7388
7389         state = drm_atomic_helper_suspend(dev);
7390         ret = PTR_ERR_OR_ZERO(state);
7391         if (ret)
7392                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7393         else
7394                 dev_priv->modeset_restore_state = state;
7395         return ret;
7396 }
7397
/* Generic encoder destroy hook: unregister from DRM core and free. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

        drm_encoder_cleanup(encoder);
        kfree(intel_encoder);
}
7405
7406 /* Cross check the actual hw state with our own modeset state tracking (and it's
7407  * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state)
{
        struct intel_connector *connector = to_intel_connector(conn_state->connector);

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.base.id,
                      connector->base.name);

        if (connector->get_hw_state(connector)) {
                /* Connector is on in hardware: it must have an active crtc. */
                struct intel_encoder *encoder = connector->encoder;

                I915_STATE_WARN(!crtc_state,
                         "connector enabled without attached crtc\n");

                if (!crtc_state)
                        return;

                I915_STATE_WARN(!crtc_state->hw.active,
                                "connector is active, but attached crtc isn't\n");

                /* MST encoders are bound to their port, not a connector. */
                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                        return;

                I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
                        "atomic encoder doesn't match attached encoder\n");

                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
                /* Connector is off: an attached crtc must not be active. */
                I915_STATE_WARN(crtc_state && crtc_state->hw.active,
                                "attached crtc is active, but connector isn't\n");
                I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
                        "best encoder set without crtc!\n");
        }
}
7444
7445 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7446 {
7447         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7448                 return crtc_state->fdi_lanes;
7449
7450         return 0;
7451 }
7452
/*
 * Validate the requested FDI lane count for @pipe against per-platform
 * limits and, on IVB 3-pipe parts, against the lanes already claimed by
 * the pipe sharing the FDI link. Returns 0 on success, -EINVAL on an
 * impossible config, or the error from fetching the other pipe's state
 * (which may be -EDEADLK and must be propagated).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = pipe_config->uapi.state;
        struct intel_crtc *other_crtc;
        struct intel_crtc_state *other_crtc_state;

        DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
                      pipe_name(pipe), pipe_config->fdi_lanes);
        if (pipe_config->fdi_lanes > 4) {
                DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
                              pipe_name(pipe), pipe_config->fdi_lanes);
                return -EINVAL;
        }

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                if (pipe_config->fdi_lanes > 2) {
                        DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
                                      pipe_config->fdi_lanes);
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        /* With only two pipes there is no FDI link sharing to check. */
        if (INTEL_NUM_PIPES(dev_priv) == 2)
                return 0;

        /* Ivybridge 3 pipe is really complicated */
        switch (pipe) {
        case PIPE_A:
                return 0;
        case PIPE_B:
                if (pipe_config->fdi_lanes <= 2)
                        return 0;

                /* Pipe B taking >2 lanes only works if pipe C uses none. */
                other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
                other_crtc_state =
                        intel_atomic_get_crtc_state(state, other_crtc);
                if (IS_ERR(other_crtc_state))
                        return PTR_ERR(other_crtc_state);

                if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
                        DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
                                      pipe_name(pipe), pipe_config->fdi_lanes);
                        return -EINVAL;
                }
                return 0;
        case PIPE_C:
                if (pipe_config->fdi_lanes > 2) {
                        DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
                                      pipe_name(pipe), pipe_config->fdi_lanes);
                        return -EINVAL;
                }

                /* Pipe C only gets lanes if pipe B uses at most 2. */
                other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
                other_crtc_state =
                        intel_atomic_get_crtc_state(state, other_crtc);
                if (IS_ERR(other_crtc_state))
                        return PTR_ERR(other_crtc_state);

                if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
                        DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
                        return -EINVAL;
                }
                return 0;
        default:
                BUG();
        }
}
7524
#define RETRY 1
/*
 * Compute FDI lane count and link M/N values for @pipe_config. If the
 * config needs more lanes than allowed, the pipe bpp is reduced step by
 * step (down to 18 bpp) and the computation retried; RETRY is returned
 * so the caller re-runs the full compute with the lowered bpp.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        int lane, link_bw, fdi_dotclock, ret;
        bool needs_recompute = false;

retry:
        /* FDI is a binary signal running at ~2.7GHz, encoding
         * each output octet as 10 bits. The actual frequency
         * is stored as a divider into a 100MHz clock, and the
         * mode pixel clock is stored in units of 1KHz.
         * Hence the bw of each lane in terms of the mode signal
         * is:
         */
        link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

        fdi_dotclock = adjusted_mode->crtc_clock;

        lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
                                           pipe_config->pipe_bpp);

        pipe_config->fdi_lanes = lane;

        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
                               link_bw, &pipe_config->fdi_m_n, false, false);

        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        /* -EDEADLK must be propagated untouched for lock backoff. */
        if (ret == -EDEADLK)
                return ret;

        /* Too many lanes needed: drop one bpc (2 bits x 3 channels). */
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
                pipe_config->pipe_bpp -= 2*3;
                DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
                              pipe_config->pipe_bpp);
                needs_recompute = true;
                pipe_config->bw_constrained = true;

                goto retry;
        }

        if (needs_recompute)
                return RETRY;

        return ret;
}
7573
7574 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7575 {
7576         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7577         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7578
7579         /* IPS only exists on ULT machines and is tied to pipe A. */
7580         if (!hsw_crtc_supports_ips(crtc))
7581                 return false;
7582
7583         if (!i915_modparams.enable_ips)
7584                 return false;
7585
7586         if (crtc_state->pipe_bpp > 24)
7587                 return false;
7588
7589         /*
7590          * We compare against max which means we must take
7591          * the increased cdclk requirement into account when
7592          * calculating the new cdclk.
7593          *
7594          * Should measure whether using a lower cdclk w/o IPS
7595          */
7596         if (IS_BROADWELL(dev_priv) &&
7597             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7598                 return false;
7599
7600         return true;
7601 }
7602
/* Decide whether IPS should actually be enabled for this crtc state. */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->uapi.crtc->dev);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(crtc_state->uapi.state);

        if (!hsw_crtc_state_ips_capable(crtc_state))
                return false;

        /*
         * When IPS gets enabled, the pipe CRC changes. Since IPS gets
         * enabled and disabled dynamically based on package C states,
         * user space can't make reliable use of the CRCs, so let's just
         * completely disable it.
         */
        if (crtc_state->crc_enabled)
                return false;

        /* IPS should be fine as long as at least one plane is enabled. */
        if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
                return false;

        /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
        if (IS_BROADWELL(dev_priv) &&
            crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
                return false;

        return true;
}
7633
7634 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7635 {
7636         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7637
7638         /* GDG double wide on either pipe, otherwise pipe A only */
7639         return INTEL_GEN(dev_priv) < 4 &&
7640                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7641 }
7642
7643 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7644 {
7645         u32 pixel_rate;
7646
7647         pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7648
7649         /*
7650          * We only use IF-ID interlacing. If we ever use
7651          * PF-ID we'll need to adjust the pixel_rate here.
7652          */
7653
7654         if (pipe_config->pch_pfit.enabled) {
7655                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7656                 u32 pfit_size = pipe_config->pch_pfit.size;
7657
7658                 pipe_w = pipe_config->pipe_src_w;
7659                 pipe_h = pipe_config->pipe_src_h;
7660
7661                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7662                 pfit_h = pfit_size & 0xFFFF;
7663                 if (pipe_w < pfit_w)
7664                         pipe_w = pfit_w;
7665                 if (pipe_h < pfit_h)
7666                         pipe_h = pfit_h;
7667
7668                 if (WARN_ON(!pfit_w || !pfit_h))
7669                         return pixel_rate;
7670
7671                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7672                                      pfit_w * pfit_h);
7673         }
7674
7675         return pixel_rate;
7676 }
7677
7678 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7679 {
7680         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7681
7682         if (HAS_GMCH(dev_priv))
7683                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7684                 crtc_state->pixel_rate =
7685                         crtc_state->hw.adjusted_mode.crtc_clock;
7686         else
7687                 crtc_state->pixel_rate =
7688                         ilk_pipe_pixel_rate(crtc_state);
7689 }
7690
/*
 * Validate and finish computing the crtc configuration: dotclock vs
 * platform limits (enabling double wide mode on pre-gen4 if needed),
 * output format vs CTM constraints, odd-width restrictions, the
 * hsync-offset workaround, the pixel rate and - for PCH encoders - the
 * FDI configuration. Returns 0 or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        int clock_limit = dev_priv->max_dotclk_freq;

        if (INTEL_GEN(dev_priv) < 4) {
                clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

                /*
                 * Enable double wide mode when the dot clock
                 * is > 90% of the (display) core speed.
                 */
                if (intel_crtc_supports_double_wide(crtc) &&
                    adjusted_mode->crtc_clock > clock_limit) {
                        clock_limit = dev_priv->max_dotclk_freq;
                        pipe_config->double_wide = true;
                }
        }

        if (adjusted_mode->crtc_clock > clock_limit) {
                DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
                              adjusted_mode->crtc_clock, clock_limit,
                              yesno(pipe_config->double_wide));
                return -EINVAL;
        }

        if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
             pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
             pipe_config->hw.ctm) {
                /*
                 * There is only one pipe CSC unit per pipe, and we need that
                 * for output conversion from RGB->YCBCR. So if CTM is already
                 * applied we can't support YCBCR420 output.
                 */
                DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
                return -EINVAL;
        }

        /*
         * Pipe horizontal size must be even in:
         * - DVO ganged mode
         * - LVDS dual channel mode
         * - Double wide pipe
         */
        if (pipe_config->pipe_src_w & 1) {
                if (pipe_config->double_wide) {
                        DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
                        return -EINVAL;
                }

                if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
                    intel_is_dual_link_lvds(dev_priv)) {
                        DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
                        return -EINVAL;
                }
        }

        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
        if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
                adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
                return -EINVAL;

        intel_crtc_compute_pixel_rate(pipe_config);

        if (pipe_config->has_pch_encoder)
                return ironlake_fdi_compute_config(crtc, pipe_config);

        return 0;
}
7764
7765 static void
7766 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7767 {
7768         while (*num > DATA_LINK_M_N_MASK ||
7769                *den > DATA_LINK_M_N_MASK) {
7770                 *num >>= 1;
7771                 *den >>= 1;
7772         }
7773 }
7774
7775 static void compute_m_n(unsigned int m, unsigned int n,
7776                         u32 *ret_m, u32 *ret_n,
7777                         bool constant_n)
7778 {
7779         /*
7780          * Several DP dongles in particular seem to be fussy about
7781          * too large link M/N values. Give N value as 0x8000 that
7782          * should be acceptable by specific devices. 0x8000 is the
7783          * specified fixed N value for asynchronous clock mode,
7784          * which the devices expect also in synchronous clock mode.
7785          */
7786         if (constant_n)
7787                 *ret_n = 0x8000;
7788         else
7789                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7790
7791         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7792         intel_reduce_m_n_ratio(ret_m, ret_n);
7793 }
7794
7795 void
7796 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7797                        int pixel_clock, int link_clock,
7798                        struct intel_link_m_n *m_n,
7799                        bool constant_n, bool fec_enable)
7800 {
7801         u32 data_clock = bits_per_pixel * pixel_clock;
7802
7803         if (fec_enable)
7804                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7805
7806         m_n->tu = 64;
7807         compute_m_n(data_clock,
7808                     link_clock * nlanes * 8,
7809                     &m_n->gmch_m, &m_n->gmch_n,
7810                     constant_n);
7811
7812         compute_m_n(pixel_clock, link_clock,
7813                     &m_n->link_m, &m_n->link_n,
7814                     constant_n);
7815 }
7816
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed into PCH_DREF_CONTROL. Only applies to IBX/CPT PCH
 * platforms; on disagreement the BIOS state wins and the cached VBT
 * value is overwritten.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
				      enableddisabled(bios_lvds_use_ssc),
				      enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
7837
7838 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7839 {
7840         if (i915_modparams.panel_use_ssc >= 0)
7841                 return i915_modparams.panel_use_ssc != 0;
7842         return dev_priv->vbt.lvds_use_ssc
7843                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7844 }
7845
7846 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7847 {
7848         return (1 << dpll->n) << 16 | dpll->m2;
7849 }
7850
7851 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7852 {
7853         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7854 }
7855
7856 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7857                                      struct intel_crtc_state *crtc_state,
7858                                      struct dpll *reduced_clock)
7859 {
7860         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7861         u32 fp, fp2 = 0;
7862
7863         if (IS_PINEVIEW(dev_priv)) {
7864                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7865                 if (reduced_clock)
7866                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7867         } else {
7868                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7869                 if (reduced_clock)
7870                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7871         }
7872
7873         crtc_state->dpll_hw_state.fp0 = fp;
7874
7875         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7876             reduced_clock) {
7877                 crtc_state->dpll_hw_state.fp1 = fp2;
7878         } else {
7879                 crtc_state->dpll_hw_state.fp1 = fp;
7880         }
7881 }
7882
/*
 * Work around PLLB opamp auto-calibration on VLV: force the opamp to a
 * sane value via DPIO instead of the 0x3f it calibrates to on its own.
 * The magic values come from the eDP/HDMI DPIO vbios notes (see
 * vlv_prepare_pll).
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/*
	 * NOTE(review): the second pass clears the forced value again,
	 * presumably to latch the calibration - confirm against the DPIO
	 * programming notes.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7911
/*
 * Program the PCH transcoder data/link M1/N1 registers for this crtc's
 * pipe from the supplied M/N values.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The TU size shares the data M1 register with the M value. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7924
7925 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7926                                  enum transcoder transcoder)
7927 {
7928         if (IS_HASWELL(dev_priv))
7929                 return transcoder == TRANSCODER_EDP;
7930
7931         /*
7932          * Strictly speaking some registers are available before
7933          * gen7, but we only support DRRS on gen7+
7934          */
7935         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7936 }
7937
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * per-transcoder M1/N1 registers are used, and the M2/N2 set is
 * additionally written when DRRS is enabled and the transcoder has
 * those registers. Pre-gen5 (G4X) uses the per-pipe register layout.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-ILK: per-pipe (G4X) register layout. */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7971
/*
 * Program the DP M/N values selected by @m_n. M1_N1 selects the primary
 * dividers (plus M2/N2 where supported); M2_N2 programs the downclock
 * dividers into the M1/N1 registers on platforms that lack dedicated
 * M2/N2 registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n from the crtc
	 * state, ignoring the @m_n selection made above - confirm this is
	 * intentional (no M2/N2 programming exists for PCH transcoders
	 * here).
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
7996
7997 static void vlv_compute_dpll(struct intel_crtc *crtc,
7998                              struct intel_crtc_state *pipe_config)
7999 {
8000         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8001                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8002         if (crtc->pipe != PIPE_A)
8003                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8004
8005         /* DPLL not used with DSI, but still need the rest set up */
8006         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8007                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8008                         DPLL_EXT_BUFFER_ENABLE_VLV;
8009
8010         pipe_config->dpll_hw_state.dpll_md =
8011                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8012 }
8013
8014 static void chv_compute_dpll(struct intel_crtc *crtc,
8015                              struct intel_crtc_state *pipe_config)
8016 {
8017         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8018                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8019         if (crtc->pipe != PIPE_A)
8020                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8021
8022         /* DPLL not used with DSI, but still need the rest set up */
8023         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8024                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8025
8026         pipe_config->dpll_hw_state.dpll_md =
8027                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8028 }
8029
/*
 * Program the VLV PLL dividers and analog settings over DPIO before the
 * PLL is enabled. The magic DPIO values come from the eDP/HDMI DPIO
 * vbios notes; the write sequence is order-dependent and must not be
 * reordered.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Calibration enable is written in a second pass on purpose. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock / DP tuning; extra bit set for DP encoders. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8129
/*
 * Program the CHV PLL dividers, loop filter and lock detect settings
 * over DPIO before the PLL is enabled. The loop filter coefficients
 * are selected by VCO frequency band; the write sequence is
 * order-dependent and must not be reordered.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* M2 is split: low 22 bits fractional, the rest integer. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8234
8235 /**
8236  * vlv_force_pll_on - forcibly enable just the PLL
8237  * @dev_priv: i915 private structure
8238  * @pipe: pipe PLL to enable
8239  * @dpll: PLL configuration
8240  *
8241  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8242  * in cases where we need the PLL enabled even when @pipe is not going to
8243  * be enabled.
8244  */
8245 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8246                      const struct dpll *dpll)
8247 {
8248         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8249         struct intel_crtc_state *pipe_config;
8250
8251         pipe_config = intel_crtc_state_alloc(crtc);
8252         if (!pipe_config)
8253                 return -ENOMEM;
8254
8255         pipe_config->cpu_transcoder = (enum transcoder)pipe;
8256         pipe_config->pixel_multiplier = 1;
8257         pipe_config->dpll = *dpll;
8258
8259         if (IS_CHERRYVIEW(dev_priv)) {
8260                 chv_compute_dpll(crtc, pipe_config);
8261                 chv_prepare_pll(crtc, pipe_config);
8262                 chv_enable_pll(crtc, pipe_config);
8263         } else {
8264                 vlv_compute_dpll(crtc, pipe_config);
8265                 vlv_prepare_pll(crtc, pipe_config);
8266                 vlv_enable_pll(crtc, pipe_config);
8267         }
8268
8269         kfree(pipe_config);
8270
8271         return 0;
8272 }
8273
8274 /**
8275  * vlv_force_pll_off - forcibly disable just the PLL
8276  * @dev_priv: i915 private structure
8277  * @pipe: pipe PLL to disable
8278  *
8279  * Disable the PLL for @pipe. To be used in cases where we need
8280  * the PLL enabled even when @pipe is not going to be enabled.
8281  */
8282 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8283 {
8284         if (IS_CHERRYVIEW(dev_priv))
8285                 chv_disable_pll(dev_priv, pipe);
8286         else
8287                 vlv_disable_pll(dev_priv, pipe);
8288 }
8289
/*
 * Compute the DPLL (and on gen4+ DPLL_MD) register values for i9xx-era
 * hardware and store them in the crtc state's dpll_hw_state. The bit
 * layout of the P1/P2 post divider fields differs between Pineview and
 * the other platforms.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* SDVO pixel multiplier lives in the DPLL itself on these. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC for LVDS, or DREF. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8362
/*
 * Compute the DPLL register value for gen2 (i8xx) hardware and store it
 * in the crtc state's dpll_hw_state. LVDS uses a bitmask-style P1 field;
 * other outputs use an encoded P1/P2.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Reference clock selection: SSC for LVDS panels, DREF otherwise. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8412
/*
 * Program the transcoder timing registers (H/V total, blank, sync and
 * vsyncshift) from the crtc state's adjusted mode. For interlaced modes
 * the vtotal/vblank-end values are adjusted locally (the adjusted_mode
 * itself must not be modified), and includes the HSW EDP-input VTOTAL
 * mirroring workaround.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Each register packs (start - 1) in the low and (end - 1) in the
	 * high 16 bits. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8474
8475 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8476 {
8477         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8478         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8479         enum pipe pipe = crtc->pipe;
8480
8481         /* pipesrc controls the size that is scaled from, which should
8482          * always be the user's requested size.
8483          */
8484         I915_WRITE(PIPESRC(pipe),
8485                    ((crtc_state->pipe_src_w - 1) << 16) |
8486                    (crtc_state->pipe_src_h - 1));
8487 }
8488
8489 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8490 {
8491         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8492         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8493
8494         if (IS_GEN(dev_priv, 2))
8495                 return false;
8496
8497         if (INTEL_GEN(dev_priv) >= 9 ||
8498             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8499                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8500         else
8501                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8502 }
8503
8504 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8505                                    struct intel_crtc_state *pipe_config)
8506 {
8507         struct drm_device *dev = crtc->base.dev;
8508         struct drm_i915_private *dev_priv = to_i915(dev);
8509         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8510         u32 tmp;
8511
8512         tmp = I915_READ(HTOTAL(cpu_transcoder));
8513         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8514         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8515
8516         if (!transcoder_is_dsi(cpu_transcoder)) {
8517                 tmp = I915_READ(HBLANK(cpu_transcoder));
8518                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
8519                                                         (tmp & 0xffff) + 1;
8520                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
8521                                                 ((tmp >> 16) & 0xffff) + 1;
8522         }
8523         tmp = I915_READ(HSYNC(cpu_transcoder));
8524         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8525         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8526
8527         tmp = I915_READ(VTOTAL(cpu_transcoder));
8528         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8529         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8530
8531         if (!transcoder_is_dsi(cpu_transcoder)) {
8532                 tmp = I915_READ(VBLANK(cpu_transcoder));
8533                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
8534                                                         (tmp & 0xffff) + 1;
8535                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
8536                                                 ((tmp >> 16) & 0xffff) + 1;
8537         }
8538         tmp = I915_READ(VSYNC(cpu_transcoder));
8539         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8540         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8541
8542         if (intel_pipe_is_interlaced(pipe_config)) {
8543                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8544                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8545                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
8546         }
8547 }
8548
8549 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8550                                     struct intel_crtc_state *pipe_config)
8551 {
8552         struct drm_device *dev = crtc->base.dev;
8553         struct drm_i915_private *dev_priv = to_i915(dev);
8554         u32 tmp;
8555
8556         tmp = I915_READ(PIPESRC(crtc->pipe));
8557         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8558         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8559
8560         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8561         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8562 }
8563
8564 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8565                                  struct intel_crtc_state *pipe_config)
8566 {
8567         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8568         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8569         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8570         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8571
8572         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8573         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8574         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8575         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8576
8577         mode->flags = pipe_config->hw.adjusted_mode.flags;
8578         mode->type = DRM_MODE_TYPE_DRIVER;
8579
8580         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8581
8582         mode->hsync = drm_mode_hsync(mode);
8583         mode->vrefresh = drm_mode_vrefresh(mode);
8584         drm_mode_set_name(mode);
8585 }
8586
8587 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8588 {
8589         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8590         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8591         u32 pipeconf;
8592
8593         pipeconf = 0;
8594
8595         /* we keep both pipes enabled on 830 */
8596         if (IS_I830(dev_priv))
8597                 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8598
8599         if (crtc_state->double_wide)
8600                 pipeconf |= PIPECONF_DOUBLE_WIDE;
8601
8602         /* only g4x and later have fancy bpc/dither controls */
8603         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8604             IS_CHERRYVIEW(dev_priv)) {
8605                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8606                 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8607                         pipeconf |= PIPECONF_DITHER_EN |
8608                                     PIPECONF_DITHER_TYPE_SP;
8609
8610                 switch (crtc_state->pipe_bpp) {
8611                 case 18:
8612                         pipeconf |= PIPECONF_6BPC;
8613                         break;
8614                 case 24:
8615                         pipeconf |= PIPECONF_8BPC;
8616                         break;
8617                 case 30:
8618                         pipeconf |= PIPECONF_10BPC;
8619                         break;
8620                 default:
8621                         /* Case prevented by intel_choose_pipe_bpp_dither. */
8622                         BUG();
8623                 }
8624         }
8625
8626         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8627                 if (INTEL_GEN(dev_priv) < 4 ||
8628                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8629                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8630                 else
8631                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8632         } else {
8633                 pipeconf |= PIPECONF_PROGRESSIVE;
8634         }
8635
8636         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8637              crtc_state->limited_color_range)
8638                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8639
8640         pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8641
8642         pipeconf |= PIPECONF_FRAME_START_DELAY(0);
8643
8644         I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8645         POSTING_READ(PIPECONF(crtc->pipe));
8646 }
8647
8648 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8649                                    struct intel_crtc_state *crtc_state)
8650 {
8651         struct drm_device *dev = crtc->base.dev;
8652         struct drm_i915_private *dev_priv = to_i915(dev);
8653         const struct intel_limit *limit;
8654         int refclk = 48000;
8655
8656         memset(&crtc_state->dpll_hw_state, 0,
8657                sizeof(crtc_state->dpll_hw_state));
8658
8659         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8660                 if (intel_panel_use_ssc(dev_priv)) {
8661                         refclk = dev_priv->vbt.lvds_ssc_freq;
8662                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8663                 }
8664
8665                 limit = &intel_limits_i8xx_lvds;
8666         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8667                 limit = &intel_limits_i8xx_dvo;
8668         } else {
8669                 limit = &intel_limits_i8xx_dac;
8670         }
8671
8672         if (!crtc_state->clock_set &&
8673             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8674                                  refclk, NULL, &crtc_state->dpll)) {
8675                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8676                 return -EINVAL;
8677         }
8678
8679         i8xx_compute_dpll(crtc, crtc_state, NULL);
8680
8681         return 0;
8682 }
8683
8684 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8685                                   struct intel_crtc_state *crtc_state)
8686 {
8687         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8688         const struct intel_limit *limit;
8689         int refclk = 96000;
8690
8691         memset(&crtc_state->dpll_hw_state, 0,
8692                sizeof(crtc_state->dpll_hw_state));
8693
8694         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8695                 if (intel_panel_use_ssc(dev_priv)) {
8696                         refclk = dev_priv->vbt.lvds_ssc_freq;
8697                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8698                 }
8699
8700                 if (intel_is_dual_link_lvds(dev_priv))
8701                         limit = &intel_limits_g4x_dual_channel_lvds;
8702                 else
8703                         limit = &intel_limits_g4x_single_channel_lvds;
8704         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8705                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8706                 limit = &intel_limits_g4x_hdmi;
8707         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8708                 limit = &intel_limits_g4x_sdvo;
8709         } else {
8710                 /* The option is for other outputs */
8711                 limit = &intel_limits_i9xx_sdvo;
8712         }
8713
8714         if (!crtc_state->clock_set &&
8715             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8716                                 refclk, NULL, &crtc_state->dpll)) {
8717                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8718                 return -EINVAL;
8719         }
8720
8721         i9xx_compute_dpll(crtc, crtc_state, NULL);
8722
8723         return 0;
8724 }
8725
8726 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8727                                   struct intel_crtc_state *crtc_state)
8728 {
8729         struct drm_device *dev = crtc->base.dev;
8730         struct drm_i915_private *dev_priv = to_i915(dev);
8731         const struct intel_limit *limit;
8732         int refclk = 96000;
8733
8734         memset(&crtc_state->dpll_hw_state, 0,
8735                sizeof(crtc_state->dpll_hw_state));
8736
8737         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8738                 if (intel_panel_use_ssc(dev_priv)) {
8739                         refclk = dev_priv->vbt.lvds_ssc_freq;
8740                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8741                 }
8742
8743                 limit = &intel_limits_pineview_lvds;
8744         } else {
8745                 limit = &intel_limits_pineview_sdvo;
8746         }
8747
8748         if (!crtc_state->clock_set &&
8749             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8750                                 refclk, NULL, &crtc_state->dpll)) {
8751                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8752                 return -EINVAL;
8753         }
8754
8755         i9xx_compute_dpll(crtc, crtc_state, NULL);
8756
8757         return 0;
8758 }
8759
8760 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8761                                    struct intel_crtc_state *crtc_state)
8762 {
8763         struct drm_device *dev = crtc->base.dev;
8764         struct drm_i915_private *dev_priv = to_i915(dev);
8765         const struct intel_limit *limit;
8766         int refclk = 96000;
8767
8768         memset(&crtc_state->dpll_hw_state, 0,
8769                sizeof(crtc_state->dpll_hw_state));
8770
8771         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8772                 if (intel_panel_use_ssc(dev_priv)) {
8773                         refclk = dev_priv->vbt.lvds_ssc_freq;
8774                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8775                 }
8776
8777                 limit = &intel_limits_i9xx_lvds;
8778         } else {
8779                 limit = &intel_limits_i9xx_sdvo;
8780         }
8781
8782         if (!crtc_state->clock_set &&
8783             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8784                                  refclk, NULL, &crtc_state->dpll)) {
8785                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8786                 return -EINVAL;
8787         }
8788
8789         i9xx_compute_dpll(crtc, crtc_state, NULL);
8790
8791         return 0;
8792 }
8793
8794 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8795                                   struct intel_crtc_state *crtc_state)
8796 {
8797         int refclk = 100000;
8798         const struct intel_limit *limit = &intel_limits_chv;
8799
8800         memset(&crtc_state->dpll_hw_state, 0,
8801                sizeof(crtc_state->dpll_hw_state));
8802
8803         if (!crtc_state->clock_set &&
8804             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8805                                 refclk, NULL, &crtc_state->dpll)) {
8806                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8807                 return -EINVAL;
8808         }
8809
8810         chv_compute_dpll(crtc, crtc_state);
8811
8812         return 0;
8813 }
8814
8815 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8816                                   struct intel_crtc_state *crtc_state)
8817 {
8818         int refclk = 100000;
8819         const struct intel_limit *limit = &intel_limits_vlv;
8820
8821         memset(&crtc_state->dpll_hw_state, 0,
8822                sizeof(crtc_state->dpll_hw_state));
8823
8824         if (!crtc_state->clock_set &&
8825             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8826                                 refclk, NULL, &crtc_state->dpll)) {
8827                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8828                 return -EINVAL;
8829         }
8830
8831         vlv_compute_dpll(crtc, crtc_state);
8832
8833         return 0;
8834 }
8835
8836 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8837 {
8838         if (IS_I830(dev_priv))
8839                 return false;
8840
8841         return INTEL_GEN(dev_priv) >= 4 ||
8842                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8843 }
8844
8845 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8846                                  struct intel_crtc_state *pipe_config)
8847 {
8848         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8849         u32 tmp;
8850
8851         if (!i9xx_has_pfit(dev_priv))
8852                 return;
8853
8854         tmp = I915_READ(PFIT_CONTROL);
8855         if (!(tmp & PFIT_ENABLE))
8856                 return;
8857
8858         /* Check whether the pfit is attached to our pipe. */
8859         if (INTEL_GEN(dev_priv) < 4) {
8860                 if (crtc->pipe != PIPE_B)
8861                         return;
8862         } else {
8863                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8864                         return;
8865         }
8866
8867         pipe_config->gmch_pfit.control = tmp;
8868         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8869 }
8870
8871 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8872                                struct intel_crtc_state *pipe_config)
8873 {
8874         struct drm_device *dev = crtc->base.dev;
8875         struct drm_i915_private *dev_priv = to_i915(dev);
8876         enum pipe pipe = crtc->pipe;
8877         struct dpll clock;
8878         u32 mdiv;
8879         int refclk = 100000;
8880
8881         /* In case of DSI, DPLL will not be used */
8882         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8883                 return;
8884
8885         vlv_dpio_get(dev_priv);
8886         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8887         vlv_dpio_put(dev_priv);
8888
8889         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8890         clock.m2 = mdiv & DPIO_M2DIV_MASK;
8891         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8892         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8893         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8894
8895         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8896 }
8897
/*
 * Read back the primary plane state programmed by the BIOS/GOP and
 * describe it as an initial plane config, so the boot framebuffer can
 * be inherited for flicker-free takeover. On any failure the config is
 * simply left unfilled.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to inherit if the plane is disabled */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(DSPCNTR(i9xx_plane));

        /* Tiling and rotation bits only exist on gen4+ */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        /* CHV pipe B primary plane additionally supports horizontal mirroring */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /*
         * The surface base/offset registers differ per generation:
         * HSW/BDW use DSPOFFSET, gen4+ DSPTILEOFF/DSPLINOFF depending
         * on tiling, and pre-gen4 has only a single DSPADDR.
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = I915_READ(DSPOFFSET(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(i9xx_plane));
                else
                        offset = I915_READ(DSPLINOFF(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* Derive fb dimensions from the pipe source size (size-1 encoded) */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
8980
/*
 * Read back the CHV DPLL dividers over the DPIO sideband and compute
 * the resulting port clock. No-op for DSI, where the DPLL is unused.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        /* m2 integer part in DW0; 22-bit fractional part in DW2 if enabled */
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9014
9015 static enum intel_output_format
9016 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9017 {
9018         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9019         u32 tmp;
9020
9021         tmp = I915_READ(PIPEMISC(crtc->pipe));
9022
9023         if (tmp & PIPEMISC_YUV420_ENABLE) {
9024                 /* We support 4:2:0 in full blend mode only */
9025                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9026
9027                 return INTEL_OUTPUT_FORMAT_YCBCR420;
9028         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9029                 return INTEL_OUTPUT_FORMAT_YCBCR444;
9030         } else {
9031                 return INTEL_OUTPUT_FORMAT_RGB;
9032         }
9033 }
9034
9035 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9036 {
9037         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9038         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9039         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9040         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9041         u32 tmp;
9042
9043         tmp = I915_READ(DSPCNTR(i9xx_plane));
9044
9045         if (tmp & DISPPLANE_GAMMA_ENABLE)
9046                 crtc_state->gamma_enable = true;
9047
9048         if (!HAS_GMCH(dev_priv) &&
9049             tmp & DISPPLANE_PIPE_CSC_ENABLE)
9050                 crtc_state->csc_enable = true;
9051 }
9052
/*
 * Read back the full pipe state for pre-ILK/VLV/CHV platforms into
 * @pipe_config. Returns true if the pipe is powered and enabled and
 * the state was filled in, false otherwise. Holds a power-well
 * reference for the duration of the register reads.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        /* Bail (and don't touch registers) if the pipe power well is off */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
        pipe_config->master_transcoder = INVALID_TRANSCODER;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* only g4x and later have bpc controls in PIPECONF */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Recover the pixel multiplier; its location varies per platform */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9171
/*
 * ironlake_init_pch_refclk - program the PCH display reference clock tree
 * @dev_priv: i915 device
 *
 * Computes the desired PCH_DREF_CONTROL value from the connected outputs
 * (LVDS/eDP panels, the external CK505 clock chip, SSC usage by active
 * PCH DPLLs) and then walks the hardware from the current register value
 * to the final one, enabling/disabling one clock source at a time with
 * the required settle delays between steps.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Only port A eDP is driven by the CPU source output */
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		/* On IBX the CK505 presence comes from the VBT, and SSC is
		 * only usable when the CK505 is there. */
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but an enabled PCH DPLL still consumes SSC:
		 * keep the SSC source running for it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* After all intermediate steps, val must equal the precomputed
	 * final value, or the state walk above has a bug. */
	BUG_ON(val != final);
}
9338
9339 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9340 {
9341         u32 tmp;
9342
9343         tmp = I915_READ(SOUTH_CHICKEN2);
9344         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9345         I915_WRITE(SOUTH_CHICKEN2, tmp);
9346
9347         if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
9348                         FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9349                 DRM_ERROR("FDI mPHY reset assert timeout\n");
9350
9351         tmp = I915_READ(SOUTH_CHICKEN2);
9352         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9353         I915_WRITE(SOUTH_CHICKEN2, tmp);
9354
9355         if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
9356                          FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9357                 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
9358 }
9359
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/*
	 * Program the FDI mPHY over the IOSF sideband (SBI_MPHY).
	 * The raw offsets and values below come straight from the
	 * WaMPhyProgramming:hsw workaround sequence; there are no
	 * symbolic register definitions for them.  Note that each
	 * 0x20xx access is mirrored by an identical 0x21xx access --
	 * presumably the per-channel copies of the same register
	 * (NOTE(review): confirm against BSpec).
	 */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9434
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible combinations: FDI requires downspread, and
	 * the LP variants of the PCH have no FDI at all. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block while keeping the alternate (PATHALT)
	 * path selected, then give it time to spin up. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Deselect the alternate path so the spread clock is used,
		 * and (re)program the FDI mPHY when FDI is in play. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Finally set the CLKOUT_DP buffer config bit; the register used
	 * differs between LP and non-LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9479
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the CLKOUT_DP buffer config bit first; the register used
	 * differs between LP and non-LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* If the SSC block is still running, select the alternate path
	 * before disabling it -- the reverse of the enable sequence. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9505
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index
 * (0..20). */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * Per-bend values for the low 16 bits of SBI_SSCDIVINTPHASE, indexed
 * via BEND_IDX() (entry 0 is -50 steps, entry 20 is +50 steps).
 * Consumed by lpt_bend_clkout_dp() below.  Adjacent step pairs share a
 * value; the half-step difference is handled by SSCDITHPHASE dithering.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9531
9532 /*
9533  * Bend CLKOUT_DP
9534  * steps -50 to 50 inclusive, in steps of 5
9535  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9536  * change in clock period = -(steps / 10) * 5.787 ps
9537  */
9538 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9539 {
9540         u32 tmp;
9541         int idx = BEND_IDX(steps);
9542
9543         if (WARN_ON(steps % 5 != 0))
9544                 return;
9545
9546         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9547                 return;
9548
9549         mutex_lock(&dev_priv->sb_lock);
9550
9551         if (steps % 10 != 0)
9552                 tmp = 0xAAAAAAAB;
9553         else
9554                 tmp = 0x00000000;
9555         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9556
9557         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9558         tmp &= 0xffff0000;
9559         tmp |= sscdivintphase[idx];
9560         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9561
9562         mutex_unlock(&dev_priv->sb_lock);
9563 }
9564
9565 #undef BEND_IDX
9566
9567 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9568 {
9569         u32 fuse_strap = I915_READ(FUSE_STRAP);
9570         u32 ctl = I915_READ(SPLL_CTL);
9571
9572         if ((ctl & SPLL_PLL_ENABLE) == 0)
9573                 return false;
9574
9575         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9576             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9577                 return true;
9578
9579         if (IS_BROADWELL(dev_priv) &&
9580             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9581                 return true;
9582
9583         return false;
9584 }
9585
9586 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9587                                enum intel_dpll_id id)
9588 {
9589         u32 fuse_strap = I915_READ(FUSE_STRAP);
9590         u32 ctl = I915_READ(WRPLL_CTL(id));
9591
9592         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9593                 return false;
9594
9595         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9596                 return true;
9597
9598         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9599             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9600             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9601                 return true;
9602
9603         return false;
9604 }
9605
9606 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9607 {
9608         struct intel_encoder *encoder;
9609         bool has_fdi = false;
9610
9611         for_each_intel_encoder(&dev_priv->drm, encoder) {
9612                 switch (encoder->type) {
9613                 case INTEL_OUTPUT_ANALOG:
9614                         has_fdi = true;
9615                         break;
9616                 default:
9617                         break;
9618                 }
9619         }
9620
9621         /*
9622          * The BIOS may have decided to use the PCH SSC
9623          * reference so we must not disable it until the
9624          * relevant PLLs have stopped relying on it. We'll
9625          * just leave the PCH SSC reference enabled in case
9626          * any active PLL is using it. It will get disabled
9627          * after runtime suspend if we don't have FDI.
9628          *
9629          * TODO: Move the whole reference clock handling
9630          * to the modeset sequence proper so that we can
9631          * actually enable/disable/reconfigure these things
9632          * safely. To do that we need to introduce a real
9633          * clock hierarchy. That would also allow us to do
9634          * clock bending finally.
9635          */
9636         dev_priv->pch_ssc_use = 0;
9637
9638         if (spll_uses_pch_ssc(dev_priv)) {
9639                 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9640                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9641         }
9642
9643         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9644                 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9645                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9646         }
9647
9648         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9649                 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9650                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9651         }
9652
9653         if (dev_priv->pch_ssc_use)
9654                 return;
9655
9656         if (has_fdi) {
9657                 lpt_bend_clkout_dp(dev_priv, 0);
9658                 lpt_enable_clkout_dp(dev_priv, true, true);
9659         } else {
9660                 lpt_disable_clkout_dp(dev_priv);
9661         }
9662 }
9663
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
9674
9675 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9676 {
9677         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9678         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9679         enum pipe pipe = crtc->pipe;
9680         u32 val;
9681
9682         val = 0;
9683
9684         switch (crtc_state->pipe_bpp) {
9685         case 18:
9686                 val |= PIPECONF_6BPC;
9687                 break;
9688         case 24:
9689                 val |= PIPECONF_8BPC;
9690                 break;
9691         case 30:
9692                 val |= PIPECONF_10BPC;
9693                 break;
9694         case 36:
9695                 val |= PIPECONF_12BPC;
9696                 break;
9697         default:
9698                 /* Case prevented by intel_choose_pipe_bpp_dither. */
9699                 BUG();
9700         }
9701
9702         if (crtc_state->dither)
9703                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9704
9705         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9706                 val |= PIPECONF_INTERLACED_ILK;
9707         else
9708                 val |= PIPECONF_PROGRESSIVE;
9709
9710         /*
9711          * This would end up with an odd purple hue over
9712          * the entire display. Make sure we don't do it.
9713          */
9714         WARN_ON(crtc_state->limited_color_range &&
9715                 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9716
9717         if (crtc_state->limited_color_range)
9718                 val |= PIPECONF_COLOR_RANGE_SELECT;
9719
9720         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9721                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9722
9723         val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9724
9725         val |= PIPECONF_FRAME_START_DELAY(0);
9726
9727         I915_WRITE(PIPECONF(pipe), val);
9728         POSTING_READ(PIPECONF(pipe));
9729 }
9730
9731 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9732 {
9733         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9734         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9735         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9736         u32 val = 0;
9737
9738         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9739                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9740
9741         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9742                 val |= PIPECONF_INTERLACED_ILK;
9743         else
9744                 val |= PIPECONF_PROGRESSIVE;
9745
9746         if (IS_HASWELL(dev_priv) &&
9747             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9748                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9749
9750         I915_WRITE(PIPECONF(cpu_transcoder), val);
9751         POSTING_READ(PIPECONF(cpu_transcoder));
9752 }
9753
9754 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9755 {
9756         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9757         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9758         u32 val = 0;
9759
9760         switch (crtc_state->pipe_bpp) {
9761         case 18:
9762                 val |= PIPEMISC_DITHER_6_BPC;
9763                 break;
9764         case 24:
9765                 val |= PIPEMISC_DITHER_8_BPC;
9766                 break;
9767         case 30:
9768                 val |= PIPEMISC_DITHER_10_BPC;
9769                 break;
9770         case 36:
9771                 val |= PIPEMISC_DITHER_12_BPC;
9772                 break;
9773         default:
9774                 MISSING_CASE(crtc_state->pipe_bpp);
9775                 break;
9776         }
9777
9778         if (crtc_state->dither)
9779                 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9780
9781         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9782             crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9783                 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9784
9785         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9786                 val |= PIPEMISC_YUV420_ENABLE |
9787                         PIPEMISC_YUV420_MODE_FULL_BLEND;
9788
9789         if (INTEL_GEN(dev_priv) >= 11 &&
9790             (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9791                                            BIT(PLANE_CURSOR))) == 0)
9792                 val |= PIPEMISC_HDR_MODE_PRECISION;
9793
9794         I915_WRITE(PIPEMISC(crtc->pipe), val);
9795 }
9796
9797 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9798 {
9799         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9800         u32 tmp;
9801
9802         tmp = I915_READ(PIPEMISC(crtc->pipe));
9803
9804         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9805         case PIPEMISC_DITHER_6_BPC:
9806                 return 18;
9807         case PIPEMISC_DITHER_8_BPC:
9808                 return 24;
9809         case PIPEMISC_DITHER_10_BPC:
9810                 return 30;
9811         case PIPEMISC_DITHER_12_BPC:
9812                 return 36;
9813         default:
9814                 MISSING_CASE(tmp);
9815                 return 0;
9816         }
9817 }
9818
9819 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9820 {
9821         /*
9822          * Account for spread spectrum to avoid
9823          * oversubscribing the link. Max center spread
9824          * is 2.5%; use 5% for safety's sake.
9825          */
9826         u32 bps = target_clock * bpp * 21 / 20;
9827         return DIV_ROUND_UP(bps, link_bw * 8);
9828 }
9829
9830 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9831 {
9832         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9833 }
9834
/*
 * ironlake_compute_dpll - encode ILK DPLL/FP register values
 * @crtc: CRTC the PLL is computed for
 * @crtc_state: state carrying the precomputed dividers in ->dpll
 * @reduced_clock: optional reduced-clock dividers for FP1, or NULL
 *
 * Translates the divider values in @crtc_state->dpll into DPLL, FP0 and
 * FP1 register contents and stores them in @crtc_state->dpll_hw_state.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 mirrors FP0 unless distinct reduced-clock dividers exist. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* Encode the p2 post divider. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that actually use SSC. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9936
9937 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9938                                        struct intel_crtc_state *crtc_state)
9939 {
9940         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9941         struct intel_atomic_state *state =
9942                 to_intel_atomic_state(crtc_state->uapi.state);
9943         const struct intel_limit *limit;
9944         int refclk = 120000;
9945
9946         memset(&crtc_state->dpll_hw_state, 0,
9947                sizeof(crtc_state->dpll_hw_state));
9948
9949         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9950         if (!crtc_state->has_pch_encoder)
9951                 return 0;
9952
9953         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9954                 if (intel_panel_use_ssc(dev_priv)) {
9955                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9956                                       dev_priv->vbt.lvds_ssc_freq);
9957                         refclk = dev_priv->vbt.lvds_ssc_freq;
9958                 }
9959
9960                 if (intel_is_dual_link_lvds(dev_priv)) {
9961                         if (refclk == 100000)
9962                                 limit = &intel_limits_ironlake_dual_lvds_100m;
9963                         else
9964                                 limit = &intel_limits_ironlake_dual_lvds;
9965                 } else {
9966                         if (refclk == 100000)
9967                                 limit = &intel_limits_ironlake_single_lvds_100m;
9968                         else
9969                                 limit = &intel_limits_ironlake_single_lvds;
9970                 }
9971         } else {
9972                 limit = &intel_limits_ironlake_dac;
9973         }
9974
9975         if (!crtc_state->clock_set &&
9976             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9977                                 refclk, NULL, &crtc_state->dpll)) {
9978                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9979                 return -EINVAL;
9980         }
9981
9982         ironlake_compute_dpll(crtc, crtc_state, NULL);
9983
9984         if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
9985                 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9986                               pipe_name(crtc->pipe));
9987                 return -EINVAL;
9988         }
9989
9990         return 0;
9991 }
9992
9993 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9994                                          struct intel_link_m_n *m_n)
9995 {
9996         struct drm_device *dev = crtc->base.dev;
9997         struct drm_i915_private *dev_priv = to_i915(dev);
9998         enum pipe pipe = crtc->pipe;
9999
10000         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
10001         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
10002         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
10003                 & ~TU_SIZE_MASK;
10004         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
10005         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
10006                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10007 }
10008
/*
 * Read back the link M/N values (and, when present, the secondary M2/N2
 * set) for the given CPU transcoder.
 *
 * Gen5+ uses per-transcoder registers; older platforms use the per-pipe
 * G4X register layout.  @m2_n2 may be NULL; it is only filled in when
 * the transcoder actually has an M2/N2 register set
 * (transcoder_has_m2_n2()).  As with the PCH variant, TU size lives in
 * the DATA_M register and is stored minus one by the hardware.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
10045
10046 void intel_dp_get_m_n(struct intel_crtc *crtc,
10047                       struct intel_crtc_state *pipe_config)
10048 {
10049         if (pipe_config->has_pch_encoder)
10050                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10051         else
10052                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10053                                              &pipe_config->dp_m_n,
10054                                              &pipe_config->dp_m2_n2);
10055 }
10056
/*
 * Read back the FDI link M/N values for @crtc's CPU transcoder.
 * FDI has no secondary M2/N2 set, hence the NULL.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
                                        struct intel_crtc_state *pipe_config)
{
        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
                                     &pipe_config->fdi_m_n, NULL);
}
10063
/*
 * Read back the SKL+ pipe scaler ("panel fitter") state for @crtc.
 *
 * Scans the pipe's scalers and latches the first one that is enabled
 * and bound to the pipe output itself (plane select bits clear, i.e.
 * not a plane scaler) into pch_pfit and the scaler state.  The CRTC's
 * bit in scaler_users is set or cleared accordingly.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	u32 ps_ctrl = 0;
	int id = -1; /* -1 == no scaler attached to this pipe */
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			scaler_state->scalers[i].in_use = true;
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
10094
/*
 * Read out the primary plane's hardware state at driver load/resume and
 * build an intel_framebuffer in @plane_config describing what the
 * BIOS/GOP left enabled, so the boot framebuffer can be taken over.
 *
 * On any failure nothing is published: @plane_config->fb stays unset and
 * the partially-built fb is freed on the error path.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is off. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The PLANE_CTL format field widened on gen11 (icelake). */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode lives in PLANE_COLOR_CTL on glk and gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/*
	 * Map the hw tiling + render decompression bits to a format
	 * modifier.  Gen12 uses a different render compression CCS
	 * layout than gen9/gen11.
	 */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface address is 4k aligned; the low bits are not address. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height - 1) << 16 | (width - 1). */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units that depend on format/modifier. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10223
/*
 * Read back the ILK-style panel fitter state for @crtc.
 * Position/size are only read when the fitter is actually enabled.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN(dev_priv, 7)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
10247
10248 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10249                                      struct intel_crtc_state *pipe_config)
10250 {
10251         struct drm_device *dev = crtc->base.dev;
10252         struct drm_i915_private *dev_priv = to_i915(dev);
10253         enum intel_display_power_domain power_domain;
10254         intel_wakeref_t wakeref;
10255         u32 tmp;
10256         bool ret;
10257
10258         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10259         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10260         if (!wakeref)
10261                 return false;
10262
10263         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10264         pipe_config->shared_dpll = NULL;
10265         pipe_config->master_transcoder = INVALID_TRANSCODER;
10266
10267         ret = false;
10268         tmp = I915_READ(PIPECONF(crtc->pipe));
10269         if (!(tmp & PIPECONF_ENABLE))
10270                 goto out;
10271
10272         switch (tmp & PIPECONF_BPC_MASK) {
10273         case PIPECONF_6BPC:
10274                 pipe_config->pipe_bpp = 18;
10275                 break;
10276         case PIPECONF_8BPC:
10277                 pipe_config->pipe_bpp = 24;
10278                 break;
10279         case PIPECONF_10BPC:
10280                 pipe_config->pipe_bpp = 30;
10281                 break;
10282         case PIPECONF_12BPC:
10283                 pipe_config->pipe_bpp = 36;
10284                 break;
10285         default:
10286                 break;
10287         }
10288
10289         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10290                 pipe_config->limited_color_range = true;
10291
10292         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10293         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10294         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10295                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10296                 break;
10297         default:
10298                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10299                 break;
10300         }
10301
10302         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10303                 PIPECONF_GAMMA_MODE_SHIFT;
10304
10305         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10306
10307         i9xx_get_pipe_color_config(pipe_config);
10308         intel_color_get_config(pipe_config);
10309
10310         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10311                 struct intel_shared_dpll *pll;
10312                 enum intel_dpll_id pll_id;
10313
10314                 pipe_config->has_pch_encoder = true;
10315
10316                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10317                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10318                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10319
10320                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10321
10322                 if (HAS_PCH_IBX(dev_priv)) {
10323                         /*
10324                          * The pipe->pch transcoder and pch transcoder->pll
10325                          * mapping is fixed.
10326                          */
10327                         pll_id = (enum intel_dpll_id) crtc->pipe;
10328                 } else {
10329                         tmp = I915_READ(PCH_DPLL_SEL);
10330                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10331                                 pll_id = DPLL_ID_PCH_PLL_B;
10332                         else
10333                                 pll_id= DPLL_ID_PCH_PLL_A;
10334                 }
10335
10336                 pipe_config->shared_dpll =
10337                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10338                 pll = pipe_config->shared_dpll;
10339
10340                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10341                                                 &pipe_config->dpll_hw_state));
10342
10343                 tmp = pipe_config->dpll_hw_state.dpll;
10344                 pipe_config->pixel_multiplier =
10345                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10346                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10347
10348                 ironlake_pch_clock_get(crtc, pipe_config);
10349         } else {
10350                 pipe_config->pixel_multiplier = 1;
10351         }
10352
10353         intel_get_pipe_timings(crtc, pipe_config);
10354         intel_get_pipe_src_size(crtc, pipe_config);
10355
10356         ironlake_get_pfit_config(crtc, pipe_config);
10357
10358         ret = true;
10359
10360 out:
10361         intel_display_power_put(dev_priv, power_domain, wakeref);
10362
10363         return ret;
10364 }
10365 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10366                                       struct intel_crtc_state *crtc_state)
10367 {
10368         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10369         struct intel_atomic_state *state =
10370                 to_intel_atomic_state(crtc_state->uapi.state);
10371
10372         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10373             INTEL_GEN(dev_priv) >= 11) {
10374                 struct intel_encoder *encoder =
10375                         intel_get_crtc_new_encoder(state, crtc_state);
10376
10377                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10378                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10379                                       pipe_name(crtc->pipe));
10380                         return -EINVAL;
10381                 }
10382         }
10383
10384         return 0;
10385 }
10386
/*
 * CNL: determine which shared DPLL feeds @port by decoding the per-port
 * DDI clock select field of DPCLKA_CFGCR0, and store it in @pipe_config.
 * Bails (with a WARN) on values outside the DPLL0-DPLL2 range.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10402
/*
 * ICL+: determine which DPLL drives @port.
 *
 * Combo PHY ports decode the clock select field of ICL_DPCLKA_CFGCR0;
 * Type-C ports use either their dedicated MG PHY PLL or the shared TBT
 * PLL, depending on DDI_CLK_SEL.  The resolved PLL is stored in
 * pipe_config->icl_port_dplls[] and made the active port DPLL.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* Anything else must be one of the TBT selections. */
			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10439
10440 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10441                                 enum port port,
10442                                 struct intel_crtc_state *pipe_config)
10443 {
10444         enum intel_dpll_id id;
10445
10446         switch (port) {
10447         case PORT_A:
10448                 id = DPLL_ID_SKL_DPLL0;
10449                 break;
10450         case PORT_B:
10451                 id = DPLL_ID_SKL_DPLL1;
10452                 break;
10453         case PORT_C:
10454                 id = DPLL_ID_SKL_DPLL2;
10455                 break;
10456         default:
10457                 DRM_ERROR("Incorrect port type\n");
10458                 return;
10459         }
10460
10461         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10462 }
10463
/*
 * SKL/KBL: determine which DPLL feeds @port from the DPLL_CTRL2 clock
 * select field, and store it in @pipe_config.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/*
	 * NOTE(review): shift looks like an open-coded
	 * DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) - confirm against the
	 * register definition.
	 */
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10479
/*
 * HSW/BDW: map the PORT_CLK_SEL value of @port to the shared DPLL
 * driving it (WRPLL1/2, SPLL, or one of the fixed-frequency LCPLL
 * link rates).  PORT_CLK_SEL_NONE (and unknown values, after a
 * MISSING_CASE splat) leave @pipe_config untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10515
/*
 * Determine which CPU transcoder drives @crtc and whether it is enabled.
 *
 * The default pipe->transcoder mapping is 1:1, but the eDP and (gen11+)
 * DSI "panel" transcoders can be routed to any pipe, so those are probed
 * via TRANS_DDI_FUNC_CTL first and override the default when they target
 * this pipe.  The display power reference taken for the transcoder's
 * power domain is recorded in @wakerefs and @power_domain_mask for the
 * caller to release.
 *
 * Returns true when the transcoder's PIPECONF is enabled; false also
 * when the transcoder power domain turned out to be off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe the panel transcoder is routed to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	/* Caller owns this reference now; record it for release. */
	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10614
/*
 * BXT/GLK DSI: figure out whether one of the DSI transcoders (ports A/C)
 * is driving @crtc.
 *
 * For every DSI transcoder whose power domain is up, a display power
 * reference is taken and recorded in @wakerefs / @power_domain_mask for
 * the caller to release.  Returns true when pipe_config->cpu_transcoder
 * was set to a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		/* Caller owns this reference now; record it for release. */
		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip DSI transcoders routed to a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10669
/*
 * Read back which DDI port and shared DPLL are driving @crtc's
 * transcoder, plus (HSW/BDW only) whether the pipe is driving the
 * FDI/PCH link through DDI E.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders map directly to their ports. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		/* The DDI port select field layout changed on gen12. */
		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Resolve the port -> DPLL mapping per platform generation. */
	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10723
10724 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10725                                                  enum transcoder cpu_transcoder)
10726 {
10727         u32 trans_port_sync, master_select;
10728
10729         trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10730
10731         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10732                 return INVALID_TRANSCODER;
10733
10734         master_select = trans_port_sync &
10735                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
10736         if (master_select == 0)
10737                 return TRANSCODER_EDP;
10738         else
10739                 return master_select - 1;
10740 }
10741
/*
 * Read back the transcoder port sync (master/slave) topology for
 * @crtc_state.
 *
 * Sets master_transcoder (INVALID_TRANSCODER when this transcoder is
 * not a port sync slave) and sync_mode_slaves_mask (the transcoders
 * that name this one as their master).  A transcoder cannot be both a
 * slave and a master, which the final WARN_ON checks.
 */
static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 transcoders;
	enum transcoder cpu_transcoder;

	crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
								  crtc_state->cpu_transcoder);

	/* Scan all regular transcoders for slaves pointing back at us. */
	transcoders = BIT(TRANSCODER_A) |
		BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) |
		BIT(TRANSCODER_D);
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t trans_wakeref;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
								   power_domain);

		/* Powered-down transcoders cannot be sync slaves. */
		if (!trans_wakeref)
			continue;

		if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
		    crtc_state->cpu_transcoder)
			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
	}

	WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask);
}
10776
/*
 * Read out the full hardware state of a HSW+ pipe into @pipe_config.
 * Returns true if the pipe is active. Power domains are acquired as
 * needed during readout (tracked in @wakerefs/@power_domain_mask) and
 * all of them are released again before returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	/* Bail out early if the pipe's power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* DSI transcoders are read out separately on BXT/GLK. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	/* Output format (RGB vs. YCbCr) readout differs per platform. */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	/* The panel fitter domain must not have been acquired yet. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	WARN_ON(power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (INTEL_GEN(dev_priv) >= 11 &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder))
		icelake_get_trans_port_sync_config(pipe_config);

out:
	/* Drop every power reference acquired during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
10907
10908 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10909 {
10910         struct drm_i915_private *dev_priv =
10911                 to_i915(plane_state->uapi.plane->dev);
10912         const struct drm_framebuffer *fb = plane_state->hw.fb;
10913         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10914         u32 base;
10915
10916         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10917                 base = obj->phys_handle->busaddr;
10918         else
10919                 base = intel_plane_ggtt_offset(plane_state);
10920
10921         return base + plane_state->color_plane[0].offset;
10922 }
10923
10924 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10925 {
10926         int x = plane_state->uapi.dst.x1;
10927         int y = plane_state->uapi.dst.y1;
10928         u32 pos = 0;
10929
10930         if (x < 0) {
10931                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10932                 x = -x;
10933         }
10934         pos |= x << CURSOR_X_SHIFT;
10935
10936         if (y < 0) {
10937                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10938                 y = -y;
10939         }
10940         pos |= y << CURSOR_Y_SHIFT;
10941
10942         return pos;
10943 }
10944
10945 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10946 {
10947         const struct drm_mode_config *config =
10948                 &plane_state->uapi.plane->dev->mode_config;
10949         int width = drm_rect_width(&plane_state->uapi.dst);
10950         int height = drm_rect_height(&plane_state->uapi.dst);
10951
10952         return width > 0 && width <= config->cursor_width &&
10953                 height > 0 && height <= config->cursor_height;
10954 }
10955
/*
 * Compute and validate the surface offset for a cursor plane. Cursors
 * cannot be panned within the fb, so the aligned offset must land
 * exactly on the first pixel. Returns 0 on success or -EINVAL (or the
 * error from intel_plane_compute_gtt()) on failure.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Any x/y remainder after alignment would require panning. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* 180 degree rotation scans out backwards from the last pixel. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
11006
/*
 * Cursor plane checks shared by all platforms: the fb must be linear,
 * the cursor cannot be scaled, and the surface offset must be valid.
 * Platform-specific size/stride checks are done by the callers.
 * Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/* Cursors support no scaling at all. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Use the unclipped src/dst rectangles, which we program to hw */
	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	return 0;
}
11043
/* Maximum cursor stride (bytes) on 845g/865g. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
11051
11052 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11053 {
11054         u32 cntl = 0;
11055
11056         if (crtc_state->gamma_enable)
11057                 cntl |= CURSOR_GAMMA_ENABLE;
11058
11059         return cntl;
11060 }
11061
11062 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11063                            const struct intel_plane_state *plane_state)
11064 {
11065         return CURSOR_ENABLE |
11066                 CURSOR_FORMAT_ARGB |
11067                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
11068 }
11069
11070 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11071 {
11072         int width = drm_rect_width(&plane_state->uapi.dst);
11073
11074         /*
11075          * 845g/865g are only limited by the width of their cursors,
11076          * the height is arbitrary up to the precision of the register.
11077          */
11078         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11079 }
11080
11081 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11082                              struct intel_plane_state *plane_state)
11083 {
11084         const struct drm_framebuffer *fb = plane_state->hw.fb;
11085         int ret;
11086
11087         ret = intel_check_cursor(crtc_state, plane_state);
11088         if (ret)
11089                 return ret;
11090
11091         /* if we want to turn off the cursor ignore width and height */
11092         if (!fb)
11093                 return 0;
11094
11095         /* Check for which cursor types we support */
11096         if (!i845_cursor_size_ok(plane_state)) {
11097                 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11098                           drm_rect_width(&plane_state->uapi.dst),
11099                           drm_rect_height(&plane_state->uapi.dst));
11100                 return -EINVAL;
11101         }
11102
11103         WARN_ON(plane_state->uapi.visible &&
11104                 plane_state->color_plane[0].stride != fb->pitches[0]);
11105
11106         switch (fb->pitches[0]) {
11107         case 256:
11108         case 512:
11109         case 1024:
11110         case 2048:
11111                 break;
11112         default:
11113                 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
11114                               fb->pitches[0]);
11115                 return -EINVAL;
11116         }
11117
11118         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11119
11120         return 0;
11121 }
11122
/*
 * Program the 845/865 cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL or invisible. Uses raw _FW
 * register accessors under uncore.lock.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		/* CURSIZE packs height in bits 12+ and width below. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Position-only change: no need to disable the cursor. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11167
/* Disable the 845/865 cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
11173
/*
 * Read out whether the 845/865 cursor is currently enabled. Always
 * reports PIPE_A in @pipe. Returns false (without touching @pipe)
 * when the pipe power domain is disabled.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
11195
/* Max cursor stride: max cursor width at 4 bytes per pixel. */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
11203
11204 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11205 {
11206         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11207         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11208         u32 cntl = 0;
11209
11210         if (INTEL_GEN(dev_priv) >= 11)
11211                 return cntl;
11212
11213         if (crtc_state->gamma_enable)
11214                 cntl = MCURSOR_GAMMA_ENABLE;
11215
11216         if (crtc_state->csc_enable)
11217                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11218
11219         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11220                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11221
11222         return cntl;
11223 }
11224
11225 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11226                            const struct intel_plane_state *plane_state)
11227 {
11228         struct drm_i915_private *dev_priv =
11229                 to_i915(plane_state->uapi.plane->dev);
11230         u32 cntl = 0;
11231
11232         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11233                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11234
11235         switch (drm_rect_width(&plane_state->uapi.dst)) {
11236         case 64:
11237                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11238                 break;
11239         case 128:
11240                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11241                 break;
11242         case 256:
11243                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11244                 break;
11245         default:
11246                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11247                 return 0;
11248         }
11249
11250         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11251                 cntl |= MCURSOR_ROTATE_180;
11252
11253         return cntl;
11254 }
11255
11256 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11257 {
11258         struct drm_i915_private *dev_priv =
11259                 to_i915(plane_state->uapi.plane->dev);
11260         int width = drm_rect_width(&plane_state->uapi.dst);
11261         int height = drm_rect_height(&plane_state->uapi.dst);
11262
11263         if (!intel_cursor_size_ok(plane_state))
11264                 return false;
11265
11266         /* Cursor width is limited to a few power-of-two sizes */
11267         switch (width) {
11268         case 256:
11269         case 128:
11270         case 64:
11271                 break;
11272         default:
11273                 return false;
11274         }
11275
11276         /*
11277          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11278          * height from 8 lines up to the cursor width, when the
11279          * cursor is not rotated. Everything else requires square
11280          * cursors.
11281          */
11282         if (HAS_CUR_FBC(dev_priv) &&
11283             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11284                 if (height < 8 || height > width)
11285                         return false;
11286         } else {
11287                 if (height != width)
11288                         return false;
11289         }
11290
11291         return true;
11292 }
11293
/*
 * Validate a cursor update on i9xx+: common cursor checks, supported
 * sizes, matching stride, and the CHV pipe C left-edge erratum. On
 * success the precomputed cursor control value is stored in the state.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must exactly match the visible cursor width. */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0],
			      drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11350
/*
 * Program the i9xx+ cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL or invisible. Uses raw _FW
 * register accessors under uncore.lock; see below for the register
 * write ordering constraints.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL (see size checks). */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11418
/* Disable the i9xx+ cursor by programming an all-zero state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
11424
/*
 * Read out whether the i9xx+ cursor is enabled and, via @pipe, which
 * pipe it is attached to. Returns false (without touching @pipe) when
 * the pipe power domain is disabled.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(CURCNTR(plane->pipe));

	ret = val & MCURSOR_MODE;

	/* Pre-ilk (except g4x) reports the pipe in the cursor register. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
11458
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11464
11465 struct drm_framebuffer *
11466 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11467                          struct drm_mode_fb_cmd2 *mode_cmd)
11468 {
11469         struct intel_framebuffer *intel_fb;
11470         int ret;
11471
11472         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11473         if (!intel_fb)
11474                 return ERR_PTR(-ENOMEM);
11475
11476         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11477         if (ret)
11478                 goto err;
11479
11480         return &intel_fb->base;
11481
11482 err:
11483         kfree(intel_fb);
11484         return ERR_PTR(ret);
11485 }
11486
/*
 * Add all planes on @crtc to the atomic @state and disable them by
 * detaching their crtc and fb. Returns 0 on success or a negative
 * error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		/* Only touch planes currently assigned to this crtc. */
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
11511
/*
 * intel_get_load_detect_pipe - temporarily enable a pipe for load detection
 * @connector: connector to probe
 * @old: receives the state needed to undo the temporary modeset
 * @ctx: modeset acquire context supplied by the caller
 *
 * Finds a CRTC that can drive @connector (preferring the CRTC already bound
 * to it), then commits a minimal mode (load_detect_mode) on that CRTC with
 * all planes disabled.  A duplicated copy of the pre-existing connector,
 * CRTC and plane state is stored in @old->restore_state so that
 * intel_release_load_detect_pipe() can put everything back afterwards.
 *
 * Returns: true (as int) when a pipe was successfully enabled, false when
 * no usable pipe was found or the commit failed, or -EDEADLK when the
 * caller must back off and retry the locking sequence via @ctx.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	/* i starts at -1 because it is incremented before the first mask test */
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already enabled CRTCs are in use; drop the lock and move on. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One atomic state to apply the probe mode, one to undo it later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Probe with a blank pipe: no planes may remain enabled. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current state so it can be restored on release. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Ownership of restore_state passes to the caller via @old. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated so the caller can retry with @ctx. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11666
11667 void intel_release_load_detect_pipe(struct drm_connector *connector,
11668                                     struct intel_load_detect_pipe *old,
11669                                     struct drm_modeset_acquire_ctx *ctx)
11670 {
11671         struct intel_encoder *intel_encoder =
11672                 intel_attached_encoder(connector);
11673         struct drm_encoder *encoder = &intel_encoder->base;
11674         struct drm_atomic_state *state = old->restore_state;
11675         int ret;
11676
11677         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11678                       connector->base.id, connector->name,
11679                       encoder->base.id, encoder->name);
11680
11681         if (!state)
11682                 return;
11683
11684         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11685         if (ret)
11686                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11687         drm_atomic_state_put(state);
11688 }
11689
11690 static int i9xx_pll_refclk(struct drm_device *dev,
11691                            const struct intel_crtc_state *pipe_config)
11692 {
11693         struct drm_i915_private *dev_priv = to_i915(dev);
11694         u32 dpll = pipe_config->dpll_hw_state.dpll;
11695
11696         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11697                 return dev_priv->vbt.lvds_ssc_freq;
11698         else if (HAS_PCH_SPLIT(dev_priv))
11699                 return 120000;
11700         else if (!IS_GEN(dev_priv, 2))
11701                 return 96000;
11702         else
11703                 return 48000;
11704 }
11705
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* The rate-select bit picks which of the two FP dividers is active. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview packs N and M2 into different FP register fields. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored one-hot in the DPLL register, hence ffs(). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: I830 has no LVDS register; pipe B drives LVDS. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* Dual-channel LVDS (clock B powered) uses p2 = 7. */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11795
11796 int intel_dotclock_calculate(int link_freq,
11797                              const struct intel_link_m_n *m_n)
11798 {
11799         /*
11800          * The calculation for the data clock is:
11801          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11802          * But we want to avoid losing precison if possible, so:
11803          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11804          *
11805          * and the link clock is simpler:
11806          * link_clock = (m * link_clock) / n
11807          */
11808
11809         if (!m_n->link_n)
11810                 return 0;
11811
11812         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11813 }
11814
11815 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11816                                    struct intel_crtc_state *pipe_config)
11817 {
11818         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11819
11820         /* read out port_clock from the DPLL */
11821         i9xx_crtc_clock_get(crtc, pipe_config);
11822
11823         /*
11824          * In case there is an active pipe without active ports,
11825          * we may need some idea for the dotclock anyway.
11826          * Calculate one based on the FDI configuration.
11827          */
11828         pipe_config->hw.adjusted_mode.crtc_clock =
11829                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11830                                          &pipe_config->fdi_m_n);
11831 }
11832
11833 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
11834                                    struct intel_crtc *crtc)
11835 {
11836         memset(crtc_state, 0, sizeof(*crtc_state));
11837
11838         __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
11839
11840         crtc_state->cpu_transcoder = INVALID_TRANSCODER;
11841         crtc_state->master_transcoder = INVALID_TRANSCODER;
11842         crtc_state->hsw_workaround_pipe = INVALID_PIPE;
11843         crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
11844         crtc_state->scaler_state.scaler_id = -1;
11845 }
11846
11847 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
11848 {
11849         struct intel_crtc_state *crtc_state;
11850
11851         crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
11852
11853         if (crtc_state)
11854                 intel_crtc_state_reset(crtc_state, crtc);
11855
11856         return crtc_state;
11857 }
11858
11859 /* Returns the currently programmed mode of the given encoder. */
11860 struct drm_display_mode *
11861 intel_encoder_current_mode(struct intel_encoder *encoder)
11862 {
11863         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11864         struct intel_crtc_state *crtc_state;
11865         struct drm_display_mode *mode;
11866         struct intel_crtc *crtc;
11867         enum pipe pipe;
11868
11869         if (!encoder->get_hw_state(encoder, &pipe))
11870                 return NULL;
11871
11872         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11873
11874         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11875         if (!mode)
11876                 return NULL;
11877
11878         crtc_state = intel_crtc_state_alloc(crtc);
11879         if (!crtc_state) {
11880                 kfree(mode);
11881                 return NULL;
11882         }
11883
11884         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11885                 kfree(crtc_state);
11886                 kfree(mode);
11887                 return NULL;
11888         }
11889
11890         encoder->get_config(encoder, crtc_state);
11891
11892         intel_mode_from_pipe_config(mode, crtc_state);
11893
11894         kfree(crtc_state);
11895
11896         return mode;
11897 }
11898
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	/* The drm_crtc is embedded in the intel_crtc; free the container. */
	kfree(to_intel_crtc(crtc));
}
11906
11907 /**
11908  * intel_wm_need_update - Check whether watermarks need updating
11909  * @cur: current plane state
11910  * @new: new plane state
11911  *
11912  * Check current plane state versus the new one to determine whether
11913  * watermarks need to be recalculated.
11914  *
11915  * Returns true or false.
11916  */
11917 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11918                                  struct intel_plane_state *new)
11919 {
11920         /* Update watermarks on tiling or size changes. */
11921         if (new->uapi.visible != cur->uapi.visible)
11922                 return true;
11923
11924         if (!cur->hw.fb || !new->hw.fb)
11925                 return false;
11926
11927         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11928             cur->hw.rotation != new->hw.rotation ||
11929             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11930             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11931             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11932             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11933                 return true;
11934
11935         return false;
11936 }
11937
11938 static bool needs_scaling(const struct intel_plane_state *state)
11939 {
11940         int src_w = drm_rect_width(&state->uapi.src) >> 16;
11941         int src_h = drm_rect_height(&state->uapi.src) >> 16;
11942         int dst_w = drm_rect_width(&state->uapi.dst);
11943         int dst_h = drm_rect_height(&state->uapi.dst);
11944
11945         return (src_w != dst_w || src_h != dst_h);
11946 }
11947
/*
 * intel_plane_atomic_calc_changes - derive per-crtc flags from a plane change
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state to annotate (wm/cxsr/fb bits etc.)
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state being checked
 *
 * Compares the old and new plane state and records in @crtc_state what
 * follow-up work the commit needs: scaler setup (gen9+), pre/post
 * watermark updates, CxSR disabling around plane on/off, frontbuffer
 * bits, and the LP watermark workaround for ILK/SNB/IVB sprites.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler assigned. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Invisible before and after: nothing to do for this plane. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
12070
12071 static bool encoders_cloneable(const struct intel_encoder *a,
12072                                const struct intel_encoder *b)
12073 {
12074         /* masks could be asymmetric, so check both ways */
12075         return a == b || (a->cloneable & (1 << b->type) &&
12076                           b->cloneable & (1 << a->type));
12077 }
12078
12079 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12080                                          struct intel_crtc *crtc,
12081                                          struct intel_encoder *encoder)
12082 {
12083         struct intel_encoder *source_encoder;
12084         struct drm_connector *connector;
12085         struct drm_connector_state *connector_state;
12086         int i;
12087
12088         for_each_new_connector_in_state(state, connector, connector_state, i) {
12089                 if (connector_state->crtc != &crtc->base)
12090                         continue;
12091
12092                 source_encoder =
12093                         to_intel_encoder(connector_state->best_encoder);
12094                 if (!encoders_cloneable(encoder, source_encoder))
12095                         return false;
12096         }
12097
12098         return true;
12099 }
12100
12101 static int icl_add_linked_planes(struct intel_atomic_state *state)
12102 {
12103         struct intel_plane *plane, *linked;
12104         struct intel_plane_state *plane_state, *linked_plane_state;
12105         int i;
12106
12107         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12108                 linked = plane_state->planar_linked_plane;
12109
12110                 if (!linked)
12111                         continue;
12112
12113                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
12114                 if (IS_ERR(linked_plane_state))
12115                         return PTR_ERR(linked_plane_state);
12116
12117                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
12118                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
12119         }
12120
12121         return 0;
12122 }
12123
/*
 * icl_check_nv12_planes - (re)build UV->Y plane links for planar formats
 * @crtc_state: new crtc state being checked
 *
 * On gen11+ planar YUV (NV12 etc.) uses two hardware planes: a UV master
 * and a Y slave.  This tears down all existing links for planes on this
 * crtc, then assigns a free Y-capable plane to every plane listed in
 * crtc_state->nv12_planes and copies the relevant parameters over.
 *
 * Returns 0 on success, -EINVAL when no free Y plane is available, or a
 * negative error from pulling a plane into the atomic state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane links only exist on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar formats in use: nothing left to link. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc to act as slave. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->color_plane[0] = plane_state->color_plane[0];

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes additionally advertise the slave in CUS_CTL. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12213
12214 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12215 {
12216         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12217         struct intel_atomic_state *state =
12218                 to_intel_atomic_state(new_crtc_state->uapi.state);
12219         const struct intel_crtc_state *old_crtc_state =
12220                 intel_atomic_get_old_crtc_state(state, crtc);
12221
12222         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12223 }
12224
/*
 * icl_add_sync_mode_crtcs - link slave tile CRTCs to their genlock master
 * @crtc_state: state of the crtc being checked
 *
 * For gen11+ tiled displays: if this crtc drives a tile that is not the
 * last-horizontal/last-vertical tile of its tile group, find the CRTC
 * driving that last tile (the master), pull its state into the commit,
 * record the master transcoder in @crtc_state and add this crtc's
 * transcoder to the master's sync_mode_slaves_mask.
 *
 * Returns 0 on success, -EINVAL when no master CRTC can be found, or a
 * negative error from acquiring connector/crtc states.
 *
 * NOTE(review): master_crtc is declared outside the connector loop and
 * never reset per iteration; if several slave connectors are processed
 * and a later search fails, the stale master from the previous iteration
 * would be reused instead of returning -EINVAL — worth confirming whether
 * more than one tiled connector per crtc can occur here.
 */
static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector, *connector;
	struct drm_connector_state *connector_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;
	int i, tile_group_id;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizonal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;
		if (!connector->has_tile)
			continue;
		/* Not driving a full tile: no sync mode needed. */
		if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
		    crtc_state->hw.mode.vdisplay != connector->tile_v_size)
			return 0;
		/* The last tile is the master itself; nothing to link. */
		if (connector->tile_h_loc == connector->num_h_tile - 1 &&
		    connector->tile_v_loc == connector->num_v_tile - 1)
			continue;
		crtc_state->sync_mode_slaves_mask = 0;
		tile_group_id = connector->tile_group->id;
		/* Search all connectors for the master tile of this group. */
		drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
		drm_for_each_connector_iter(master_connector, &conn_iter) {
			struct drm_connector_state *master_conn_state = NULL;

			if (!master_connector->has_tile)
				continue;
			if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
			    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
				continue;
			if (master_connector->tile_group->id != tile_group_id)
				continue;

			master_conn_state = drm_atomic_get_connector_state(&state->base,
									   master_connector);
			if (IS_ERR(master_conn_state)) {
				drm_connector_list_iter_end(&conn_iter);
				return PTR_ERR(master_conn_state);
			}
			if (master_conn_state->crtc) {
				master_crtc = master_conn_state->crtc;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!master_crtc) {
			DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
				      connector_state->crtc->base.id);
			return -EINVAL;
		}

		master_crtc_state = drm_atomic_get_crtc_state(&state->base,
							      master_crtc);
		if (IS_ERR(master_crtc_state))
			return PTR_ERR(master_crtc_state);

		/* Cross-link slave -> master transcoder and master -> slave mask. */
		master_pipe_config = to_intel_crtc_state(master_crtc_state);
		crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
		master_pipe_config->sync_mode_slaves_mask |=
			BIT(crtc_state->cpu_transcoder);
		DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
			      transcoder_name(crtc_state->master_transcoder),
			      crtc_state->uapi.crtc->base.id,
			      master_pipe_config->sync_mode_slaves_mask);
	}

	return 0;
}
12309
/*
 * Per-CRTC atomic check: validates and computes the derived bits of the new
 * crtc_state — DPLL clocks, color management, watermarks, pipe scalers and
 * (on HSW/BDW) IPS — using the platform hooks in dev_priv->display.
 *
 * Returns 0 on success or a negative error code from one of the hooks.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* On gen < 5 (except G4X), a modeset that leaves the pipe inactive
	 * requests a post-commit watermark update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* Compute new PLL settings for an enabling modeset; the shared DPLL
	 * must not have been assigned yet at this point (WARN otherwise). */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	/* Intermediate watermarks only make sense if the platform also
	 * provided compute_pipe_wm above. */
	if (dev_priv->display.compute_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		/* A scaler-check failure here deliberately skips
		 * intel_atomic_setup_scalers() and is returned below. */
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
12383
12384 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12385 {
12386         struct intel_connector *connector;
12387         struct drm_connector_list_iter conn_iter;
12388
12389         drm_connector_list_iter_begin(dev, &conn_iter);
12390         for_each_intel_connector_iter(connector, &conn_iter) {
12391                 if (connector->base.state->crtc)
12392                         drm_connector_put(&connector->base);
12393
12394                 if (connector->base.encoder) {
12395                         connector->base.state->best_encoder =
12396                                 connector->base.encoder;
12397                         connector->base.state->crtc =
12398                                 connector->base.encoder->crtc;
12399
12400                         drm_connector_get(&connector->base);
12401                 } else {
12402                         connector->base.state->best_encoder = NULL;
12403                         connector->base.state->crtc = NULL;
12404                 }
12405         }
12406         drm_connector_list_iter_end(&conn_iter);
12407 }
12408
12409 static int
12410 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12411                       struct intel_crtc_state *pipe_config)
12412 {
12413         struct drm_connector *connector = conn_state->connector;
12414         const struct drm_display_info *info = &connector->display_info;
12415         int bpp;
12416
12417         switch (conn_state->max_bpc) {
12418         case 6 ... 7:
12419                 bpp = 6 * 3;
12420                 break;
12421         case 8 ... 9:
12422                 bpp = 8 * 3;
12423                 break;
12424         case 10 ... 11:
12425                 bpp = 10 * 3;
12426                 break;
12427         case 12:
12428                 bpp = 12 * 3;
12429                 break;
12430         default:
12431                 return -EINVAL;
12432         }
12433
12434         if (bpp < pipe_config->pipe_bpp) {
12435                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12436                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12437                               connector->base.id, connector->name,
12438                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12439                               pipe_config->pipe_bpp);
12440
12441                 pipe_config->pipe_bpp = bpp;
12442         }
12443
12444         return 0;
12445 }
12446
12447 static int
12448 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12449                           struct intel_crtc_state *pipe_config)
12450 {
12451         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12452         struct drm_atomic_state *state = pipe_config->uapi.state;
12453         struct drm_connector *connector;
12454         struct drm_connector_state *connector_state;
12455         int bpp, i;
12456
12457         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12458             IS_CHERRYVIEW(dev_priv)))
12459                 bpp = 10*3;
12460         else if (INTEL_GEN(dev_priv) >= 5)
12461                 bpp = 12*3;
12462         else
12463                 bpp = 8*3;
12464
12465         pipe_config->pipe_bpp = bpp;
12466
12467         /* Clamp display bpp to connector max bpp */
12468         for_each_new_connector_in_state(state, connector, connector_state, i) {
12469                 int ret;
12470
12471                 if (connector_state->crtc != &crtc->base)
12472                         continue;
12473
12474                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12475                 if (ret)
12476                         return ret;
12477         }
12478
12479         return 0;
12480 }
12481
/* Log the crtc_* (hardware) timings of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12493
/*
 * Log an M/N link configuration, tagged with @id (e.g. "fdi", "dp m_n").
 * NOTE(review): @pipe_config is currently unused here; kept for signature
 * symmetry with the other dump helpers.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
12504
/*
 * Dump an HDMI infoframe via hdmi_infoframe_log(), but only when KMS
 * debugging is enabled — the check avoids the formatting cost otherwise.
 */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12514
/* Expands to a designated initializer mapping INTEL_OUTPUT_<x> to "<x>". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for intel_output_type values, indexed by type. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12533
12534 static void snprintf_output_types(char *buf, size_t len,
12535                                   unsigned int output_types)
12536 {
12537         char *str = buf;
12538         int i;
12539
12540         str[0] = '\0';
12541
12542         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12543                 int r;
12544
12545                 if ((output_types & BIT(i)) == 0)
12546                         continue;
12547
12548                 r = snprintf(str, len, "%s%s",
12549                              str != buf ? "," : "", output_type_str[i]);
12550                 if (r >= len)
12551                         break;
12552                 str += r;
12553                 len -= r;
12554
12555                 output_types &= ~BIT(i);
12556         }
12557
12558         WARN_ON_ONCE(output_types != 0);
12559 }
12560
/* Human-readable names for intel_output_format values, indexed by format. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12567
12568 static const char *output_formats(enum intel_output_format format)
12569 {
12570         if (format >= ARRAY_SIZE(output_format_str))
12571                 format = INTEL_OUTPUT_FORMAT_INVALID;
12572         return output_format_str[format];
12573 }
12574
/*
 * Log a plane's state at KMS debug level: framebuffer (if any), format,
 * visibility, rotation, scaler assignment, and — when visible — the
 * source/destination rectangles.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: log the short form and bail. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
12600
/*
 * Dump a full CRTC state at KMS debug level: output types/format, bpp,
 * link M/N values, infoframes, modes/timings, pfit, scalers, DPLL and
 * color-management registers, followed by the state of every plane on
 * this pipe. @context tags the log output; @state may be NULL, in which
 * case planes are not dumped.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	/* A disabled pipe has nothing else worth dumping except its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 only exists for DRRS (seamless refresh rate switch). */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe only if its enable bit is set. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have a different panel fitter register layout. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV uses the CGM unit instead of the CSC mode register. */
	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12711
/*
 * Verify no digital port is claimed by more than one active encoder, and
 * that MST and SST/HDMI are not mixed on the same port.
 *
 * Returns true if the configuration is conflict-free, false otherwise.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;      /* bitmask of ports with SST/HDMI */
	unsigned int used_mst_ports = 0;  /* bitmask of ports with DP MST */
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from this commit; fall back to the
		 * connector's current state if it isn't part of the update. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
12783
/*
 * Copy the uapi pieces that may change without a full modeset (currently
 * only the color management blobs) into the hw state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
12789
12790 static void
12791 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
12792 {
12793         crtc_state->hw.enable = crtc_state->uapi.enable;
12794         crtc_state->hw.active = crtc_state->uapi.active;
12795         crtc_state->hw.mode = crtc_state->uapi.mode;
12796         crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
12797         intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
12798 }
12799
/*
 * Propagate the hw CRTC state back into the uapi state: enable/active
 * flags, modes (via drm_atomic_set_mode_for_crtc(), which manages the
 * mode blob) and the color management property blobs.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Can only fail on allocation; WARN rather than propagate here. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12816
/*
 * Reset crtc_state to freshly-allocated defaults while preserving the
 * fields that must survive (uapi state, scaler state, DPLL selection,
 * CRC enable, GMCH watermarks, trans-port-sync slave mask), then rebuild
 * the hw state from uapi.
 *
 * Returns 0 on success, -ENOMEM if the scratch state can't be allocated.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* GMCH platforms keep their watermark state across the reset. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call.
	 */
	if (is_trans_port_sync_master(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Overwrite crtc_state in place with the cleared+preserved copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
12861
/*
 * Core pipe-config computation for a modeset: sanitize sync flags, pick
 * the baseline bpp, gather output types, then iterate encoder
 * .compute_config() hooks and the CRTC fixup — retrying once if the CRTC
 * fixup asks for it (RETRY) — and finally derive dithering.
 *
 * Returns 0 on success, -EDEADLK for lock contention (caller must back
 * off and retry), or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;  /* allow exactly one RETRY round trip */

	/* Default to the transcoder with the same index as the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the platform max bpp for the debug message below. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Set the crtc_state defaults for trans_port_sync */
	pipe_config->master_transcoder = INVALID_TRANSCODER;
	ret = icl_add_sync_mode_crtcs(pipe_config);
	if (ret) {
		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
			      ret);
		return ret;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is routine backoff, don't log it. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* A positive RETRY from the fixup re-runs the encoder loop once. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
13009
/*
 * Report whether two clock frequencies are close enough to be treated
 * as equal: identical values always match, a zero on either side never
 * matches a non-zero value, and otherwise the difference must stay
 * below roughly 5% of the mean of the two clocks.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int sum, delta;

        if (clock1 == clock2)
                return true;

        if (clock1 == 0 || clock2 == 0)
                return false;

        sum = clock1 + clock2;
        delta = abs(clock1 - clock2);

        /* (delta + sum) * 100 / sum < 105  <=>  delta is under ~5% of the mean */
        return (delta + sum) * 100 / sum < 105;
}
13027
13028 static bool
13029 intel_compare_m_n(unsigned int m, unsigned int n,
13030                   unsigned int m2, unsigned int n2,
13031                   bool exact)
13032 {
13033         if (m == m2 && n == n2)
13034                 return true;
13035
13036         if (exact || !m || !n || !m2 || !n2)
13037                 return false;
13038
13039         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13040
13041         if (n > n2) {
13042                 while (n > n2) {
13043                         m2 <<= 1;
13044                         n2 <<= 1;
13045                 }
13046         } else if (n < n2) {
13047                 while (n < n2) {
13048                         m <<= 1;
13049                         n <<= 1;
13050                 }
13051         }
13052
13053         if (n != n2)
13054                 return false;
13055
13056         return intel_fuzzy_clock_check(m, m2);
13057 }
13058
13059 static bool
13060 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13061                        const struct intel_link_m_n *m2_n2,
13062                        bool exact)
13063 {
13064         return m_n->tu == m2_n2->tu &&
13065                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13066                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13067                 intel_compare_m_n(m_n->link_m, m_n->link_n,
13068                                   m2_n2->link_m, m2_n2->link_n, exact);
13069 }
13070
13071 static bool
13072 intel_compare_infoframe(const union hdmi_infoframe *a,
13073                         const union hdmi_infoframe *b)
13074 {
13075         return memcmp(a, b, sizeof(*a)) == 0;
13076 }
13077
/*
 * Report a mismatch between the expected (@a) and read-back (@b) copy
 * of the @name infoframe.  During a fastset check this is only debug
 * noise (the state will be corrected); otherwise it is logged as a
 * driver error.  Both copies are dumped via hdmi_infoframe_log().
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
                               bool fastset, const char *name,
                               const union hdmi_infoframe *a,
                               const union hdmi_infoframe *b)
{
        if (fastset) {
                /* Dumping infoframes is noisy; skip unless KMS debugging is enabled. */
                if ((drm_debug & DRM_UT_KMS) == 0)
                        return;

                DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
                DRM_DEBUG_KMS("expected:\n");
                hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
                DRM_DEBUG_KMS("found:\n");
                hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
        } else {
                DRM_ERROR("mismatch in %s infoframe\n", name);
                DRM_ERROR("expected:\n");
                hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
                DRM_ERROR("found:\n");
                hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
        }
}
13101
/*
 * Log a single pipe-config field mismatch for @crtc.  @name is the
 * field being compared; @format and the trailing varargs describe the
 * expected vs. found values (printed via the %pV va_format helper).
 * Fastset mismatches go to the KMS debug log, others are errors.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
                     const char *name, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        if (fastset)
                DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
                              crtc->base.base.id, crtc->base.name, name, &vaf);
        else
                DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
                          crtc->base.base.id, crtc->base.name, name, &vaf);

        va_end(args);
}
13122
13123 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13124 {
13125         if (i915_modparams.fastboot != -1)
13126                 return i915_modparams.fastboot;
13127
13128         /* Enable fastboot by default on Skylake and newer */
13129         if (INTEL_GEN(dev_priv) >= 9)
13130                 return true;
13131
13132         /* Enable fastboot by default on VLV and CHV */
13133         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13134                 return true;
13135
13136         /* Disabled by default on all others */
13137         return false;
13138 }
13139
/*
 * Compare a software-computed CRTC state (@current_config) against the
 * state read back from the hardware (@pipe_config), field by field.
 * Returns true when they agree on every checked field.  With @fastset
 * each mismatch is only logged at debug level (a full modeset will be
 * forced to fix it up); without it each mismatch is a driver error.
 * The PIPE_CONF_CHECK_* helper macros below all log via
 * pipe_config_mismatch() and clear @ret on any disagreement.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
                          const struct intel_crtc_state *pipe_config,
                          bool fastset)
{
        struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        bool ret = true;
        u32 bp_gamma = 0;
        /* true when fastsetting over state inherited from the BIOS/GOP */
        bool fixup_inherited = fastset &&
                (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
                !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);

        if (fixup_inherited && !fastboot_enabled(dev_priv)) {
                DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
                ret = false;
        }

/* Compare one field, reporting the values in hex on mismatch. */
#define PIPE_CONF_CHECK_X(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare one field, reporting the values in decimal on mismatch. */
#define PIPE_CONF_CHECK_I(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare one boolean field, reporting yes/no on mismatch. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc,  __stringify(name), \
                                     "(expected %s, found %s)", \
                                     yesno(current_config->name), \
                                     yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
        if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
                PIPE_CONF_CHECK_BOOL(name); \
        } else { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
                                     yesno(current_config->name), \
                                     yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

/* Compare one pointer field (e.g. the shared DPLL) by identity. */
#define PIPE_CONF_CHECK_P(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %p, found %p)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare a link M/N set via intel_compare_link_m_n() (exact unless fastset). */
#define PIPE_CONF_CHECK_M_N(name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name,\
                                    !fastset)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected tu %i gmch %i/%i link %i/%i, " \
                                     "found tu %i, gmch %i/%i link %i/%i)", \
                                     current_config->name.tu, \
                                     current_config->name.gmch_m, \
                                     current_config->name.gmch_n, \
                                     current_config->name.link_m, \
                                     current_config->name.link_n, \
                                     pipe_config->name.tu, \
                                     pipe_config->name.gmch_m, \
                                     pipe_config->name.gmch_n, \
                                     pipe_config->name.link_m, \
                                     pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name, !fastset) && \
            !intel_compare_link_m_n(&current_config->alt_name, \
                                    &pipe_config->name, !fastset)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected tu %i gmch %i/%i link %i/%i, " \
                                     "or tu %i gmch %i/%i link %i/%i, " \
                                     "found tu %i, gmch %i/%i link %i/%i)", \
                                     current_config->name.tu, \
                                     current_config->name.gmch_m, \
                                     current_config->name.gmch_n, \
                                     current_config->name.link_m, \
                                     current_config->name.link_n, \
                                     current_config->alt_name.tu, \
                                     current_config->alt_name.gmch_m, \
                                     current_config->alt_name.gmch_n, \
                                     current_config->alt_name.link_m, \
                                     current_config->alt_name.link_n, \
                                     pipe_config->name.tu, \
                                     pipe_config->name.gmch_m, \
                                     pipe_config->name.gmch_n, \
                                     pipe_config->name.link_m, \
                                     pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

/* Compare only the bits of @name selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(%x) (expected %i, found %i)", \
                                     (mask), \
                                     current_config->name & (mask), \
                                     pipe_config->name & (mask)); \
                ret = false; \
        } \
} while (0)

/* Compare a clock field with intel_fuzzy_clock_check() (~5% tolerance). */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare one full infoframe payload byte-for-byte. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
        if (!intel_compare_infoframe(&current_config->infoframes.name, \
                                     &pipe_config->infoframes.name)) { \
                pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
                                               &current_config->infoframes.name, \
                                               &pipe_config->infoframes.name); \
                ret = false; \
        } \
} while (0)

/*
 * Compare a LUT: @name1 is the mode field that must match first, then
 * the @name2 blob contents are compared at @bit_precision accuracy.
 */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
        if (current_config->name1 != pipe_config->name1) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name1), \
                                "(expected %i, found %i, won't compare lut values)", \
                                current_config->name1, \
                                pipe_config->name1); \
                ret = false;\
        } else { \
                if (!intel_color_lut_equal(current_config->name2, \
                                        pipe_config->name2, pipe_config->name1, \
                                        bit_precision)) { \
                        pipe_config_mismatch(fastset, crtc, __stringify(name2), \
                                        "hw_state doesn't match sw_state"); \
                        ret = false; \
                } \
        } \
} while (0)

/* True when either state carries the given comparison quirk. */
#define PIPE_CONF_QUIRK(quirk) \
        ((current_config->quirks | pipe_config->quirks) & (quirk))

        PIPE_CONF_CHECK_I(cpu_transcoder);

        PIPE_CONF_CHECK_BOOL(has_pch_encoder);
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_M_N(fdi_m_n);

        PIPE_CONF_CHECK_I(lane_count);
        PIPE_CONF_CHECK_X(lane_lat_optim_mask);

        if (INTEL_GEN(dev_priv) < 8) {
                PIPE_CONF_CHECK_M_N(dp_m_n);

                if (current_config->has_drrs)
                        PIPE_CONF_CHECK_M_N(dp_m2_n2);
        } else
                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

        PIPE_CONF_CHECK_X(output_types);

        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

        PIPE_CONF_CHECK_I(pixel_multiplier);
        PIPE_CONF_CHECK_I(output_format);
        PIPE_CONF_CHECK_I(dc3co_exitline);
        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
        if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                PIPE_CONF_CHECK_BOOL(limited_color_range);

        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
        PIPE_CONF_CHECK_BOOL(has_infoframe);
        PIPE_CONF_CHECK_BOOL(fec_enable);

        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                              DRM_MODE_FLAG_INTERLACE);

        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PHSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NHSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PVSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NVSYNC);
        }

        PIPE_CONF_CHECK_X(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
        if (INTEL_GEN(dev_priv) < 4)
                PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

        /*
         * Changing the EDP transcoder input mux
         * (A_ONOFF vs. A_ON) requires a full modeset.
         */
        PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

        if (!fastset) {
                PIPE_CONF_CHECK_I(pipe_src_w);
                PIPE_CONF_CHECK_I(pipe_src_h);

                PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
                if (current_config->pch_pfit.enabled) {
                        PIPE_CONF_CHECK_X(pch_pfit.pos);
                        PIPE_CONF_CHECK_X(pch_pfit.size);
                }

                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
                PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

                PIPE_CONF_CHECK_X(gamma_mode);
                if (IS_CHERRYVIEW(dev_priv))
                        PIPE_CONF_CHECK_X(cgm_mode);
                else
                        PIPE_CONF_CHECK_X(csc_mode);
                PIPE_CONF_CHECK_BOOL(gamma_enable);
                PIPE_CONF_CHECK_BOOL(csc_enable);

                bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
                if (bp_gamma)
                        PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);

        }

        PIPE_CONF_CHECK_BOOL(double_wide);

        PIPE_CONF_CHECK_P(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.spll);
        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
        PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
        PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
        PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

        PIPE_CONF_CHECK_X(dsi_pll.ctrl);
        PIPE_CONF_CHECK_X(dsi_pll.div);

        if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);

        PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
        PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

        PIPE_CONF_CHECK_I(min_voltage_level);

        PIPE_CONF_CHECK_X(infoframes.enable);
        PIPE_CONF_CHECK_X(infoframes.gcp);
        PIPE_CONF_CHECK_INFOFRAME(avi);
        PIPE_CONF_CHECK_INFOFRAME(spd);
        PIPE_CONF_CHECK_INFOFRAME(hdmi);
        PIPE_CONF_CHECK_INFOFRAME(drm);

        PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
        PIPE_CONF_CHECK_I(master_transcoder);

        PIPE_CONF_CHECK_I(dsc.compression_enable);
        PIPE_CONF_CHECK_I(dsc.dsc_split);
        PIPE_CONF_CHECK_I(dsc.compressed_bpp);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

        return ret;
}
13494
13495 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13496                                            const struct intel_crtc_state *pipe_config)
13497 {
13498         if (pipe_config->has_pch_encoder) {
13499                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13500                                                             &pipe_config->fdi_m_n);
13501                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13502
13503                 /*
13504                  * FDI already provided one idea for the dotclock.
13505                  * Yell if the encoder disagrees.
13506                  */
13507                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13508                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13509                      fdi_dotclock, dotclock);
13510         }
13511 }
13512
/*
 * Read the SKL+ watermark and DDB allocation state back from the
 * hardware and compare it against the software state in
 * @new_crtc_state, logging an error for every discrepancy.  Covers the
 * DBUF slice count (gen11+), the per-level and transition watermarks,
 * and the DDB entries for every universal plane plus the cursor.
 * No-op on pre-gen9 hardware or when the pipe is inactive.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Scratch space for the hw readback; heap-allocated as it is large. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_ddb_allocation ddb;
                struct skl_pipe_wm wm;
        } *hw;
        struct skl_ddb_allocation *sw_ddb;
        struct skl_pipe_wm *sw_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        const enum pipe pipe = crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        /* Silently skip verification if we can't allocate the scratch buffer. */
        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
        sw_wm = &new_crtc_state->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        skl_ddb_get_hw_state(dev_priv, &hw->ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        /* DBUF slice configuration only exists on gen11+. */
        if (INTEL_GEN(dev_priv) >= 11 &&
            hw->ddb.enabled_slices != sw_ddb->enabled_slices)
                DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
                          sw_ddb->enabled_slices,
                          hw->ddb.enabled_slices);

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1, level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated it's ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
        /*
         * NOTE(review): the condition below is always true; presumably it
         * once tested cursor-plane visibility per the comment above -
         * confirm against history before relying on the comment.
         */
        if (1) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe),
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe),
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
13651
13652 static void
13653 verify_connector_state(struct intel_atomic_state *state,
13654                        struct intel_crtc *crtc)
13655 {
13656         struct drm_connector *connector;
13657         struct drm_connector_state *new_conn_state;
13658         int i;
13659
13660         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13661                 struct drm_encoder *encoder = connector->encoder;
13662                 struct intel_crtc_state *crtc_state = NULL;
13663
13664                 if (new_conn_state->crtc != &crtc->base)
13665                         continue;
13666
13667                 if (crtc)
13668                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13669
13670                 intel_connector_verify_state(crtc_state, new_conn_state);
13671
13672                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13673                      "connector's atomic encoder doesn't match legacy encoder\n");
13674         }
13675 }
13676
13677 static void
13678 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
13679 {
13680         struct intel_encoder *encoder;
13681         struct drm_connector *connector;
13682         struct drm_connector_state *old_conn_state, *new_conn_state;
13683         int i;
13684
13685         for_each_intel_encoder(&dev_priv->drm, encoder) {
13686                 bool enabled = false, found = false;
13687                 enum pipe pipe;
13688
13689                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13690                               encoder->base.base.id,
13691                               encoder->base.name);
13692
13693                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
13694                                                    new_conn_state, i) {
13695                         if (old_conn_state->best_encoder == &encoder->base)
13696                                 found = true;
13697
13698                         if (new_conn_state->best_encoder != &encoder->base)
13699                                 continue;
13700                         found = enabled = true;
13701
13702                         I915_STATE_WARN(new_conn_state->crtc !=
13703                                         encoder->base.crtc,
13704                              "connector's crtc doesn't match encoder crtc\n");
13705                 }
13706
13707                 if (!found)
13708                         continue;
13709
13710                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
13711                      "encoder's enabled state mismatch "
13712                      "(expected %i, found %i)\n",
13713                      !!encoder->base.crtc, enabled);
13714
13715                 if (!encoder->base.crtc) {
13716                         bool active;
13717
13718                         active = encoder->get_hw_state(encoder, &pipe);
13719                         I915_STATE_WARN(active,
13720                              "encoder detached but still enabled on pipe %c.\n",
13721                              pipe_name(pipe));
13722                 }
13723         }
13724 }
13725
/*
 * Read the crtc configuration back from the hardware and compare it
 * against the freshly computed software state, flagging mismatches via
 * I915_STATE_WARN.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	/* old_crtc_state is recycled below as the buffer for the hw readout */
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	bool active;

	/*
	 * Wipe the old state (preserving only its ->uapi.state link, which
	 * is saved and restored around the reset) so it can serve as a
	 * pristine destination for the hardware readout.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/*
	 * Each encoder on the crtc must agree with the crtc's active state
	 * and pipe; active encoders also contribute to the hw readout.
	 */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	/* Derived state: needed before the full state comparison below. */
	intel_crtc_compute_pixel_rate(pipe_config);

	/* An inactive crtc has no further state worth comparing. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
13792
13793 static void
13794 intel_verify_planes(struct intel_atomic_state *state)
13795 {
13796         struct intel_plane *plane;
13797         const struct intel_plane_state *plane_state;
13798         int i;
13799
13800         for_each_new_intel_plane_in_state(state, plane,
13801                                           plane_state, i)
13802                 assert_plane(plane, plane_state->planar_slave ||
13803                              plane_state->uapi.visible);
13804 }
13805
13806 static void
13807 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13808                          struct intel_shared_dpll *pll,
13809                          struct intel_crtc *crtc,
13810                          struct intel_crtc_state *new_crtc_state)
13811 {
13812         struct intel_dpll_hw_state dpll_hw_state;
13813         unsigned int crtc_mask;
13814         bool active;
13815
13816         memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13817
13818         DRM_DEBUG_KMS("%s\n", pll->info->name);
13819
13820         active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
13821
13822         if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13823                 I915_STATE_WARN(!pll->on && pll->active_mask,
13824                      "pll in active use but not on in sw tracking\n");
13825                 I915_STATE_WARN(pll->on && !pll->active_mask,
13826                      "pll is on but not used by any active crtc\n");
13827                 I915_STATE_WARN(pll->on != active,
13828                      "pll on state mismatch (expected %i, found %i)\n",
13829                      pll->on, active);
13830         }
13831
13832         if (!crtc) {
13833                 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13834                                 "more active pll users than references: %x vs %x\n",
13835                                 pll->active_mask, pll->state.crtc_mask);
13836
13837                 return;
13838         }
13839
13840         crtc_mask = drm_crtc_mask(&crtc->base);
13841
13842         if (new_crtc_state->hw.active)
13843                 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13844                                 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13845                                 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13846         else
13847                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13848                                 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13849                                 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13850
13851         I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13852                         "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13853                         crtc_mask, pll->state.crtc_mask);
13854
13855         I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13856                                           &dpll_hw_state,
13857                                           sizeof(dpll_hw_state)),
13858                         "pll hw state mismatch\n");
13859 }
13860
13861 static void
13862 verify_shared_dpll_state(struct intel_crtc *crtc,
13863                          struct intel_crtc_state *old_crtc_state,
13864                          struct intel_crtc_state *new_crtc_state)
13865 {
13866         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13867
13868         if (new_crtc_state->shared_dpll)
13869                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13870
13871         if (old_crtc_state->shared_dpll &&
13872             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13873                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13874                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13875
13876                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13877                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13878                                 pipe_name(drm_crtc_index(&crtc->base)));
13879                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13880                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13881                                 pipe_name(drm_crtc_index(&crtc->base)));
13882         }
13883 }
13884
13885 static void
13886 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13887                           struct intel_atomic_state *state,
13888                           struct intel_crtc_state *old_crtc_state,
13889                           struct intel_crtc_state *new_crtc_state)
13890 {
13891         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13892                 return;
13893
13894         verify_wm_state(crtc, new_crtc_state);
13895         verify_connector_state(state, crtc);
13896         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13897         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13898 }
13899
13900 static void
13901 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13902 {
13903         int i;
13904
13905         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13906                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13907 }
13908
/*
 * Cross-check the software state of encoders, of connectors bound to no
 * crtc, and of every shared DPLL against the hardware.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13917
13918 static void
13919 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
13920 {
13921         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13922         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13923         const struct drm_display_mode *adjusted_mode =
13924                 &crtc_state->hw.adjusted_mode;
13925
13926         drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
13927
13928         /*
13929          * The scanline counter increments at the leading edge of hsync.
13930          *
13931          * On most platforms it starts counting from vtotal-1 on the
13932          * first active line. That means the scanline counter value is
13933          * always one less than what we would expect. Ie. just after
13934          * start of vblank, which also occurs at start of hsync (on the
13935          * last active line), the scanline counter will read vblank_start-1.
13936          *
13937          * On gen2 the scanline counter starts counting from 1 instead
13938          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13939          * to keep the value positive), instead of adding one.
13940          *
13941          * On HSW+ the behaviour of the scanline counter depends on the output
13942          * type. For DP ports it behaves like most other platforms, but on HDMI
13943          * there's an extra 1 line difference. So we need to add two instead of
13944          * one to the value.
13945          *
13946          * On VLV/CHV DSI the scanline counter would appear to increment
13947          * approx. 1/3 of a scanline before start of vblank. Unfortunately
13948          * that means we can't tell whether we're in vblank or not while
13949          * we're on that particular line. We must still set scanline_offset
13950          * to 1 so that the vblank timestamps come out correct when we query
13951          * the scanline counter from within the vblank interrupt handler.
13952          * However if queried just before the start of vblank we'll get an
13953          * answer that's slightly in the future.
13954          */
13955         if (IS_GEN(dev_priv, 2)) {
13956                 int vtotal;
13957
13958                 vtotal = adjusted_mode->crtc_vtotal;
13959                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13960                         vtotal /= 2;
13961
13962                 crtc->scanline_offset = vtotal - 1;
13963         } else if (HAS_DDI(dev_priv) &&
13964                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13965                 crtc->scanline_offset = 2;
13966         } else {
13967                 crtc->scanline_offset = 1;
13968         }
13969 }
13970
13971 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13972 {
13973         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13974         struct intel_crtc_state *new_crtc_state;
13975         struct intel_crtc *crtc;
13976         int i;
13977
13978         if (!dev_priv->display.crtc_compute_clock)
13979                 return;
13980
13981         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13982                 if (!needs_modeset(new_crtc_state))
13983                         continue;
13984
13985                 intel_release_shared_dplls(state, crtc);
13986         }
13987 }
13988
13989 /*
13990  * This implements the workaround described in the "notes" section of the mode
13991  * set sequence documentation. When going from no pipes or single pipe to
13992  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13993  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13994  */
13995 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13996 {
13997         struct intel_crtc_state *crtc_state;
13998         struct intel_crtc *crtc;
13999         struct intel_crtc_state *first_crtc_state = NULL;
14000         struct intel_crtc_state *other_crtc_state = NULL;
14001         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
14002         int i;
14003
14004         /* look at all crtc's that are going to be enabled in during modeset */
14005         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14006                 if (!crtc_state->hw.active ||
14007                     !needs_modeset(crtc_state))
14008                         continue;
14009
14010                 if (first_crtc_state) {
14011                         other_crtc_state = crtc_state;
14012                         break;
14013                 } else {
14014                         first_crtc_state = crtc_state;
14015                         first_pipe = crtc->pipe;
14016                 }
14017         }
14018
14019         /* No workaround needed? */
14020         if (!first_crtc_state)
14021                 return 0;
14022
14023         /* w/a possibly needed, check how many crtc's are already enabled. */
14024         for_each_intel_crtc(state->base.dev, crtc) {
14025                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
14026                 if (IS_ERR(crtc_state))
14027                         return PTR_ERR(crtc_state);
14028
14029                 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
14030
14031                 if (!crtc_state->hw.active ||
14032                     needs_modeset(crtc_state))
14033                         continue;
14034
14035                 /* 2 or more enabled crtcs means no need for w/a */
14036                 if (enabled_pipe != INVALID_PIPE)
14037                         return 0;
14038
14039                 enabled_pipe = crtc->pipe;
14040         }
14041
14042         if (enabled_pipe != INVALID_PIPE)
14043                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
14044         else if (other_crtc_state)
14045                 other_crtc_state->hsw_workaround_pipe = first_pipe;
14046
14047         return 0;
14048 }
14049
/*
 * Extra bookkeeping when at least one crtc undergoes a full modeset:
 * update the active pipe tracking, take the global state lock when
 * pipes are turned on/off, recompute cdclk and release stale shared
 * DPLL references.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	state->modeset = true;
	/* Start from the device's current tracking, then apply this state. */
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		/* Record pipes whose active state flips in this commit. */
		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/* Turning pipes on/off touches global state - serialize against it. */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	/* See haswell_mode_set_planes_workaround() for the w/a details. */
	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
14094
14095 /*
14096  * Handle calculation of various watermark data at the end of the atomic check
14097  * phase.  The code here should be run after the per-crtc and per-plane 'check'
14098  * handlers to ensure that all derived state has been updated.
14099  */
14100 static int calc_watermark_data(struct intel_atomic_state *state)
14101 {
14102         struct drm_device *dev = state->base.dev;
14103         struct drm_i915_private *dev_priv = to_i915(dev);
14104
14105         /* Is there platform-specific watermark information to calculate? */
14106         if (dev_priv->display.compute_global_watermarks)
14107                 return dev_priv->display.compute_global_watermarks(state);
14108
14109         return 0;
14110 }
14111
14112 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14113                                      struct intel_crtc_state *new_crtc_state)
14114 {
14115         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14116                 return;
14117
14118         new_crtc_state->uapi.mode_changed = false;
14119         new_crtc_state->update_pipe = true;
14120
14121         /*
14122          * If we're not doing the full modeset we want to
14123          * keep the current M/N values as they may be
14124          * sufficiently different to the computed values
14125          * to cause problems.
14126          *
14127          * FIXME: should really copy more fuzzy state here
14128          */
14129         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
14130         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
14131         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
14132         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
14133 }
14134
14135 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14136                                           struct intel_crtc *crtc,
14137                                           u8 plane_ids_mask)
14138 {
14139         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14140         struct intel_plane *plane;
14141
14142         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14143                 struct intel_plane_state *plane_state;
14144
14145                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14146                         continue;
14147
14148                 plane_state = intel_atomic_get_plane_state(state, plane);
14149                 if (IS_ERR(plane_state))
14150                         return PTR_ERR(plane_state);
14151         }
14152
14153         return 0;
14154 }
14155
14156 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14157 {
14158         /* See {hsw,vlv,ivb}_plane_ratio() */
14159         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14160                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14161                 IS_IVYBRIDGE(dev_priv);
14162 }
14163
/*
 * Run the plane-level atomic checks and make sure every plane needed for
 * the minimum cdclk computation is part of the state. *need_modeset is
 * OR'ed with the result of the per-plane min cdclk calculation.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	/* Per-plane driver checks. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane is excluded from the count. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Active plane count unchanged - nothing new to add. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
14224
14225 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14226 {
14227         struct intel_crtc_state *crtc_state;
14228         struct intel_crtc *crtc;
14229         int i;
14230
14231         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14232                 int ret = intel_crtc_atomic_check(state, crtc);
14233                 if (ret) {
14234                         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
14235                                          crtc->base.base.id, crtc->base.name);
14236                         return ret;
14237                 }
14238         }
14239
14240         return 0;
14241 }
14242
14243 /**
14244  * intel_atomic_check - validate state object
14245  * @dev: drm device
14246  * @_state: state to validate
14247  */
14248 static int intel_atomic_check(struct drm_device *dev,
14249                               struct drm_atomic_state *_state)
14250 {
14251         struct drm_i915_private *dev_priv = to_i915(dev);
14252         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14253         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14254         struct intel_crtc *crtc;
14255         int ret, i;
14256         bool any_ms = false;
14257
14258         /* Catch I915_MODE_FLAG_INHERITED */
14259         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14260                                             new_crtc_state, i) {
14261                 if (new_crtc_state->hw.mode.private_flags !=
14262                     old_crtc_state->hw.mode.private_flags)
14263                         new_crtc_state->uapi.mode_changed = true;
14264         }
14265
14266         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14267         if (ret)
14268                 goto fail;
14269
14270         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14271                                             new_crtc_state, i) {
14272                 if (!needs_modeset(new_crtc_state)) {
14273                         /* Light copy */
14274                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14275
14276                         continue;
14277                 }
14278
14279                 if (!new_crtc_state->uapi.enable) {
14280                         intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
14281
14282                         any_ms = true;
14283                         continue;
14284                 }
14285
14286                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14287                 if (ret)
14288                         goto fail;
14289
14290                 ret = intel_modeset_pipe_config(new_crtc_state);
14291                 if (ret)
14292                         goto fail;
14293
14294                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14295
14296                 if (needs_modeset(new_crtc_state))
14297                         any_ms = true;
14298         }
14299
14300         if (any_ms && !check_digital_port_conflicts(state)) {
14301                 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14302                 ret = EINVAL;
14303                 goto fail;
14304         }
14305
14306         ret = drm_dp_mst_atomic_check(&state->base);
14307         if (ret)
14308                 goto fail;
14309
14310         any_ms |= state->cdclk.force_min_cdclk_changed;
14311
14312         ret = intel_atomic_check_planes(state, &any_ms);
14313         if (ret)
14314                 goto fail;
14315
14316         if (any_ms) {
14317                 ret = intel_modeset_checks(state);
14318                 if (ret)
14319                         goto fail;
14320         } else {
14321                 state->cdclk.logical = dev_priv->cdclk.logical;
14322         }
14323
14324         ret = intel_atomic_check_crtcs(state);
14325         if (ret)
14326                 goto fail;
14327
14328         intel_fbc_choose_crtc(dev_priv, state);
14329         ret = calc_watermark_data(state);
14330         if (ret)
14331                 goto fail;
14332
14333         ret = intel_bw_atomic_check(state);
14334         if (ret)
14335                 goto fail;
14336
14337         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14338                                             new_crtc_state, i) {
14339                 if (!needs_modeset(new_crtc_state) &&
14340                     !new_crtc_state->update_pipe)
14341                         continue;
14342
14343                 intel_dump_pipe_config(new_crtc_state, state,
14344                                        needs_modeset(new_crtc_state) ?
14345                                        "[modeset]" : "[fastset]");
14346         }
14347
14348         return 0;
14349
14350  fail:
14351         if (ret == -EDEADLK)
14352                 return ret;
14353
14354         /*
14355          * FIXME would probably be nice to know which crtc specifically
14356          * caused the failure, in cases where we can pinpoint it.
14357          */
14358         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14359                                             new_crtc_state, i)
14360                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14361
14362         return ret;
14363 }
14364
/* Thin wrapper: run the DRM helper's plane preparation for @state. */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(state->base.dev,
						&state->base);
}
14370
14371 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14372 {
14373         struct drm_device *dev = crtc->base.dev;
14374         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14375
14376         if (!vblank->max_vblank_count)
14377                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14378
14379         return crtc->base.funcs->get_vblank_counter(&crtc->base);
14380 }
14381
14382 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14383                                   struct intel_crtc_state *crtc_state)
14384 {
14385         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14386
14387         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
14388                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14389
14390         if (crtc_state->has_pch_encoder) {
14391                 enum pipe pch_transcoder =
14392                         intel_crtc_pch_transcoder(crtc);
14393
14394                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14395         }
14396 }
14397
/*
 * Apply the pipe-level updates that are allowed without a full modeset
 * (a "fastset"): pipe source size, panel fitter state and, on gen11+,
 * the pipe chicken register.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
                               const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
         * mode) to see if we can flip rather than do a full mode set. In the
         * fastboot case, we'll flip, but if we don't update the pipesrc and
         * pfit state, we'll end up with a big fb scanned out into the wrong
         * sized surface.
         */
        intel_set_pipe_src_size(new_crtc_state);

        /* on skylake this is done by detaching scalers */
        if (INTEL_GEN(dev_priv) >= 9) {
                skl_detach_scalers(new_crtc_state);

                if (new_crtc_state->pch_pfit.enabled)
                        skylake_pfit_enable(new_crtc_state);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                /* pre-gen9 PCH platforms: enable, keep or tear down the
                 * ironlake panel fitter based on old vs. new state */
                if (new_crtc_state->pch_pfit.enabled)
                        ironlake_pfit_enable(new_crtc_state);
                else if (old_crtc_state->pch_pfit.enabled)
                        ironlake_pfit_disable(old_crtc_state);
        }

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(crtc);
}
14430
/*
 * Commit the pipe-level configuration for one CRTC.  Intended to run
 * under vblank evasion (between intel_pipe_update_start()/end()).  For a
 * full modeset the pipe was already programmed while enabling the CRTC,
 * so only the fastset/color-management pieces are applied here;
 * watermarks are updated in either case.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
                               struct intel_crtc_state *old_crtc_state,
                               struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        bool modeset = needs_modeset(new_crtc_state);

        /*
         * During modesets pipe configuration was programmed as the
         * CRTC was enabled.
         */
        if (!modeset) {
                if (new_crtc_state->uapi.color_mgmt_changed ||
                    new_crtc_state->update_pipe)
                        intel_color_commit(new_crtc_state);

                /* gen9+: drop scalers no longer used by the new state */
                if (INTEL_GEN(dev_priv) >= 9)
                        skl_detach_scalers(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                        bdw_set_pipemisc(new_crtc_state);

                if (new_crtc_state->update_pipe)
                        intel_pipe_fastset(old_crtc_state, new_crtc_state);
        }

        if (dev_priv->display.atomic_update_watermarks)
                dev_priv->display.atomic_update_watermarks(state, crtc);
}
14461
/*
 * Enable (modeset) or update (fastset) a single CRTC: LUT preloading,
 * pre-plane updates, FBC, then the pipe config and plane programming
 * under vblank evasion, and finally FIFO underrun arming for BIOS
 * inherited configurations on their first fastset.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
                              struct intel_atomic_state *state,
                              struct intel_crtc_state *old_crtc_state,
                              struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        bool modeset = needs_modeset(new_crtc_state);
        /* may be NULL if the primary plane isn't part of this state */
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(state,
                                                 to_intel_plane(crtc->base.primary));

        if (modeset) {
                intel_crtc_update_active_timings(new_crtc_state);

                dev_priv->display.crtc_enable(state, crtc);

                /* vblanks work again, re-enable pipe CRC. */
                intel_crtc_enable_pipe_crc(crtc);
        } else {
                /* fastset: preload LUTs before the vblank-evasion window */
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);

                intel_pre_plane_update(state, crtc);

                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
        }

        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else if (new_plane_state)
                intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);

        commit_pipe_config(state, old_crtc_state, new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
        else
                i9xx_update_planes_on_crtc(state, crtc);

        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14519
14520 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
14521 {
14522         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
14523         enum transcoder slave_transcoder;
14524
14525         WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
14526
14527         slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
14528         return intel_get_crtc_for_pipe(dev_priv,
14529                                        (enum pipe)slave_transcoder);
14530 }
14531
/*
 * Tear down a CRTC that is undergoing a modeset: disable the planes,
 * then pipe CRC (before the pipe, to avoid racing vblank off), the pipe
 * itself, FBC and its shared DPLL.  For pipes that stay off, program
 * initial watermarks on non-GMCH platforms.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(state, crtc);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
}
14558
/*
 * Disable every CRTC that needs a full modeset, in two passes:
 * Transcoder Port Sync slaves first (their vblanks are masked until the
 * master's), then everything else.  @handled tracks pipes already
 * disabled by the first pass.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u32 handled = 0;
        int i;

        /* Only disable port sync slaves */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state))
                        continue;

                if (!old_crtc_state->hw.active)
                        continue;

                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
                if (!is_trans_port_sync_slave(old_crtc_state))
                        continue;

                intel_pre_plane_update(state, crtc);
                intel_old_crtc_state_disables(state, old_crtc_state,
                                              new_crtc_state, crtc);
                handled |= BIT(crtc->pipe);
        }

        /* Disable everything else left on */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state) ||
                    (handled & BIT(crtc->pipe)))
                        continue;

                intel_pre_plane_update(state, crtc);
                if (old_crtc_state->hw.active)
                        intel_old_crtc_state_disables(state, old_crtc_state,
                                                      new_crtc_state, crtc);
        }
}
14602
14603 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14604 {
14605         struct intel_crtc *crtc;
14606         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14607         int i;
14608
14609         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14610                 if (!new_crtc_state->hw.active)
14611                         continue;
14612
14613                 intel_update_crtc(crtc, state, old_crtc_state,
14614                                   new_crtc_state);
14615         }
14616 }
14617
/*
 * First half of the Transcoder Port Sync enable sequence for one CRTC:
 * program the active timings, enable the pipe and re-enable pipe CRC.
 * The encoder's DP_TP_CTL is left in Idle at this point; it is switched
 * to Normal later by intel_set_dp_tp_ctl_normal().
 */
static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
                                              struct intel_atomic_state *state,
                                              struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_update_active_timings(new_crtc_state);
        dev_priv->display.crtc_enable(state, crtc);
        intel_crtc_enable_pipe_crc(crtc);
}
14628
14629 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14630                                        struct intel_atomic_state *state)
14631 {
14632         struct drm_connector *uninitialized_var(conn);
14633         struct drm_connector_state *conn_state;
14634         struct intel_dp *intel_dp;
14635         int i;
14636
14637         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14638                 if (conn_state->crtc == &crtc->base)
14639                         break;
14640         }
14641         intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14642         intel_dp_stop_link_train(intel_dp);
14643 }
14644
/*
 * Post-enable updates for one CRTC of a Transcoder Port Sync pair: FBC
 * enable/disable, the pipe config commit and plane programming under
 * vblank evasion, and FIFO underrun arming for configurations inherited
 * from the BIOS on their first fastset.
 */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
                                           struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        /* may be NULL if the primary plane isn't part of this state */
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(state,
                                                 to_intel_plane(crtc->base.primary));
        bool modeset = needs_modeset(new_crtc_state);

        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else if (new_plane_state)
                intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);
        commit_pipe_config(state, old_crtc_state, new_crtc_state);
        skl_update_planes_on_crtc(state, crtc);
        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14678
14679 static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
14680                                                struct intel_atomic_state *state,
14681                                                struct intel_crtc_state *old_crtc_state,
14682                                                struct intel_crtc_state *new_crtc_state)
14683 {
14684         struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
14685         struct intel_crtc_state *new_slave_crtc_state =
14686                 intel_atomic_get_new_crtc_state(state, slave_crtc);
14687         struct intel_crtc_state *old_slave_crtc_state =
14688                 intel_atomic_get_old_crtc_state(state, slave_crtc);
14689
14690         WARN_ON(!slave_crtc || !new_slave_crtc_state ||
14691                 !old_slave_crtc_state);
14692
14693         DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
14694                       crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
14695                       slave_crtc->base.name);
14696
14697         /* Enable seq for slave with with DP_TP_CTL left Idle until the
14698          * master is ready
14699          */
14700         intel_crtc_enable_trans_port_sync(slave_crtc,
14701                                           state,
14702                                           new_slave_crtc_state);
14703
14704         /* Enable seq for master with with DP_TP_CTL left Idle */
14705         intel_crtc_enable_trans_port_sync(crtc,
14706                                           state,
14707                                           new_crtc_state);
14708
14709         /* Set Slave's DP_TP_CTL to Normal */
14710         intel_set_dp_tp_ctl_normal(slave_crtc,
14711                                    state);
14712
14713         /* Set Master's DP_TP_CTL To Normal */
14714         usleep_range(200, 400);
14715         intel_set_dp_tp_ctl_normal(crtc,
14716                                    state);
14717
14718         /* Now do the post crtc enable for all master and slaves */
14719         intel_post_crtc_enable_updates(slave_crtc,
14720                                        state);
14721         intel_post_crtc_enable_updates(crtc,
14722                                        state);
14723 }
14724
/*
 * skl+ variant of commit_modeset_enables: update CRTCs in an order that
 * guarantees their DDB (data buffer) allocations never overlap between
 * consecutive updates, waiting a vblank where an active pipe's DDB
 * changed.  Also enables/disables the second DBuf slice (gen11+) around
 * the update as required.
 *
 * NOTE(review): the outer while loop assumes the check phase produced a
 * conflict-free final allocation so each pass makes progress; otherwise
 * it would spin forever — confirm against intel_atomic_check().
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = state->wm_results.ddb.enabled_slices;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
        u8 dirty_pipes = 0;
        int i;

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                /* ignore allocations for crtc's that have been turned off. */
                if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active)
                        entries[i] = old_crtc_state->wm.skl.ddb;
                if (new_crtc_state->hw.active)
                        dirty_pipes |= BIT(crtc->pipe);
        }

        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other in between CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         */
        while (dirty_pipes) {
                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;
                        bool modeset = needs_modeset(new_crtc_state);

                        if ((dirty_pipes & BIT(pipe)) == 0)
                                continue;

                        /* defer this pipe until its new DDB no longer
                         * overlaps any still-committed old allocation */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries,
                                                        INTEL_NUM_PIPES(dev_priv), i))
                                continue;

                        entries[i] = new_crtc_state->wm.skl.ddb;
                        dirty_pipes &= ~BIT(pipe);

                        if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
                                /* port sync pairs are enabled from the
                                 * master; slaves are handled there */
                                if (is_trans_port_sync_master(new_crtc_state))
                                        intel_update_trans_port_sync_crtcs(crtc,
                                                                           state,
                                                                           old_crtc_state,
                                                                           new_crtc_state);
                                else
                                        continue;
                        } else {
                                intel_update_crtc(crtc, state, old_crtc_state,
                                                  new_crtc_state);
                        }

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            !modeset && dirty_pipes)
                                intel_wait_for_vblank(dev_priv, pipe);
                }
        }

        /* If 2nd DBuf slice is no more required disable it */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);
}
14801
14802 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14803 {
14804         struct intel_atomic_state *state, *next;
14805         struct llist_node *freed;
14806
14807         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14808         llist_for_each_entry_safe(state, next, freed, freed)
14809                 drm_atomic_state_put(&state->base);
14810 }
14811
14812 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14813 {
14814         struct drm_i915_private *dev_priv =
14815                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14816
14817         intel_atomic_helper_free_state(dev_priv);
14818 }
14819
/*
 * Block until the commit's i915_sw_fence signals, while also waiting on
 * the modeset-reset bit so the wait completes if a GPU reset requiring
 * modeset recovery begins before the fence is done.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* queue on both waitqueues before re-checking the
                 * conditions, so no wakeup can be missed */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                              I915_RESET_MODESET),
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                if (i915_sw_fence_done(&intel_state->commit_ready) ||
                    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                  I915_RESET_MODESET),
                    &wait_reset);
}
14846
/*
 * Deferred tail of an atomic commit (queued from
 * intel_atomic_commit_tail()): clean up the planes, signal cleanup-done,
 * drop the commit's state reference and drain the free-state list.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
        struct drm_atomic_state *state =
                container_of(work, struct drm_atomic_state, commit_work);
        struct drm_i915_private *i915 = to_i915(state->dev);

        drm_atomic_helper_cleanup_planes(&i915->drm, state);
        drm_atomic_helper_commit_cleanup_done(state);
        drm_atomic_state_put(state);

        intel_atomic_helper_free_state(i915);
}
14859
/*
 * Hardware-touching phase of an atomic commit: waits for dependencies,
 * performs the modeset disables/enables, cdclk and SAGV updates, plane
 * programming and watermark optimization, then signals hw-done and
 * queues the final cleanup on a worker.  Runs inline for blocking
 * commits or from intel_atomic_commit_work() for nonblocking ones.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
        struct drm_device *dev = state->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        intel_wakeref_t wakeref = 0;
        int i;

        intel_atomic_commit_fence_wait(state);

        drm_atomic_helper_wait_for_dependencies(&state->base);

        if (state->modeset)
                wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* grab the power domains each modified CRTC needs; released
         * again after the post-plane updates below */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (needs_modeset(new_crtc_state) ||
                    new_crtc_state->update_pipe) {

                        put_domains[crtc->pipe] =
                                modeset_get_crtc_power_domains(new_crtc_state);
                }
        }

        intel_commit_modeset_disables(state);

        /* FIXME: Eventually get rid of our crtc->config pointer */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                crtc->config = new_crtc_state;

        if (state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

                intel_set_cdclk_pre_plane_update(dev_priv,
                                                 &state->cdclk.actual,
                                                 &dev_priv->cdclk.actual,
                                                 state->cdclk.pipe);

                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
                 * have more than one pipe enabled
                 */
                if (!intel_can_enable_sagv(state))
                        intel_disable_sagv(dev_priv);

                intel_modeset_verify_disabled(dev_priv, state);
        }

        /* Complete the events for pipes that have now been disabled */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);

                /* Complete events for now disabled pipes here. */
                if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(&crtc->base,
                                                   new_crtc_state->uapi.event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->uapi.event = NULL;
                }
        }

        if (state->modeset)
                intel_encoders_update_prepare(state);

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.commit_modeset_enables(state);

        if (state->modeset) {
                intel_encoders_update_complete(state);

                intel_set_cdclk_post_plane_update(dev_priv,
                                                  &state->cdclk.actual,
                                                  &dev_priv->cdclk.actual,
                                                  state->cdclk.pipe);
        }

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchronously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need our special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, &state->base);

        /* load the remaining LUTs now that the vblank has passed */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->hw.active &&
                    !needs_modeset(new_crtc_state) &&
                    !new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);
        }

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So re-enable underrun reporting after some planes get enabled.
                 *
                 * We do this before .optimize_watermarks() so that we have a
                 * chance of catching underruns with the intermediate watermarks
                 * vs. the new plane configuration.
                 */
                if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(state, crtc);
        }

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(state, crtc);

                /* release the power domains grabbed at the top */
                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        /* Underruns don't always raise interrupts, so check manually */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);

        if (state->modeset)
                intel_verify_planes(state);

        if (state->modeset && intel_can_enable_sagv(state))
                intel_enable_sagv(dev_priv);

        drm_atomic_helper_commit_hw_done(&state->base);

        if (state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }
        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->base.commit_work);
}
15029
15030 static void intel_atomic_commit_work(struct work_struct *work)
15031 {
15032         struct intel_atomic_state *state =
15033                 container_of(work, struct intel_atomic_state, base.commit_work);
15034
15035         intel_atomic_commit_tail(state);
15036 }
15037
/*
 * i915_sw_fence notify callback for the commit-ready fence.  Nothing to
 * do on FENCE_COMPLETE (the commit worker does blocking waits itself);
 * on FENCE_FREE the state is queued on the atomic helper's free list to
 * be released from a worker.
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
                          enum i915_sw_fence_notify notify)
{
        struct intel_atomic_state *state =
                container_of(fence, struct intel_atomic_state, commit_ready);

        switch (notify) {
        case FENCE_COMPLETE:
                /* we do blocking waits in the worker, nothing to do here */
                break;
        case FENCE_FREE:
                {
                        struct intel_atomic_helper *helper =
                                &to_i915(state->base.dev)->atomic_helper;

                        /* only kick the worker when the list was empty */
                        if (llist_add(&state->freed, &helper->free_list))
                                schedule_work(&helper->free_work);
                        break;
                }
        }

        return NOTIFY_DONE;
}
15062
15063 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15064 {
15065         struct intel_plane_state *old_plane_state, *new_plane_state;
15066         struct intel_plane *plane;
15067         int i;
15068
15069         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15070                                              new_plane_state, i)
15071                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15072                                         to_intel_frontbuffer(new_plane_state->hw.fb),
15073                                         plane->frontbuffer_bit);
15074 }
15075
15076 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
15077 {
15078         struct intel_crtc *crtc;
15079
15080         for_each_intel_crtc(&dev_priv->drm, crtc)
15081                 drm_modeset_lock_assert_held(&crtc->base.mutex);
15082 }
15083
/*
 * i915's .atomic_commit() implementation: prepare and validate the commit,
 * swap the new state into place, then execute the commit tail either
 * inline (blocking) or from a workqueue (nonblocking).
 *
 * Returns 0 on success or a negative error code; on error the runtime PM
 * reference is dropped and the state is left unswapped.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        /* Held across the whole commit; released by the commit tail
         * (or on the error paths below). */
        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        drm_atomic_state_get(&state->base);
        /* commit_ready gates the commit tail on all GEM/explicit fences
         * collected in intel_prepare_plane_fb(). */
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
                /* Arm the fence so anyone awaiting it is released. */
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);

        if (ret) {
                i915_sw_fence_commit(&state->commit_ready);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        /* State swapped successfully: commit device-wide bookkeeping. */
        dev_priv->wm.distrust_bios_wm = false;
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        if (state->global_state_changed) {
                /* Writing device-global state requires every CRTC lock. */
                assert_global_state_locked(dev_priv);

                memcpy(dev_priv->min_cdclk, state->min_cdclk,
                       sizeof(state->min_cdclk));
                memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
                       sizeof(state->min_voltage_level));
                dev_priv->active_pipes = state->active_pipes;
                dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

                intel_cdclk_swap_state(state);
        }

        /* Reference consumed by the commit tail / cleanup worker. */
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->flip_wq, &state->base.commit_work);
        } else {
                /* Blocking modesets must not overtake queued ones. */
                if (state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
15178
/*
 * Waiter placed on a CRTC's vblank waitqueue so the GPU frequency can be
 * boosted (see do_rps_boost()) if the flip's request misses the vblank.
 */
struct wait_rps_boost {
        struct wait_queue_entry wait;

        struct drm_crtc *crtc;          /* CRTC whose vblank we wait on */
        struct i915_request *request;   /* request driving the flip (holds a ref) */
};
15185
/*
 * Vblank waitqueue callback (armed by add_rps_boost_after_vblank()):
 * boost RPS for the flip's request if it has not started running yet,
 * then drop the request and vblank references and free the waiter.
 * Returns 1 to mark the wait entry as handled.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
                        unsigned mode, int sync, void *key)
{
        struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
        struct i915_request *rq = wait->request;

        /*
         * If we missed the vblank, but the request is already running it
         * is reasonable to assume that it will complete before the next
         * vblank without our intervention, so leave RPS alone.
         */
        if (!i915_request_started(rq))
                intel_rps_boost(rq);
        i915_request_put(rq);

        drm_crtc_vblank_put(wait->crtc);

        /* One-shot waiter: unlink and free ourselves. */
        list_del(&wait->wait.entry);
        kfree(wait);
        return 1;
}
15207
/*
 * Queue a one-shot RPS-boost waiter on @crtc's vblank waitqueue for
 * @fence. Silently does nothing if the fence is not an i915 request,
 * the platform has no RPS (gen < 6), vblanks cannot be enabled, or
 * allocation fails - the boost is purely best-effort.
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
                                       struct dma_fence *fence)
{
        struct wait_rps_boost *wait;

        if (!dma_fence_is_i915(fence))
                return;

        if (INTEL_GEN(to_i915(crtc->dev)) < 6)
                return;

        if (drm_crtc_vblank_get(crtc))
                return;

        wait = kmalloc(sizeof(*wait), GFP_KERNEL);
        if (!wait) {
                /* Undo the vblank reference taken above. */
                drm_crtc_vblank_put(crtc);
                return;
        }

        /* References released in do_rps_boost() when the vblank fires. */
        wait->request = to_request(dma_fence_get(fence));
        wait->crtc = crtc;

        wait->wait.func = do_rps_boost;
        wait->wait.flags = 0;

        add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
15236
/*
 * Pin (and fence, where applicable) the plane's framebuffer for scanout.
 * Cursor planes on platforms with physically addressed cursors are first
 * attached to a contiguous physical allocation. On success the pinned
 * vma is stored in plane_state->vma. Returns 0 or a negative error code.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        struct i915_vma *vma;

        if (plane->id == PLANE_CURSOR &&
            INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
                struct drm_i915_gem_object *obj = intel_fb_obj(fb);
                const int align = intel_cursor_alignment(dev_priv);
                int err;

                err = i915_gem_object_attach_phys(obj, align);
                if (err)
                        return err;
        }

        vma = intel_pin_and_fence_fb_obj(fb,
                                         &plane_state->view,
                                         intel_plane_uses_fence(plane_state),
                                         &plane_state->flags);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        /* Released via intel_plane_unpin_fb() on the old state. */
        plane_state->vma = vma;

        return 0;
}
15266
15267 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15268 {
15269         struct i915_vma *vma;
15270
15271         vma = fetch_and_zero(&old_plane_state->vma);
15272         if (vma)
15273                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15274 }
15275
15276 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
15277 {
15278         struct i915_sched_attr attr = {
15279                 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
15280         };
15281
15282         i915_gem_object_wait_priority(obj, 0, &attr);
15283 }
15284
15285 /**
15286  * intel_prepare_plane_fb - Prepare fb for usage on plane
15287  * @plane: drm plane to prepare for
15288  * @_new_plane_state: the plane state being prepared
15289  *
15290  * Prepares a framebuffer for usage on a display plane.  Generally this
15291  * involves pinning the underlying object and updating the frontbuffer tracking
15292  * bits.  Some older platforms need special physical address handling for
15293  * cursor planes.
15294  *
15295  * Returns 0 on success, negative error code on failure.
15296  */
15297 int
15298 intel_prepare_plane_fb(struct drm_plane *plane,
15299                        struct drm_plane_state *_new_plane_state)
15300 {
15301         struct intel_plane_state *new_plane_state =
15302                 to_intel_plane_state(_new_plane_state);
15303         struct intel_atomic_state *intel_state =
15304                 to_intel_atomic_state(new_plane_state->uapi.state);
15305         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15306         struct drm_framebuffer *fb = new_plane_state->hw.fb;
15307         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15308         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
15309         int ret;
15310
15311         if (old_obj) {
15312                 struct intel_crtc_state *crtc_state =
15313                         intel_atomic_get_new_crtc_state(intel_state,
15314                                                         to_intel_crtc(plane->state->crtc));
15315
15316                 /* Big Hammer, we also need to ensure that any pending
15317                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15318                  * current scanout is retired before unpinning the old
15319                  * framebuffer. Note that we rely on userspace rendering
15320                  * into the buffer attached to the pipe they are waiting
15321                  * on. If not, userspace generates a GPU hang with IPEHR
15322                  * point to the MI_WAIT_FOR_EVENT.
15323                  *
15324                  * This should only fail upon a hung GPU, in which case we
15325                  * can safely continue.
15326                  */
15327                 if (needs_modeset(crtc_state)) {
15328                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15329                                                               old_obj->base.resv, NULL,
15330                                                               false, 0,
15331                                                               GFP_KERNEL);
15332                         if (ret < 0)
15333                                 return ret;
15334                 }
15335         }
15336
15337         if (new_plane_state->uapi.fence) { /* explicit fencing */
15338                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
15339                                                     new_plane_state->uapi.fence,
15340                                                     I915_FENCE_TIMEOUT,
15341                                                     GFP_KERNEL);
15342                 if (ret < 0)
15343                         return ret;
15344         }
15345
15346         if (!obj)
15347                 return 0;
15348
15349         ret = i915_gem_object_pin_pages(obj);
15350         if (ret)
15351                 return ret;
15352
15353         ret = intel_plane_pin_fb(new_plane_state);
15354
15355         i915_gem_object_unpin_pages(obj);
15356         if (ret)
15357                 return ret;
15358
15359         fb_obj_bump_render_priority(obj);
15360         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
15361
15362         if (!new_plane_state->uapi.fence) { /* implicit fencing */
15363                 struct dma_fence *fence;
15364
15365                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15366                                                       obj->base.resv, NULL,
15367                                                       false, I915_FENCE_TIMEOUT,
15368                                                       GFP_KERNEL);
15369                 if (ret < 0)
15370                         return ret;
15371
15372                 fence = dma_resv_get_excl_rcu(obj->base.resv);
15373                 if (fence) {
15374                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15375                                                    fence);
15376                         dma_fence_put(fence);
15377                 }
15378         } else {
15379                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15380                                            new_plane_state->uapi.fence);
15381         }
15382
15383         /*
15384          * We declare pageflips to be interactive and so merit a small bias
15385          * towards upclocking to deliver the frame on time. By only changing
15386          * the RPS thresholds to sample more regularly and aim for higher
15387          * clocks we can hopefully deliver low power workloads (like kodi)
15388          * that are not quite steady state without resorting to forcing
15389          * maximum clocks following a vblank miss (see do_rps_boost()).
15390          */
15391         if (!intel_state->rps_interactive) {
15392                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15393                 intel_state->rps_interactive = true;
15394         }
15395
15396         return 0;
15397 }
15398
15399 /**
15400  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15401  * @plane: drm plane to clean up for
15402  * @_old_plane_state: the state from the previous modeset
15403  *
15404  * Cleans up a framebuffer that has just been removed from a plane.
15405  */
15406 void
15407 intel_cleanup_plane_fb(struct drm_plane *plane,
15408                        struct drm_plane_state *_old_plane_state)
15409 {
15410         struct intel_plane_state *old_plane_state =
15411                 to_intel_plane_state(_old_plane_state);
15412         struct intel_atomic_state *intel_state =
15413                 to_intel_atomic_state(old_plane_state->uapi.state);
15414         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15415
15416         if (intel_state->rps_interactive) {
15417                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15418                 intel_state->rps_interactive = false;
15419         }
15420
15421         /* Should only be called after a successful intel_prepare_plane_fb()! */
15422         intel_plane_unpin_fb(old_plane_state);
15423 }
15424
15425 /**
15426  * intel_plane_destroy - destroy a plane
15427  * @plane: plane to destroy
15428  *
15429  * Common destruction function for all types of planes (primary, cursor,
15430  * sprite).
15431  */
15432 void intel_plane_destroy(struct drm_plane *plane)
15433 {
15434         drm_plane_cleanup(plane);
15435         kfree(to_intel_plane(plane));
15436 }
15437
15438 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15439                                             u32 format, u64 modifier)
15440 {
15441         switch (modifier) {
15442         case DRM_FORMAT_MOD_LINEAR:
15443         case I915_FORMAT_MOD_X_TILED:
15444                 break;
15445         default:
15446                 return false;
15447         }
15448
15449         switch (format) {
15450         case DRM_FORMAT_C8:
15451         case DRM_FORMAT_RGB565:
15452         case DRM_FORMAT_XRGB1555:
15453         case DRM_FORMAT_XRGB8888:
15454                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15455                         modifier == I915_FORMAT_MOD_X_TILED;
15456         default:
15457                 return false;
15458         }
15459 }
15460
15461 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15462                                             u32 format, u64 modifier)
15463 {
15464         switch (modifier) {
15465         case DRM_FORMAT_MOD_LINEAR:
15466         case I915_FORMAT_MOD_X_TILED:
15467                 break;
15468         default:
15469                 return false;
15470         }
15471
15472         switch (format) {
15473         case DRM_FORMAT_C8:
15474         case DRM_FORMAT_RGB565:
15475         case DRM_FORMAT_XRGB8888:
15476         case DRM_FORMAT_XBGR8888:
15477         case DRM_FORMAT_ARGB8888:
15478         case DRM_FORMAT_ABGR8888:
15479         case DRM_FORMAT_XRGB2101010:
15480         case DRM_FORMAT_XBGR2101010:
15481         case DRM_FORMAT_ARGB2101010:
15482         case DRM_FORMAT_ABGR2101010:
15483         case DRM_FORMAT_XBGR16161616F:
15484                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15485                         modifier == I915_FORMAT_MOD_X_TILED;
15486         default:
15487                 return false;
15488         }
15489 }
15490
15491 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15492                                               u32 format, u64 modifier)
15493 {
15494         return modifier == DRM_FORMAT_MOD_LINEAR &&
15495                 format == DRM_FORMAT_ARGB8888;
15496 }
15497
/* drm_plane_funcs vtable for gen4+ primary planes. */
static const struct drm_plane_funcs i965_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i965_plane_format_mod_supported,
};
15506
/* drm_plane_funcs vtable for gen2/3 primary planes. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i8xx_plane_format_mod_supported,
};
15515
/*
 * Legacy cursor update fast path (.update_plane for cursor planes):
 * updates the cursor fb/position without a full atomic commit and
 * without vblank waits. Falls back to the generic atomic slowpath
 * whenever the crtc is inactive, a modeset/pipe update is pending,
 * a prior commit still owns the plane, or the change could affect
 * watermarks (size change, fb appearing/disappearing).
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
                           struct drm_crtc *_crtc,
                           struct drm_framebuffer *fb,
                           int crtc_x, int crtc_y,
                           unsigned int crtc_w, unsigned int crtc_h,
                           u32 src_x, u32 src_y,
                           u32 src_w, u32 src_h,
                           struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_plane *plane = to_intel_plane(_plane);
        struct intel_crtc *crtc = to_intel_crtc(_crtc);
        struct intel_plane_state *old_plane_state =
                to_intel_plane_state(plane->base.state);
        struct intel_plane_state *new_plane_state;
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_crtc_state *new_crtc_state;
        int ret;

        /*
         * When crtc is inactive or there is a modeset pending,
         * wait for it to complete in the slowpath
         */
        if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
            crtc_state->update_pipe)
                goto slow;

        /*
         * Don't do an async update if there is an outstanding commit modifying
         * the plane.  This prevents our async update's changes from getting
         * overridden by a previous synchronous update's state.
         */
        if (old_plane_state->uapi.commit &&
            !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
                goto slow;

        /*
         * If any parameters change that may affect watermarks,
         * take the slowpath. Only changing fb or position should be
         * in the fastpath.
         */
        if (old_plane_state->uapi.crtc != &crtc->base ||
            old_plane_state->uapi.src_w != src_w ||
            old_plane_state->uapi.src_h != src_h ||
            old_plane_state->uapi.crtc_w != crtc_w ||
            old_plane_state->uapi.crtc_h != crtc_h ||
            !old_plane_state->uapi.fb != !fb)
                goto slow;

        new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
        if (!new_plane_state)
                return -ENOMEM;

        /* Scratch crtc state used only for the atomic check below. */
        new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
        if (!new_crtc_state) {
                ret = -ENOMEM;
                goto out_free;
        }

        drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

        new_plane_state->uapi.src_x = src_x;
        new_plane_state->uapi.src_y = src_y;
        new_plane_state->uapi.src_w = src_w;
        new_plane_state->uapi.src_h = src_h;
        new_plane_state->uapi.crtc_x = crtc_x;
        new_plane_state->uapi.crtc_y = crtc_y;
        new_plane_state->uapi.crtc_w = crtc_w;
        new_plane_state->uapi.crtc_h = crtc_h;

        ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
                                                  old_plane_state, new_plane_state);
        if (ret)
                goto out_free;

        ret = intel_plane_pin_fb(new_plane_state);
        if (ret)
                goto out_free;

        intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
                                ORIGIN_FLIP);
        intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
                                to_intel_frontbuffer(new_plane_state->hw.fb),
                                plane->frontbuffer_bit);

        /* Swap plane state */
        plane->base.state = &new_plane_state->uapi;

        /*
         * We cannot swap crtc_state as it may be in use by an atomic commit or
         * page flip that's running simultaneously. If we swap crtc_state and
         * destroy the old state, we will cause a use-after-free there.
         *
         * Only update active_planes, which is needed for our internal
         * bookkeeping. Either value will do the right thing when updating
         * planes atomically. If the cursor was part of the atomic update then
         * we would have taken the slowpath.
         */
        crtc_state->active_planes = new_crtc_state->active_planes;

        if (new_plane_state->uapi.visible)
                intel_update_plane(plane, crtc_state, new_plane_state);
        else
                intel_disable_plane(plane, crtc_state);

        intel_plane_unpin_fb(old_plane_state);

out_free:
        /* The scratch crtc state is always destroyed; on success the old
         * plane state is freed, on failure the never-swapped new one is. */
        if (new_crtc_state)
                intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
        if (ret)
                intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
        else
                intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
        return ret;

slow:
        return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
                                              crtc_x, crtc_y, crtc_w, crtc_h,
                                              src_x, src_y, src_w, src_h, ctx);
}
15638
/* drm_plane_funcs vtable for cursor planes; uses the legacy fast path. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_legacy_cursor_update,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = intel_cursor_format_mod_supported,
};
15647
15648 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15649                                enum i9xx_plane_id i9xx_plane)
15650 {
15651         if (!HAS_FBC(dev_priv))
15652                 return false;
15653
15654         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15655                 return i9xx_plane == PLANE_A; /* tied to pipe A */
15656         else if (IS_IVYBRIDGE(dev_priv))
15657                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15658                         i9xx_plane == PLANE_C;
15659         else if (INTEL_GEN(dev_priv) >= 4)
15660                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15661         else
15662                 return i9xx_plane == PLANE_A;
15663 }
15664
/*
 * Allocate and initialize the primary plane for @pipe on pre-skl
 * hardware (gen9+ defers to skl_universal_plane_create()), selecting
 * the platform-appropriate formats, funcs, min-cdclk hook and plane
 * properties. Returns the plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_plane *plane;
        const struct drm_plane_funcs *plane_funcs;
        unsigned int supported_rotations;
        unsigned int possible_crtcs;
        const u32 *formats;
        int num_formats;
        int ret, zpos;

        if (INTEL_GEN(dev_priv) >= 9)
                return skl_universal_plane_create(dev_priv, pipe,
                                                  PLANE_PRIMARY);

        plane = intel_plane_alloc();
        if (IS_ERR(plane))
                return plane;

        plane->pipe = pipe;
        /*
         * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
         * port is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
                plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
        else
                plane->i9xx_plane = (enum i9xx_plane_id) pipe;
        plane->id = PLANE_PRIMARY;
        plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

        plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
        if (plane->has_fbc) {
                struct intel_fbc *fbc = &dev_priv->fbc;

                fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
        }

        /* Pick the pixel format list for this platform generation. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                formats = vlv_primary_formats;
                num_formats = ARRAY_SIZE(vlv_primary_formats);
        } else if (INTEL_GEN(dev_priv) >= 4) {
                /*
                 * WaFP16GammaEnabling:ivb
                 * "Workaround : When using the 64-bit format, the plane
                 *  output on each color channel has one quarter amplitude.
                 *  It can be brought up to full amplitude by using pipe
                 *  gamma correction or pipe color space conversion to
                 *  multiply the plane output by four."
                 *
                 * There is no dedicated plane gamma for the primary plane,
                 * and using the pipe gamma/csc could conflict with other
                 * planes, so we choose not to expose fp16 on IVB primary
                 * planes. HSW primary planes no longer have this problem.
                 */
                if (IS_IVYBRIDGE(dev_priv)) {
                        formats = ivb_primary_formats;
                        num_formats = ARRAY_SIZE(ivb_primary_formats);
                } else {
                        formats = i965_primary_formats;
                        num_formats = ARRAY_SIZE(i965_primary_formats);
                }
        } else {
                formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
        }

        if (INTEL_GEN(dev_priv) >= 4)
                plane_funcs = &i965_plane_funcs;
        else
                plane_funcs = &i8xx_plane_funcs;

        /* Platform-specific minimum-cdclk requirement hook. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                plane->min_cdclk = vlv_plane_min_cdclk;
        else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                plane->min_cdclk = hsw_plane_min_cdclk;
        else if (IS_IVYBRIDGE(dev_priv))
                plane->min_cdclk = ivb_plane_min_cdclk;
        else
                plane->min_cdclk = i9xx_plane_min_cdclk;

        plane->max_stride = i9xx_plane_max_stride;
        plane->update_plane = i9xx_update_plane;
        plane->disable_plane = i9xx_disable_plane;
        plane->get_hw_state = i9xx_plane_get_hw_state;
        plane->check_plane = i9xx_plane_check;

        possible_crtcs = BIT(pipe);

        /* Ilk+/g4x name the plane after its pipe; older by plane id. */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
                                               possible_crtcs, plane_funcs,
                                               formats, num_formats,
                                               i9xx_format_modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
                                               possible_crtcs, plane_funcs,
                                               formats, num_formats,
                                               i9xx_format_modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane %c",
                                               plane_name(plane->i9xx_plane));
        if (ret)
                goto fail;

        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_REFLECT_X;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
        } else {
                supported_rotations = DRM_MODE_ROTATE_0;
        }

        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&plane->base,
                                                   DRM_MODE_ROTATE_0,
                                                   supported_rotations);

        zpos = 0;
        drm_plane_create_zpos_immutable_property(&plane->base, zpos);

        drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

        return plane;

fail:
        intel_plane_free(plane);

        return ERR_PTR(ret);
}
15800
/*
 * intel_cursor_plane_create - create and register the cursor plane for a pipe
 * @dev_priv: i915 device
 * @pipe: pipe this cursor plane belongs to
 *
 * Allocates an intel_plane, wires up the per-platform cursor vfuncs
 * (i845/i865 have their own cursor hardware; everything else uses the
 * i9xx paths), registers the plane with the DRM core and attaches the
 * rotation and zpos properties.
 *
 * Returns the new plane on success, or an ERR_PTR() on failure; on
 * failure the partially initialized plane is freed here.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	/* i845/i865 cursor hardware differs from all later platforms */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 marks the cached register values as unknown */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	/* the cursor plane is tied to exactly one pipe */
	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* cursor sits above the primary (zpos 0) and all sprite planes */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
15868
/*
 * crtc vfuncs shared by all platforms; the per-platform tables below
 * differ only in their vblank counter/enable/disable hooks.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources

static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
15935
15936 static struct intel_crtc *intel_crtc_alloc(void)
15937 {
15938         struct intel_crtc_state *crtc_state;
15939         struct intel_crtc *crtc;
15940
15941         crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
15942         if (!crtc)
15943                 return ERR_PTR(-ENOMEM);
15944
15945         crtc_state = intel_crtc_state_alloc(crtc);
15946         if (!crtc_state) {
15947                 kfree(crtc);
15948                 return ERR_PTR(-ENOMEM);
15949         }
15950
15951         crtc->base.state = &crtc_state->uapi;
15952         crtc->config = crtc_state;
15953
15954         return crtc;
15955 }
15956
/*
 * Free a crtc allocated with intel_crtc_alloc(); used on error paths
 * before (or after a failed) registration with the DRM core.
 */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
15962
/*
 * intel_crtc_init - create and register the crtc for a pipe
 * @dev_priv: i915 device
 * @pipe: pipe to create the crtc for
 *
 * Creates the primary, sprite and cursor planes for @pipe, picks the
 * per-platform crtc vfunc table, registers the crtc with the DRM core
 * and records the pipe->crtc (and, pre-gen9, plane->crtc) mappings.
 *
 * Returns 0 on success or a negative error code; on failure all
 * partially created objects are released.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* pick the vblank hooks matching the display generation */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* each pipe must map to exactly one crtc */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	/* crtc index is expected to match the pipe enum */
	WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16050
16051 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16052                                       struct drm_file *file)
16053 {
16054         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16055         struct drm_crtc *drmmode_crtc;
16056         struct intel_crtc *crtc;
16057
16058         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16059         if (!drmmode_crtc)
16060                 return -ENOENT;
16061
16062         crtc = to_intel_crtc(drmmode_crtc);
16063         pipe_from_crtc_id->pipe = crtc->pipe;
16064
16065         return 0;
16066 }
16067
16068 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16069 {
16070         struct drm_device *dev = encoder->base.dev;
16071         struct intel_encoder *source_encoder;
16072         u32 possible_clones = 0;
16073
16074         for_each_intel_encoder(dev, source_encoder) {
16075                 if (encoders_cloneable(encoder, source_encoder))
16076                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16077         }
16078
16079         return possible_clones;
16080 }
16081
16082 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16083 {
16084         struct drm_device *dev = encoder->base.dev;
16085         struct intel_crtc *crtc;
16086         u32 possible_crtcs = 0;
16087
16088         for_each_intel_crtc(dev, crtc) {
16089                 if (encoder->pipe_mask & BIT(crtc->pipe))
16090                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16091         }
16092
16093         return possible_crtcs;
16094 }
16095
/*
 * Check whether eDP on port A is present on an ILK-class platform:
 * mobile parts only, the DP_A strap must be set, and on gen5 the
 * fuse must not disable it.
 */
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}
16109
/*
 * Check whether a CRT (VGA) output is present on a DDI platform,
 * based on platform generation, fuse straps, DDI lane configuration
 * and the VBT.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	/* no CRT support on gen9+ */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* ULT parts have no CRT either */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
16131
/*
 * intel_pps_unlock_regs_wa - unlock the panel power sequencer registers
 * @dev_priv: i915 device
 *
 * Writes the unlock key into the PP_CONTROL register(s) so that the
 * PPS registers are writable. Not needed on DDI platforms, which are
 * skipped entirely.
 */
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = I915_READ(PP_CONTROL(pps_idx));

		/* replace the lock field with the unlock key */
		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		I915_WRITE(PP_CONTROL(pps_idx), val);
	}
}
16155
/*
 * Select the MMIO base of the panel power sequencer registers for
 * this platform, then apply the register unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16167
/*
 * intel_setup_outputs - probe and register all display outputs
 * @dev_priv: i915 device
 *
 * Walks the platform-specific set of possible outputs (DDI, LVDS, CRT,
 * SDVO, HDMI, DP, DSI, TV, DVO), using hardware straps and/or the VBT
 * to decide which ones are present, and registers an encoder for each.
 * Afterwards it fills in the possible_crtcs/possible_clones masks of
 * every registered encoder.
 *
 * Ordering within each platform branch is significant (see e.g. the
 * LVDS-before-eDP comment below); do not reorder the init calls.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		/* NOTE: no PORT_C on gen12 here — presumably intentional
		 * for this platform's port layout; confirm against bspec. */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* HDMI on D only if the port isn't eDP */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* all encoders exist now; fill in their crtc/clone masks */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16406
/*
 * drm_framebuffer .destroy hook: unregister the fb from the DRM core,
 * drop the frontbuffer tracking reference and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}
16416
/*
 * drm_framebuffer .create_handle hook: export a GEM handle for the
 * fb's backing object. Userptr-backed objects are refused.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}
16430
/*
 * drm_framebuffer .dirty hook: flush CPU writes to the backing object
 * and kick the frontbuffer tracking so features like FBC/PSR pick up
 * the new contents. The clip rectangles are ignored.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}
16444
/* framebuffer vfuncs used for all i915 framebuffers */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
16450
16451 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
16452                                   struct drm_i915_gem_object *obj,
16453                                   struct drm_mode_fb_cmd2 *mode_cmd)
16454 {
16455         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
16456         struct drm_framebuffer *fb = &intel_fb->base;
16457         u32 max_stride;
16458         unsigned int tiling, stride;
16459         int ret = -EINVAL;
16460         int i;
16461
16462         intel_fb->frontbuffer = intel_frontbuffer_get(obj);
16463         if (!intel_fb->frontbuffer)
16464                 return -ENOMEM;
16465
16466         i915_gem_object_lock(obj);
16467         tiling = i915_gem_object_get_tiling(obj);
16468         stride = i915_gem_object_get_stride(obj);
16469         i915_gem_object_unlock(obj);
16470
16471         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
16472                 /*
16473                  * If there's a fence, enforce that
16474                  * the fb modifier and tiling mode match.
16475                  */
16476                 if (tiling != I915_TILING_NONE &&
16477                     tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16478                         DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
16479                         goto err;
16480                 }
16481         } else {
16482                 if (tiling == I915_TILING_X) {
16483                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
16484                 } else if (tiling == I915_TILING_Y) {
16485                         DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
16486                         goto err;
16487                 }
16488         }
16489
16490         if (!drm_any_plane_has_format(&dev_priv->drm,
16491                                       mode_cmd->pixel_format,
16492                                       mode_cmd->modifier[0])) {
16493                 struct drm_format_name_buf format_name;
16494
16495                 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
16496                               drm_get_format_name(mode_cmd->pixel_format,
16497                                                   &format_name),
16498                               mode_cmd->modifier[0]);
16499                 goto err;
16500         }
16501
16502         /*
16503          * gen2/3 display engine uses the fence if present,
16504          * so the tiling mode must match the fb modifier exactly.
16505          */
16506         if (INTEL_GEN(dev_priv) < 4 &&
16507             tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16508                 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
16509                 goto err;
16510         }
16511
16512         max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
16513                                          mode_cmd->modifier[0]);
16514         if (mode_cmd->pitches[0] > max_stride) {
16515                 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
16516                               mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
16517                               "tiled" : "linear",
16518                               mode_cmd->pitches[0], max_stride);
16519                 goto err;
16520         }
16521
16522         /*
16523          * If there's a fence, enforce that
16524          * the fb pitch and fence stride match.
16525          */
16526         if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
16527                 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
16528                               mode_cmd->pitches[0], stride);
16529                 goto err;
16530         }
16531
16532         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
16533         if (mode_cmd->offsets[0] != 0)
16534                 goto err;
16535
16536         drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
16537
16538         for (i = 0; i < fb->format->num_planes; i++) {
16539                 u32 stride_alignment;
16540
16541                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
16542                         DRM_DEBUG_KMS("bad plane %d handle\n", i);
16543                         goto err;
16544                 }
16545
16546                 stride_alignment = intel_fb_stride_alignment(fb, i);
16547                 if (fb->pitches[i] & (stride_alignment - 1)) {
16548                         DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
16549                                       i, fb->pitches[i], stride_alignment);
16550                         goto err;
16551                 }
16552
16553                 fb->obj[i] = &obj->base;
16554         }
16555
16556         ret = intel_fill_fb_info(dev_priv, fb);
16557         if (ret)
16558                 goto err;
16559
16560         ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
16561         if (ret) {
16562                 DRM_ERROR("framebuffer init failed %d\n", ret);
16563                 goto err;
16564         }
16565
16566         return 0;
16567
16568 err:
16569         intel_frontbuffer_put(intel_fb->frontbuffer);
16570         return ret;
16571 }
16572
16573 static struct drm_framebuffer *
16574 intel_user_framebuffer_create(struct drm_device *dev,
16575                               struct drm_file *filp,
16576                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
16577 {
16578         struct drm_framebuffer *fb;
16579         struct drm_i915_gem_object *obj;
16580         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16581
16582         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16583         if (!obj)
16584                 return ERR_PTR(-ENOENT);
16585
16586         fb = intel_framebuffer_create(obj, &mode_cmd);
16587         i915_gem_object_put(obj);
16588
16589         return fb;
16590 }
16591
16592 static void intel_atomic_state_free(struct drm_atomic_state *state)
16593 {
16594         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16595
16596         drm_atomic_state_default_release(state);
16597
16598         i915_sw_fence_fini(&intel_state->commit_ready);
16599
16600         kfree(state);
16601 }
16602
16603 static enum drm_mode_status
16604 intel_mode_valid(struct drm_device *dev,
16605                  const struct drm_display_mode *mode)
16606 {
16607         struct drm_i915_private *dev_priv = to_i915(dev);
16608         int hdisplay_max, htotal_max;
16609         int vdisplay_max, vtotal_max;
16610
16611         /*
16612          * Can't reject DBLSCAN here because Xorg ddxen can add piles
16613          * of DBLSCAN modes to the output's mode list when they detect
16614          * the scaling mode property on the connector. And they don't
16615          * ask the kernel to validate those modes in any way until
16616          * modeset time at which point the client gets a protocol error.
16617          * So in order to not upset those clients we silently ignore the
16618          * DBLSCAN flag on such connectors. For other connectors we will
16619          * reject modes with the DBLSCAN flag in encoder->compute_config().
16620          * And we always reject DBLSCAN modes in connector->mode_valid()
16621          * as we never want such modes on the connector's mode list.
16622          */
16623
16624         if (mode->vscan > 1)
16625                 return MODE_NO_VSCAN;
16626
16627         if (mode->flags & DRM_MODE_FLAG_HSKEW)
16628                 return MODE_H_ILLEGAL;
16629
16630         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16631                            DRM_MODE_FLAG_NCSYNC |
16632                            DRM_MODE_FLAG_PCSYNC))
16633                 return MODE_HSYNC;
16634
16635         if (mode->flags & (DRM_MODE_FLAG_BCAST |
16636                            DRM_MODE_FLAG_PIXMUX |
16637                            DRM_MODE_FLAG_CLKDIV2))
16638                 return MODE_BAD;
16639
16640         /* Transcoder timing limits */
16641         if (INTEL_GEN(dev_priv) >= 11) {
16642                 hdisplay_max = 16384;
16643                 vdisplay_max = 8192;
16644                 htotal_max = 16384;
16645                 vtotal_max = 8192;
16646         } else if (INTEL_GEN(dev_priv) >= 9 ||
16647                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16648                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16649                 vdisplay_max = 4096;
16650                 htotal_max = 8192;
16651                 vtotal_max = 8192;
16652         } else if (INTEL_GEN(dev_priv) >= 3) {
16653                 hdisplay_max = 4096;
16654                 vdisplay_max = 4096;
16655                 htotal_max = 8192;
16656                 vtotal_max = 8192;
16657         } else {
16658                 hdisplay_max = 2048;
16659                 vdisplay_max = 2048;
16660                 htotal_max = 4096;
16661                 vtotal_max = 4096;
16662         }
16663
16664         if (mode->hdisplay > hdisplay_max ||
16665             mode->hsync_start > htotal_max ||
16666             mode->hsync_end > htotal_max ||
16667             mode->htotal > htotal_max)
16668                 return MODE_H_ILLEGAL;
16669
16670         if (mode->vdisplay > vdisplay_max ||
16671             mode->vsync_start > vtotal_max ||
16672             mode->vsync_end > vtotal_max ||
16673             mode->vtotal > vtotal_max)
16674                 return MODE_V_ILLEGAL;
16675
16676         if (INTEL_GEN(dev_priv) >= 5) {
16677                 if (mode->hdisplay < 64 ||
16678                     mode->htotal - mode->hdisplay < 32)
16679                         return MODE_H_ILLEGAL;
16680
16681                 if (mode->vtotal - mode->vdisplay < 5)
16682                         return MODE_V_ILLEGAL;
16683         } else {
16684                 if (mode->htotal - mode->hdisplay < 32)
16685                         return MODE_H_ILLEGAL;
16686
16687                 if (mode->vtotal - mode->vdisplay < 3)
16688                         return MODE_V_ILLEGAL;
16689         }
16690
16691         return MODE_OK;
16692 }
16693
16694 enum drm_mode_status
16695 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16696                                 const struct drm_display_mode *mode)
16697 {
16698         int plane_width_max, plane_height_max;
16699
16700         /*
16701          * intel_mode_valid() should be
16702          * sufficient on older platforms.
16703          */
16704         if (INTEL_GEN(dev_priv) < 9)
16705                 return MODE_OK;
16706
16707         /*
16708          * Most people will probably want a fullscreen
16709          * plane so let's not advertize modes that are
16710          * too big for that.
16711          */
16712         if (INTEL_GEN(dev_priv) >= 11) {
16713                 plane_width_max = 5120;
16714                 plane_height_max = 4320;
16715         } else {
16716                 plane_width_max = 5120;
16717                 plane_height_max = 4096;
16718         }
16719
16720         if (mode->hdisplay > plane_width_max)
16721                 return MODE_H_ILLEGAL;
16722
16723         if (mode->vdisplay > plane_height_max)
16724                 return MODE_V_ILLEGAL;
16725
16726         return MODE_OK;
16727 }
16728
/*
 * drm_mode_config_funcs vtable: hooks the i915 framebuffer creation,
 * mode validation and atomic state handling into the DRM core.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .get_format_info = intel_get_format_info,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .mode_valid = intel_mode_valid,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
16740
16741 /**
16742  * intel_init_display_hooks - initialize the display modesetting hooks
16743  * @dev_priv: device private
16744  */
16745 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16746 {
16747         intel_init_cdclk_hooks(dev_priv);
16748
16749         if (INTEL_GEN(dev_priv) >= 9) {
16750                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16751                 dev_priv->display.get_initial_plane_config =
16752                         skylake_get_initial_plane_config;
16753                 dev_priv->display.crtc_compute_clock =
16754                         haswell_crtc_compute_clock;
16755                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16756                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16757         } else if (HAS_DDI(dev_priv)) {
16758                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16759                 dev_priv->display.get_initial_plane_config =
16760                         i9xx_get_initial_plane_config;
16761                 dev_priv->display.crtc_compute_clock =
16762                         haswell_crtc_compute_clock;
16763                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16764                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16765         } else if (HAS_PCH_SPLIT(dev_priv)) {
16766                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
16767                 dev_priv->display.get_initial_plane_config =
16768                         i9xx_get_initial_plane_config;
16769                 dev_priv->display.crtc_compute_clock =
16770                         ironlake_crtc_compute_clock;
16771                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
16772                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
16773         } else if (IS_CHERRYVIEW(dev_priv)) {
16774                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16775                 dev_priv->display.get_initial_plane_config =
16776                         i9xx_get_initial_plane_config;
16777                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16778                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16779                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16780         } else if (IS_VALLEYVIEW(dev_priv)) {
16781                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16782                 dev_priv->display.get_initial_plane_config =
16783                         i9xx_get_initial_plane_config;
16784                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16785                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16786                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16787         } else if (IS_G4X(dev_priv)) {
16788                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16789                 dev_priv->display.get_initial_plane_config =
16790                         i9xx_get_initial_plane_config;
16791                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16792                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16793                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16794         } else if (IS_PINEVIEW(dev_priv)) {
16795                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16796                 dev_priv->display.get_initial_plane_config =
16797                         i9xx_get_initial_plane_config;
16798                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16799                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16800                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16801         } else if (!IS_GEN(dev_priv, 2)) {
16802                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16803                 dev_priv->display.get_initial_plane_config =
16804                         i9xx_get_initial_plane_config;
16805                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16806                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16807                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16808         } else {
16809                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16810                 dev_priv->display.get_initial_plane_config =
16811                         i9xx_get_initial_plane_config;
16812                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16813                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16814                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16815         }
16816
16817         if (IS_GEN(dev_priv, 5)) {
16818                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16819         } else if (IS_GEN(dev_priv, 6)) {
16820                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16821         } else if (IS_IVYBRIDGE(dev_priv)) {
16822                 /* FIXME: detect B0+ stepping and use auto training */
16823                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16824         }
16825
16826         if (INTEL_GEN(dev_priv) >= 9)
16827                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16828         else
16829                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
16830
16831 }
16832
16833 void intel_modeset_init_hw(struct drm_i915_private *i915)
16834 {
16835         intel_update_cdclk(i915);
16836         intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
16837         i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
16838 }
16839
16840 /*
16841  * Calculate what we think the watermarks should be for the state we've read
16842  * out of the hardware and then immediately program those watermarks so that
16843  * we ensure the hardware settings match our internal state.
16844  *
16845  * We can calculate what we think WM's should be by creating a duplicate of the
16846  * current state (which was constructed during hardware readout) and running it
16847  * through the atomic check code to calculate new watermark values in the
16848  * state object.
16849  */
static void sanitize_watermarks(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state;
        struct intel_atomic_state *intel_state;
        struct intel_crtc *crtc;
        struct intel_crtc_state *crtc_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        int i;

        /* Only supported on platforms that use atomic watermark design */
        if (!dev_priv->display.optimize_watermarks)
                return;

        /*
         * We need to hold connection_mutex before calling duplicate_state so
         * that the connector loop is protected.
         */
        drm_modeset_acquire_init(&ctx, 0);
retry:
        /* Standard deadlock-avoidance dance: back off and retry on -EDEADLK. */
        ret = drm_modeset_lock_all_ctx(dev, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        } else if (WARN_ON(ret)) {
                goto fail;
        }

        /* Duplicate the state constructed during hardware readout. */
        state = drm_atomic_helper_duplicate_state(dev, &ctx);
        if (WARN_ON(IS_ERR(state)))
                goto fail;

        intel_state = to_intel_atomic_state(state);

        /*
         * Hardware readout is the only time we don't want to calculate
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
        if (!HAS_GMCH(dev_priv))
                intel_state->skip_intermediate_wm = true;

        ret = intel_atomic_check(dev, state);
        if (ret) {
                /*
                 * If we fail here, it means that the hardware appears to be
                 * programmed in a way that shouldn't be possible, given our
                 * understanding of watermark requirements.  This might mean a
                 * mistake in the hardware readout code or a mistake in the
                 * watermark calculations for a given platform.  Raise a WARN
                 * so that this is noticeable.
                 *
                 * If this actually happens, we'll have to just leave the
                 * BIOS-programmed watermarks untouched and hope for the best.
                 */
                WARN(true, "Could not determine valid watermarks for inherited state\n");
                goto put_state;
        }

        /* Write calculated watermark values back */
        for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
                crtc_state->wm.need_postvbl_update = true;
                dev_priv->display.optimize_watermarks(intel_state, crtc);

                /* Keep the committed crtc state's wm in sync as well. */
                to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
        }

put_state:
        drm_atomic_state_put(state);
fail:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}
16924
16925 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16926 {
16927         if (IS_GEN(dev_priv, 5)) {
16928                 u32 fdi_pll_clk =
16929                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16930
16931                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16932         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16933                 dev_priv->fdi_pll_freq = 270000;
16934         } else {
16935                 return;
16936         }
16937
16938         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16939 }
16940
/*
 * Commit the state read out at probe time so that all active planes have
 * fully computed software state before the first userspace modeset.
 */
static int intel_initial_commit(struct drm_device *dev)
{
        struct drm_atomic_state *state = NULL;
        struct drm_modeset_acquire_ctx ctx;
        struct intel_crtc *crtc;
        int ret = 0;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, 0);

retry:
        state->acquire_ctx = &ctx;

        /* Pull every active crtc and its planes into the commit. */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        intel_atomic_get_crtc_state(state, crtc);

                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto out;
                }

                if (crtc_state->hw.active) {
                        ret = drm_atomic_add_affected_planes(state, &crtc->base);
                        if (ret)
                                goto out;

                        /*
                         * FIXME hack to force a LUT update to avoid the
                         * plane update forcing the pipe gamma on without
                         * having a proper LUT loaded. Remove once we
                         * have readout for pipe gamma enable.
                         */
                        crtc_state->uapi.color_mgmt_changed = true;
                }
        }

        ret = drm_atomic_commit(state);

out:
        /* On lock contention, clear the state and restart the whole loop. */
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
16997
16998 static void intel_mode_config_init(struct drm_i915_private *i915)
16999 {
17000         struct drm_mode_config *mode_config = &i915->drm.mode_config;
17001
17002         drm_mode_config_init(&i915->drm);
17003
17004         mode_config->min_width = 0;
17005         mode_config->min_height = 0;
17006
17007         mode_config->preferred_depth = 24;
17008         mode_config->prefer_shadow = 1;
17009
17010         mode_config->allow_fb_modifiers = true;
17011
17012         mode_config->funcs = &intel_mode_funcs;
17013
17014         /*
17015          * Maximum framebuffer dimensions, chosen to match
17016          * the maximum render engine surface size on gen4+.
17017          */
17018         if (INTEL_GEN(i915) >= 7) {
17019                 mode_config->max_width = 16384;
17020                 mode_config->max_height = 16384;
17021         } else if (INTEL_GEN(i915) >= 4) {
17022                 mode_config->max_width = 8192;
17023                 mode_config->max_height = 8192;
17024         } else if (IS_GEN(i915, 3)) {
17025                 mode_config->max_width = 4096;
17026                 mode_config->max_height = 4096;
17027         } else {
17028                 mode_config->max_width = 2048;
17029                 mode_config->max_height = 2048;
17030         }
17031
17032         if (IS_I845G(i915) || IS_I865G(i915)) {
17033                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17034                 mode_config->cursor_height = 1023;
17035         } else if (IS_GEN(i915, 2)) {
17036                 mode_config->cursor_width = 64;
17037                 mode_config->cursor_height = 64;
17038         } else {
17039                 mode_config->cursor_width = 256;
17040                 mode_config->cursor_height = 256;
17041         }
17042 }
17043
/*
 * One-time display/modeset initialization at driver load: workqueues,
 * mode config, power/clock state, crtcs, outputs, hardware state readout
 * and the initial atomic commit. The call order below is significant.
 * Returns 0 on success or a negative errno.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
        struct intel_crtc *crtc;
        int ret;

        /* NOTE(review): workqueue allocations are not checked for failure. */
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

        intel_mode_config_init(i915);

        ret = intel_bw_init(i915);
        if (ret)
                return ret;

        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);

        intel_init_quirks(i915);

        intel_fbc_init(i915);

        intel_init_pm(i915);

        intel_panel_sanitize_ssc(i915);

        intel_gmbus_setup(i915);

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_NUM_PIPES(i915),
                      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

        /* Register a crtc for every pipe, if the display is usable at all. */
        if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
                                drm_mode_config_cleanup(dev);
                                return ret;
                        }
                }
        }

        intel_shared_dpll_init(dev);
        intel_update_fdi_pll_freq(i915);

        intel_update_czclk(i915);
        intel_modeset_init_hw(i915);

        intel_hdcp_component_init(i915);

        if (i915->max_cdclk_freq == 0)
                intel_update_max_cdclk(i915);

        /* Just disable it once at startup */
        intel_vga_disable(i915);
        intel_setup_outputs(i915);

        /* Read the current hardware state into our software state. */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                i915->display.get_initial_plane_config(crtc, &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH(i915))
                sanitize_watermarks(dev);

        /*
         * Force all active planes to recompute their states. So that on
         * mode_setcrtc after probe, all the intel_plane_state variables
         * are already calculated and there is no assert_plane warnings
         * during bootup.
         */
        ret = intel_initial_commit(dev);
        if (ret)
                DRM_DEBUG_KMS("Initial commit in probe failed.\n");

        /* The initial-commit failure above is deliberately non-fatal. */
        return 0;
}
17150
/*
 * Force-enable a pipe with a fixed 640x480@60 timing (i830 force quirk).
 * The exact register write/readback/delay sequence below is
 * order-sensitive; do not reorder.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Sanity-check that the divider set yields the expected dotclock. */
        WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

        DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                      pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        I915_WRITE(FP0(pipe), fp);
        I915_WRITE(FP1(pipe), fp);

        /* Program the fixed 640x480 pipe timings. */
        I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                I915_WRITE(DPLL(pipe), dpll);
                POSTING_READ(DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        POSTING_READ(PIPECONF(pipe));

        /* Confirm the pipe actually started scanning out. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
17220
/*
 * Disable a pipe that was force-enabled by the i830 quirk: turn off the
 * pipe, wait for the scanline to stop, then shut down its DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));

        /* No plane or cursor should still be enabled at this point. */
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
        WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));

        intel_wait_for_pipe_scanline_stopped(crtc);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
17242
17243 static void
17244 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17245 {
17246         struct intel_crtc *crtc;
17247
17248         if (INTEL_GEN(dev_priv) >= 4)
17249                 return;
17250
17251         for_each_intel_crtc(&dev_priv->drm, crtc) {
17252                 struct intel_plane *plane =
17253                         to_intel_plane(crtc->base.primary);
17254                 struct intel_crtc *plane_crtc;
17255                 enum pipe pipe;
17256
17257                 if (!plane->get_hw_state(plane, &pipe))
17258                         continue;
17259
17260                 if (pipe == crtc->pipe)
17261                         continue;
17262
17263                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17264                               plane->base.base.id, plane->base.name);
17265
17266                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17267                 intel_plane_disable_noatomic(plane_crtc, plane);
17268         }
17269 }
17270
17271 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17272 {
17273         struct drm_device *dev = crtc->base.dev;
17274         struct intel_encoder *encoder;
17275
17276         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17277                 return true;
17278
17279         return false;
17280 }
17281
17282 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17283 {
17284         struct drm_device *dev = encoder->base.dev;
17285         struct intel_connector *connector;
17286
17287         for_each_connector_on_encoder(dev, &encoder->base, connector)
17288                 return connector;
17289
17290         return NULL;
17291 }
17292
17293 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17294                               enum pipe pch_transcoder)
17295 {
17296         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17297                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17298 }
17299
17300 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
17301 {
17302         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
17303         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
17304         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
17305
17306         if (INTEL_GEN(dev_priv) >= 9 ||
17307             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17308                 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
17309                 u32 val;
17310
17311                 if (transcoder_is_dsi(cpu_transcoder))
17312                         return;
17313
17314                 val = I915_READ(reg);
17315                 val &= ~HSW_FRAME_START_DELAY_MASK;
17316                 val |= HSW_FRAME_START_DELAY(0);
17317                 I915_WRITE(reg, val);
17318         } else {
17319                 i915_reg_t reg = PIPECONF(cpu_transcoder);
17320                 u32 val;
17321
17322                 val = I915_READ(reg);
17323                 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
17324                 val |= PIPECONF_FRAME_START_DELAY(0);
17325                 I915_WRITE(reg, val);
17326         }
17327
17328         if (!crtc_state->has_pch_encoder)
17329                 return;
17330
17331         if (HAS_PCH_IBX(dev_priv)) {
17332                 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
17333                 u32 val;
17334
17335                 val = I915_READ(reg);
17336                 val &= ~TRANS_FRAME_START_DELAY_MASK;
17337                 val |= TRANS_FRAME_START_DELAY(0);
17338                 I915_WRITE(reg, val);
17339         } else {
17340                 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
17341                 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
17342                 u32 val;
17343
17344                 val = I915_READ(reg);
17345                 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
17346                 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
17347                 I915_WRITE(reg, val);
17348         }
17349 }
17350
/*
 * Sanitize a single crtc's state after hardware readout: strip BIOS
 * leftovers (frame start delays, extra planes, background color), turn
 * the pipe off if it has no encoders, and set up FIFO underrun
 * reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
17417
17418 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17419 {
17420         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17421
17422         /*
17423          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17424          * the hardware when a high res displays plugged in. DPLL P
17425          * divider is zero, and the pipe timings are bonkers. We'll
17426          * try to disable everything in that case.
17427          *
17428          * FIXME would be nice to be able to sanitize this state
17429          * without several WARNs, but for now let's take the easy
17430          * road.
17431          */
17432         return IS_GEN(dev_priv, 6) &&
17433                 crtc_state->hw.active &&
17434                 crtc_state->shared_dpll &&
17435                 crtc_state->port_clock == 0;
17436 }
17437
/*
 * Sanitize an encoder after hardware readout: if it claims to drive a
 * crtc that isn't actually active (or whose DPLL config is bogus),
 * manually run the disable hooks and clamp the connector state to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a crtc with a misprogrammed DPLL as inactive (see
	 * has_bogus_dpll_config()) so the encoder gets shut down too. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* Hooks are optional; call whichever are implemented. */
			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* Restore the saved best_encoder pointer. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
17504
17505 /* FIXME read out full plane state for all planes */
17506 static void readout_plane_state(struct drm_i915_private *dev_priv)
17507 {
17508         struct intel_plane *plane;
17509         struct intel_crtc *crtc;
17510
17511         for_each_intel_plane(&dev_priv->drm, plane) {
17512                 struct intel_plane_state *plane_state =
17513                         to_intel_plane_state(plane->base.state);
17514                 struct intel_crtc_state *crtc_state;
17515                 enum pipe pipe = PIPE_A;
17516                 bool visible;
17517
17518                 visible = plane->get_hw_state(plane, &pipe);
17519
17520                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17521                 crtc_state = to_intel_crtc_state(crtc->base.state);
17522
17523                 intel_set_plane_visible(crtc_state, plane_state, visible);
17524
17525                 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
17526                               plane->base.base.id, plane->base.name,
17527                               enableddisabled(visible), pipe_name(pipe));
17528         }
17529
17530         for_each_intel_crtc(&dev_priv->drm, crtc) {
17531                 struct intel_crtc_state *crtc_state =
17532                         to_intel_crtc_state(crtc->base.state);
17533
17534                 fixup_active_planes(crtc_state);
17535         }
17536 }
17537
/*
 * Read back the full display hardware state (crtcs, planes, DPLLs,
 * encoders, connectors) and rebuild the matching software state, then
 * derive pixel rate / cdclk / voltage / bandwidth bookkeeping from it.
 * The result is sanitized afterwards by intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	/* Crtcs: reset sw state, then read back enable/active + pipe config. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	readout_plane_state(dev_priv);

	/* Shared DPLLs: read on/off state and rebuild crtc usage masks. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/* EHL DPLL4 needs a power domain reference while it is on. */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Encoders: link each enabled encoder to its crtc and read config. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0; /* i.e. PIPE_A; overwritten by get_hw_state() when enabled */

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Connectors: set dpms/encoder links and the crtc's mask bookkeeping. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive sw-only state (modes, pixel rate, cdclk, bw) per crtc. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
17737
17738 static void
17739 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17740 {
17741         struct intel_encoder *encoder;
17742
17743         for_each_intel_encoder(&dev_priv->drm, encoder) {
17744                 struct intel_crtc_state *crtc_state;
17745
17746                 if (!encoder->get_power_domains)
17747                         continue;
17748
17749                 /*
17750                  * MST-primary and inactive encoders don't have a crtc state
17751                  * and neither of these require any power domain references.
17752                  */
17753                 if (!encoder->base.crtc)
17754                         continue;
17755
17756                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17757                 encoder->get_power_domains(encoder, crtc_state);
17758         }
17759 }
17760
17761 static void intel_early_display_was(struct drm_i915_private *dev_priv)
17762 {
17763         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
17764         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
17765                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
17766                            DARBF_GATING_DIS);
17767
17768         if (IS_HASWELL(dev_priv)) {
17769                 /*
17770                  * WaRsPkgCStateDisplayPMReq:hsw
17771                  * System hang if this isn't done before disabling all planes!
17772                  */
17773                 I915_WRITE(CHICKEN_PAR1_1,
17774                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
17775         }
17776 }
17777
17778 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17779                                        enum port port, i915_reg_t hdmi_reg)
17780 {
17781         u32 val = I915_READ(hdmi_reg);
17782
17783         if (val & SDVO_ENABLE ||
17784             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17785                 return;
17786
17787         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17788                       port_name(port));
17789
17790         val &= ~SDVO_PIPE_SEL_MASK;
17791         val |= SDVO_PIPE_SEL(PIPE_A);
17792
17793         I915_WRITE(hdmi_reg, val);
17794 }
17795
17796 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17797                                      enum port port, i915_reg_t dp_reg)
17798 {
17799         u32 val = I915_READ(dp_reg);
17800
17801         if (val & DP_PORT_EN ||
17802             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17803                 return;
17804
17805         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17806                       port_name(port));
17807
17808         val &= ~DP_PIPE_SEL_MASK;
17809         val |= DP_PIPE_SEL(PIPE_A);
17810
17811         I915_WRITE(dp_reg, val);
17812 }
17813
/* Sanitize the transcoder select bits on all disabled IBX PCH ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
17836
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * state: read everything back via intel_modeset_readout_hw_state(),
 * then fix up inconsistent BIOS leftovers (TypeC ports, PCH port
 * transcoder selects, plane mappings, encoders, crtcs, unused DPLLs)
 * and read out watermark state. Runs with an INIT power reference held.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL left on by the BIOS but unused by us. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and where supported, sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* After sanitizing, no crtc should still demand power domains. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
17938
/*
 * Restore the atomic state saved at suspend time (if any). Takes all
 * modeset locks with the standard EDEADLK backoff dance, commits the
 * saved state, and drops the reference on it.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry taking all modeset locks until we stop deadlocking. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	/* state == NULL means "re-commit the current state" here. */
	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
17972
17973 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17974 {
17975         struct intel_connector *connector;
17976         struct drm_connector_list_iter conn_iter;
17977
17978         /* Kill all the work that may have been queued by hpd. */
17979         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17980         for_each_intel_connector_iter(connector, &conn_iter) {
17981                 if (connector->modeset_retry_work.func)
17982                         cancel_work_sync(&connector->modeset_retry_work);
17983                 if (connector->hdcp.shim) {
17984                         cancel_delayed_work_sync(&connector->hdcp.check_work);
17985                         cancel_work_sync(&connector->hdcp.prop_work);
17986                 }
17987         }
17988         drm_connector_list_iter_end(&conn_iter);
17989 }
17990
/*
 * Tear down all display/modeset state on driver removal.  The teardown
 * order below is load-bearing: work is flushed first, then interrupts
 * and polling are shut off, then fbdev/MST consumers, and only then is
 * the mode config itself cleaned up.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Let any queued flip/modeset work finish before tearing down. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* Drain deferred atomic-state frees; the free list must be empty. */
	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues can go only after all work queued on them has run. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18042
18043 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18044
/*
 * Raw display register snapshot captured at error time by
 * intel_display_capture_error_state() and formatted into the error
 * state dump by intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2, hsw/bdw only */

	/* Per-pipe cursor plane registers. */
	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* NOTE(review): never written by capture — confirm it's intentional */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers only read if pipe powered */
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT, GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers; some are gen-dependent. */
	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE, gen <= 3 only */
		u32 pos;	/* DSPPOS, gen <= 3 only */
		u32 addr;	/* DSPADDR, gen <= 7 && !hsw */
		u32 surface;	/* DSPSURF, gen >= 4 */
		u32 tile_offset;	/* DSPTILEOFF, gen >= 4 */
	} plane[I915_MAX_PIPES];

	/* One slot per entry in capture's transcoders[] list (A..D, eDP). */
	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;	/* timings only read if powered */
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
18087
18088 struct intel_display_error_state *
18089 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
18090 {
18091         struct intel_display_error_state *error;
18092         int transcoders[] = {
18093                 TRANSCODER_A,
18094                 TRANSCODER_B,
18095                 TRANSCODER_C,
18096                 TRANSCODER_D,
18097                 TRANSCODER_EDP,
18098         };
18099         int i;
18100
18101         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
18102
18103         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
18104                 return NULL;
18105
18106         error = kzalloc(sizeof(*error), GFP_ATOMIC);
18107         if (error == NULL)
18108                 return NULL;
18109
18110         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18111                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
18112
18113         for_each_pipe(dev_priv, i) {
18114                 error->pipe[i].power_domain_on =
18115                         __intel_display_power_is_enabled(dev_priv,
18116                                                          POWER_DOMAIN_PIPE(i));
18117                 if (!error->pipe[i].power_domain_on)
18118                         continue;
18119
18120                 error->cursor[i].control = I915_READ(CURCNTR(i));
18121                 error->cursor[i].position = I915_READ(CURPOS(i));
18122                 error->cursor[i].base = I915_READ(CURBASE(i));
18123
18124                 error->plane[i].control = I915_READ(DSPCNTR(i));
18125                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
18126                 if (INTEL_GEN(dev_priv) <= 3) {
18127                         error->plane[i].size = I915_READ(DSPSIZE(i));
18128                         error->plane[i].pos = I915_READ(DSPPOS(i));
18129                 }
18130                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18131                         error->plane[i].addr = I915_READ(DSPADDR(i));
18132                 if (INTEL_GEN(dev_priv) >= 4) {
18133                         error->plane[i].surface = I915_READ(DSPSURF(i));
18134                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
18135                 }
18136
18137                 error->pipe[i].source = I915_READ(PIPESRC(i));
18138
18139                 if (HAS_GMCH(dev_priv))
18140                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
18141         }
18142
18143         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18144                 enum transcoder cpu_transcoder = transcoders[i];
18145
18146                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
18147                         continue;
18148
18149                 error->transcoder[i].available = true;
18150                 error->transcoder[i].power_domain_on =
18151                         __intel_display_power_is_enabled(dev_priv,
18152                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18153                 if (!error->transcoder[i].power_domain_on)
18154                         continue;
18155
18156                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
18157
18158                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
18159                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
18160                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
18161                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
18162                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
18163                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
18164                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
18165         }
18166
18167         return error;
18168 }
18169
/* Shorthand for appending formatted text to the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18171
18172 void
18173 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
18174                                 struct intel_display_error_state *error)
18175 {
18176         struct drm_i915_private *dev_priv = m->i915;
18177         int i;
18178
18179         if (!error)
18180                 return;
18181
18182         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
18183         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18184                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
18185                            error->power_well_driver);
18186         for_each_pipe(dev_priv, i) {
18187                 err_printf(m, "Pipe [%d]:\n", i);
18188                 err_printf(m, "  Power: %s\n",
18189                            onoff(error->pipe[i].power_domain_on));
18190                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
18191                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
18192
18193                 err_printf(m, "Plane [%d]:\n", i);
18194                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
18195                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
18196                 if (INTEL_GEN(dev_priv) <= 3) {
18197                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
18198                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
18199                 }
18200                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18201                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
18202                 if (INTEL_GEN(dev_priv) >= 4) {
18203                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
18204                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
18205                 }
18206
18207                 err_printf(m, "Cursor [%d]:\n", i);
18208                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
18209                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
18210                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
18211         }
18212
18213         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18214                 if (!error->transcoder[i].available)
18215                         continue;
18216
18217                 err_printf(m, "CPU transcoder: %s\n",
18218                            transcoder_name(error->transcoder[i].cpu_transcoder));
18219                 err_printf(m, "  Power: %s\n",
18220                            onoff(error->transcoder[i].power_domain_on));
18221                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
18222                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
18223                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
18224                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
18225                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
18226                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
18227                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
18228         }
18229 }
18230
18231 #endif