/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "gt/intel_llc.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
        if (HAS_LLC(dev_priv)) {
                /*
                 * WaCompressedResourceDisplayNewHashMode:skl,kbl
                 * Display WA #0390: skl,kbl
                 *
                 * Must match Sampler, Pixel Back End, and Media. See
                 * WaCompressedResourceSamplerPbeMediaNewHashMode.
                 */
                I915_WRITE(CHICKEN_PAR1_1,
                           I915_READ(CHICKEN_PAR1_1) |
                           SKL_DE_COMPRESSED_HASH_MODE);
        }

        /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
        I915_WRITE(CHICKEN_PAR1_1,
                   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

        /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
        I915_WRITE(GEN8_CHICKEN_DCPR_1,
                   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

        /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
        /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
                   DISP_FBC_WM_DIS |
                   DISP_FBC_MEMORY_WAKE);

        /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
        I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
                   ILK_DPFC_DISABLE_DUMMY0);

        if (IS_SKYLAKE(dev_priv)) {
                /* WaDisableDopClockGating */
                I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
                           & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        }
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
        gen9_init_clock_gating(dev_priv);

        /* WaDisableSDEUnitClockGating:bxt */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

        /*
         * FIXME:
         * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
         */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

        /*
         * Wa: Backlight PWM may stop in the asserted state, causing backlight
         * to stay fully on.
         */
        I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                   PWM1_GATING_DIS | PWM2_GATING_DIS);

        /*
         * Lower the display internal timeout.
         * This is needed to avoid any hard hangs when the DSI port PLL
         * is off and an MMIO access is attempted by any privileged
         * application, using batch buffers or any other means.
         */
        I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
        gen9_init_clock_gating(dev_priv);

        /*
         * WaDisablePWMClockGating:glk
         * Backlight PWM may stop in the asserted state, causing backlight
         * to stay fully on.
         */
        I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                   PWM1_GATING_DIS | PWM2_GATING_DIS);

        /* WaDDIIOTimeout:glk */
        if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
                u32 val = I915_READ(CHICKEN_MISC_2);
                val &= ~(GLK_CL0_PWR_DOWN |
                         GLK_CL1_PWR_DOWN |
                         GLK_CL2_PWR_DOWN);
                I915_WRITE(CHICKEN_MISC_2, val);
        }
}

static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
        u16 ddrpll, csipll;

        ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
        csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
                        ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
                        csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }
}

static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
                                                         bool is_ddr3,
                                                         int fsb,
                                                         int mem)
{
        const struct cxsr_latency *latency;
        int i;

        if (fsb == 0 || mem == 0)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
                latency = &cxsr_latency_table[i];
                if (is_desktop == latency->is_desktop &&
                    is_ddr3 == latency->is_ddr3 &&
                    fsb == latency->fsb_freq && mem == latency->mem_freq)
                        return latency;
        }

        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

        return NULL;
}
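
/*
 * A worked lookup, as a hedged illustration (the values come straight
 * from the table above, not from hardware probing): a mobile part
 * (is_desktop == 0) with DDR3, fsb == 667 and mem == 800 matches the
 * {0, 1, 667, 800, 5998, 35998, 6460, 36460} row, whose four latency
 * columns then feed the SR and HPLL-off watermark computations in
 * pnv_update_wm() below. Any unmatched combination disables CxSR.
 */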

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        vlv_punit_get(dev_priv);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
        if (enable)
                val &= ~FORCE_DDR_HIGH_FREQ;
        else
                val |= FORCE_DDR_HIGH_FREQ;
        val &= ~FORCE_DDR_LOW_FREQ;
        val |= FORCE_DDR_FREQ_REQ_ACK;
        vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
                      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
                drm_err(&dev_priv->drm,
                        "timed out waiting for Punit DDR DVFS request\n");

        vlv_punit_put(dev_priv);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        vlv_punit_get(dev_priv);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
        if (enable)
                val |= DSP_MAXFIFO_PM5_ENABLE;
        else
                val &= ~DSP_MAXFIFO_PM5_ENABLE;
        vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

        vlv_punit_put(dev_priv);
}

#define FW_WM(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

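/*
 * Expansion example (illustrative value): FW_WM(0x25, SR) becomes
 * ((0x25 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the watermark is
 * shifted into its DSPFW register field and any overflowing bits are
 * masked off before the result is OR'ed into DSPFW1/2/3 by the
 * callers below.
 */
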
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
        bool was_enabled;
        u32 val;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
                I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
                POSTING_READ(FW_BLC_SELF_VLV);
        } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
                was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
                I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_PINEVIEW(dev_priv)) {
                val = I915_READ(DSPFW3);
                was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
                if (enable)
                        val |= PINEVIEW_SELF_REFRESH_EN;
                else
                        val &= ~PINEVIEW_SELF_REFRESH_EN;
                I915_WRITE(DSPFW3, val);
                POSTING_READ(DSPFW3);
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
                was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
                val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
                               _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
                I915_WRITE(FW_BLC_SELF, val);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_I915GM(dev_priv)) {
                /*
                 * FIXME can't find a bit like this for 915G, and
                 * yet it does have the related watermark in
                 * FW_BLC_SELF. What's going on?
                 */
                was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
                val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                               _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
                I915_WRITE(INSTPM, val);
                POSTING_READ(INSTPM);
        } else {
                return false;
        }

        trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

        drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
                    enableddisabled(enable),
                    enableddisabled(was_enabled));

        return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
        bool ret;

        mutex_lock(&dev_priv->wm.wm_mutex);
        ret = _intel_set_memory_cxsr(dev_priv, enable);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->wm.vlv.cxsr = enable;
        else if (IS_G4X(dev_priv))
                dev_priv->wm.g4x.cxsr = enable;
        mutex_unlock(&dev_priv->wm.wm_mutex);

        return ret;
}

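/*
 * A minimal usage sketch distilled from the kernel-doc above (not a
 * verbatim caller from this file): before writing plane registers
 * that CxSR would keep from latching, a caller would do roughly
 *
 *	intel_set_memory_cxsr(dev_priv, false);
 *	// wait for the next vblank so the hardware has really left
 *	// CxSR (the conditions are re-evaluated at frame start) ...
 *	// ... write the plane registers ...
 *	intel_set_memory_cxsr(dev_priv, true);
 *
 * The extra frame of waiting is the point: disallowing CxSR in the
 * same frame as the plane update is not sufficient.
 */
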
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
        ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

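/*
 * Worked expansion with illustrative register values: for pipe B
 * sprite0 the call below is VLV_FIFO_START(dsparb, dsparb2, 16, 8).
 * With dsparb == 0x00400000 and dsparb2 == 0x00000100, the low byte
 * is (0x00400000 >> 16) & 0xff == 0x40 and the ninth bit is
 * ((0x00000100 >> 8) & 0x1) << 8 == 0x100, so the FIFO start is
 * 0x140, i.e. 320 cachelines into the shared FIFO.
 */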
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
        enum pipe pipe = crtc->pipe;
        int sprite0_start, sprite1_start;

        switch (pipe) {
                u32 dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
                break;
        case PIPE_B:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
                break;
        case PIPE_C:
                dsparb2 = I915_READ(DSPARB2);
                dsparb3 = I915_READ(DSPARB3);
                sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
                sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
                break;
        default:
                MISSING_CASE(pipe);
                return;
        }

        fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
        fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
        fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
        fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
        u32 dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        if (i9xx_plane == PLANE_B)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

        drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
                    dsparb, plane_name(i9xx_plane), size);

        return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
        u32 dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x1ff;
        if (i9xx_plane == PLANE_B)
                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */

        drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
                    dsparb, plane_name(i9xx_plane), size);

        return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
        u32 dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 2; /* Convert to cachelines */

        drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
                    dsparb, plane_name(i9xx_plane), size);

        return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
        .fifo_size = I945_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
        .fifo_size = I915_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM/2,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
        .fifo_size = I830_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact that the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
                                     unsigned int cpp,
                                     unsigned int latency)
{
        u64 ret;

        ret = mul_u32_u32(pixel_rate, cpp * latency);
        ret = DIV_ROUND_UP_ULL(ret, 10000);

        return ret;
}

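/*
 * A worked example of the method 1 formula (numbers are illustrative,
 * not from any particular platform): with pixel_rate == 148500 kHz
 * (1080p@60), cpp == 4 and latency == 50 (i.e. 5 usec in 0.1 usec
 * units), the FIFO drains 148500 * 4 * 50 / 10000 == 2970 bytes while
 * the memory path wakes up, so 2970 is the returned watermark.
 */
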
/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact that the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
                                     unsigned int htotal,
                                     unsigned int width,
                                     unsigned int cpp,
                                     unsigned int latency)
{
        unsigned int ret;

        /*
         * FIXME remove once all users are computing
         * watermarks in the correct place.
         */
        if (WARN_ON_ONCE(htotal == 0))
                htotal = 1;

        ret = (latency * pixel_rate) / (htotal * 10000);
        ret = (ret + 1) * width * cpp;

        return ret;
}

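/*
 * A worked example of the method 2 formula (illustrative numbers):
 * with pixel_rate == 148500 kHz, htotal == 2200, width == 1920,
 * cpp == 4 and latency == 50, the line count is
 * (50 * 148500) / (2200 * 10000) == 0 in integer math, so after the
 * +1 rounding the result is 1 * 1920 * 4 == 7680 bytes, i.e. one
 * full line of the plane must fit in the FIFO to cover the 5 usec
 * latency.
 */
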
/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
                                       const struct intel_watermark_params *wm,
                                       int fifo_size, int cpp,
                                       unsigned int latency_ns)
{
        int entries, wm_size;

        /*
         * Note: we need to make sure we don't overflow for various clock &
         * latency values.
         * Clocks go from a few thousand to several hundred thousand, and
         * latency is usually a few thousand ns.
         */
        entries = intel_wm_method1(pixel_rate, cpp,
                                   latency_ns / 100);
        entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
                wm->guard_size;
        DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

        wm_size = fifo_size - entries;
        DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

        /* Don't promote wm_size to unsigned... */
        if (wm_size > wm->max_wm)
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;

        /*
         * Bspec seems to indicate that the value shouldn't be lower than
         * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
         * Let's go for 8 which is the burst size since certain platforms
         * already use a hardcoded 8 (which is what the spec says should be
         * done).
         */
        if (wm_size <= 8)
                wm_size = 8;

        return wm_size;
}

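/*
 * End-to-end example (illustrative; a 512 entry FIFO is assumed here
 * purely for the arithmetic): with pixel_rate == 148500, cpp == 4 and
 * latency_ns == 5000, intel_wm_method1() returns 2970 bytes; with a
 * 64 byte cacheline that is DIV_ROUND_UP(2970, 64) == 47 entries,
 * plus a guard of 2 makes 49, so the watermark level would be
 * 512 - 49 == 463 entries (before the max_wm/default_wm clamping).
 */
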
static bool is_disabling(int old, int new, int threshold)
{
        return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
        return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
        return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
                                   const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

        /* FIXME check the 'enable' instead */
        if (!crtc_state->hw.active)
                return false;

        /*
         * Treat cursor with fb as always visible since cursor updates
         * can happen faster than the vrefresh rate, and the current
         * watermark code doesn't handle that correctly. Cursor updates
         * which set/clear the fb or change the cursor size are going
         * to get throttled by intel_legacy_cursor_update() to work
         * around this problem with the watermark code.
         */
        if (plane->id == PLANE_CURSOR)
                return plane_state->hw.fb != NULL;
        else
                return plane_state->uapi.visible;
}

static bool intel_crtc_active(struct intel_crtc *crtc)
{
        /* Be paranoid as we can arrive here with only partial
         * state retrieved from the hardware during setup.
         *
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
         * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
         * crtc->state->active once we have proper CRTC states wired up
         * for atomic.
         */
        return crtc->active && crtc->base.primary->state->fb &&
                crtc->config->hw.adjusted_mode.crtc_clock;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc, *enabled = NULL;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
                }
        }

        return enabled;
}

static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
        struct intel_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned int wm;

        latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
                                         dev_priv->is_ddr3,
                                         dev_priv->fsb_freq,
                                         dev_priv->mem_freq);
        if (!latency) {
                drm_dbg_kms(&dev_priv->drm,
                            "Unknown FSB/MEM found, disable CxSR\n");
                intel_set_memory_cxsr(dev_priv, false);
                return;
        }

        crtc = single_enabled_crtc(dev_priv);
        if (crtc) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc->config->hw.adjusted_mode;
                const struct drm_framebuffer *fb =
                        crtc->base.primary->state->fb;
                int cpp = fb->format->cpp[0];
                int clock = adjusted_mode->crtc_clock;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pnv_display_wm,
                                        pnv_display_wm.fifo_size,
                                        cpp, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= FW_WM(wm, SR);
                I915_WRITE(DSPFW1, reg);
                drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

                /* cursor SR */
                wm = intel_calculate_wm(clock, &pnv_cursor_wm,
                                        pnv_display_wm.fifo_size,
                                        4, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= FW_WM(wm, CURSOR_SR);
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
                                        pnv_display_hplloff_wm.fifo_size,
                                        cpp, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= FW_WM(wm, HPLL_SR);
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
                                        pnv_display_hplloff_wm.fifo_size,
                                        4, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= FW_WM(wm, HPLL_CURSOR);
                I915_WRITE(DSPFW3, reg);
                drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

                intel_set_memory_cxsr(dev_priv, true);
        } else {
                intel_set_memory_cxsr(dev_priv, false);
        }
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
        int tlb_miss = fifo_size * 64 - width * cpp * 8;

        return max(0, tlb_miss);
}

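/*
 * Worked example (illustrative numbers): a 127 cacheline FIFO holds
 * 127 * 64 == 8128 bytes. For a 256 pixel wide, 4 bpp plane, eight
 * whole lines are 256 * 4 * 8 == 8192 bytes, which does not fit, so
 * tlb_miss is negative and no adjustment is applied. Shrink the plane
 * to 128 pixels and eight lines are only 4096 bytes, so the watermark
 * is bumped upwards by 8128 - 4096 == 4032.
 */
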
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct g4x_wm_values *wm)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe)
                trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

        I915_WRITE(DSPFW1,
                   FW_WM(wm->sr.plane, SR) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
        I915_WRITE(DSPFW2,
                   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
                   FW_WM(wm->sr.fbc, FBC_SR) |
                   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
        I915_WRITE(DSPFW3,
                   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
                   FW_WM(wm->sr.cursor, CURSOR_SR) |
                   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
                   FW_WM(wm->hpll.plane, HPLL_SR));

        POSTING_READ(DSPFW1);
}

#define FW_WM_VLV(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct vlv_wm_values *wm)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

                I915_WRITE(VLV_DDL(pipe),
                           (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
                           (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
                           (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
                           (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
        }

        /*
         * Zero the (unused) WM1 watermarks, and also clear all the
         * high order bits so that there are no out of bounds values
         * present in the registers during the reprogramming.
         */
        I915_WRITE(DSPHOWM, 0);
        I915_WRITE(DSPHOWM1, 0);
        I915_WRITE(DSPFW4, 0);
        I915_WRITE(DSPFW5, 0);
        I915_WRITE(DSPFW6, 0);

        I915_WRITE(DSPFW1,
                   FW_WM(wm->sr.plane, SR) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
                   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
                   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
        I915_WRITE(DSPFW2,
                   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
                   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
        I915_WRITE(DSPFW3,
                   FW_WM(wm->sr.cursor, CURSOR_SR));

        if (IS_CHERRYVIEW(dev_priv)) {
                I915_WRITE(DSPFW7_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
                I915_WRITE(DSPFW8_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
                           FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
                I915_WRITE(DSPFW9_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
        } else {
                I915_WRITE(DSPFW7,
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
        }

        POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
        /* all latencies in usec */
        dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
        dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
        dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

        dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
        /*
         * DSPCNTR[13] supposedly controls whether the
         * primary plane can use the FIFO space otherwise
         * reserved for the sprite plane. It's not 100% clear
         * what the actual FIFO size is, but it looks like we
         * can happily set both primary and sprite watermarks
         * up to 127 cachelines. So that would seem to mean
         * that either DSPCNTR[13] doesn't do anything, or that
         * the total FIFO is >= 256 cachelines in size. Either
         * way, we don't seem to have to worry about this
         * repartitioning as the maximum watermark value the
         * register can hold for each plane is lower than the
         * minimum FIFO size.
         */
        switch (plane_id) {
        case PLANE_CURSOR:
                return 63;
        case PLANE_PRIMARY:
                return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
        case PLANE_SPRITE0:
                return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
        default:
                MISSING_CASE(plane_id);
                return 0;
        }
}

static int g4x_fbc_fifo_size(int level)
{
        switch (level) {
        case G4X_WM_LEVEL_SR:
                return 7;
        case G4X_WM_LEVEL_HPLL:
                return 15;
        default:
                MISSING_CASE(level);
                return 0;
        }
}

static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state,
                          int level)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;
        unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
        unsigned int clock, htotal, cpp, width, wm;

        if (latency == 0)
                return USHRT_MAX;

        if (!intel_wm_plane_visible(crtc_state, plane_state))
                return 0;

        cpp = plane_state->hw.fb->format->cpp[0];

        /*
         * Not 100% sure which way ELK should go here as the
         * spec only says CL/CTG should assume 32bpp and BW
         * doesn't need to. But as these things followed the
         * mobile vs. desktop lines on gen3 as well, let's
         * assume ELK doesn't need this.
         *
         * The spec also fails to list such a restriction for
         * the HPLL watermark, which seems a little strange.
         * Let's use 32bpp for the HPLL watermark as well.
         */
        if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
            level != G4X_WM_LEVEL_NORMAL)
                cpp = max(cpp, 4u);

        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;

        width = drm_rect_width(&plane_state->uapi.dst);

        if (plane->id == PLANE_CURSOR) {
                wm = intel_wm_method2(clock, htotal, width, cpp, latency);
        } else if (plane->id == PLANE_PRIMARY &&
                   level == G4X_WM_LEVEL_NORMAL) {
                wm = intel_wm_method1(clock, cpp, latency);
        } else {
                unsigned int small, large;

                small = intel_wm_method1(clock, cpp, latency);
                large = intel_wm_method2(clock, htotal, width, cpp, latency);

                wm = min(small, large);
        }

        wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
                              width, cpp);

        wm = DIV_ROUND_UP(wm, 64) + 2;

        return min_t(unsigned int, wm, USHRT_MAX);
}
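
/*
 * Illustrative walk-through of the small/large selection above (the
 * numbers are made up, not taken from Bspec): with clock == 148500,
 * htotal == 2200, width == 1920, cpp == 4 and latency == 120 (the
 * 12 usec SR latency * 10), method 1 gives 148500 * 4 * 120 / 10000
 * == 7128 bytes while method 2 gives ((120 * 148500) / (2200 * 10000)
 * + 1) * 1920 * 4 == 7680 bytes, so the smaller 7128 wins; ignoring
 * the TLB miss adjustment, the final value rounded up to cachelines
 * is DIV_ROUND_UP(7128, 64) + 2 == 114.
 */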

static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
                                 int level, enum plane_id plane_id, u16 value)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        bool dirty = false;

        for (; level < intel_wm_num_levels(dev_priv); level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

                dirty |= raw->plane[plane_id] != value;
                raw->plane[plane_id] = value;
        }

        return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
                               int level, u16 value)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        bool dirty = false;

        /* NORMAL level doesn't have an FBC watermark */
        level = max(level, G4X_WM_LEVEL_SR);

        for (; level < intel_wm_num_levels(dev_priv); level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

                dirty |= raw->fbc != value;
                raw->fbc = value;
        }

        return dirty;
}

static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
                              const struct intel_plane_state *plane_state,
                              u32 pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
                                     const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
        enum plane_id plane_id = plane->id;
        bool dirty = false;
        int level;

        if (!intel_wm_plane_visible(crtc_state, plane_state)) {
                dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
                if (plane_id == PLANE_PRIMARY)
                        dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
                goto out;
        }

        for (level = 0; level < num_levels; level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
                int wm, max_wm;

                wm = g4x_compute_wm(crtc_state, plane_state, level);
                max_wm = g4x_plane_fifo_size(plane_id, level);

                if (wm > max_wm)
                        break;

                dirty |= raw->plane[plane_id] != wm;
                raw->plane[plane_id] = wm;

                if (plane_id != PLANE_PRIMARY ||
                    level == G4X_WM_LEVEL_NORMAL)
                        continue;

                wm = ilk_compute_fbc_wm(crtc_state, plane_state,
                                        raw->plane[plane_id]);
                max_wm = g4x_fbc_fifo_size(level);

                /*
                 * FBC wm is not mandatory as we
                 * can always just disable its use.
                 */
                if (wm > max_wm)
                        wm = USHRT_MAX;

                dirty |= raw->fbc != wm;
                raw->fbc = wm;
        }

        /* mark watermarks as invalid */
        dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

        if (plane_id == PLANE_PRIMARY)
                dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
        if (dirty) {
                drm_dbg_kms(&dev_priv->drm,
                            "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
                            plane->base.name,
                            crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
                            crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
                            crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

                if (plane_id == PLANE_PRIMARY)
                        drm_dbg_kms(&dev_priv->drm,
                                    "FBC watermarks: SR=%d, HPLL=%d\n",
                                    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
                                    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
        }

        return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                      enum plane_id plane_id, int level)
{
        const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

        return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                     int level)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

        if (level > dev_priv->wm.max_level)
                return false;

        return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
                g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
                g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
                               struct g4x_wm_state *wm_state, int level)
{
        if (level <= G4X_WM_LEVEL_NORMAL) {
                enum plane_id plane_id;

                for_each_plane_id_on_crtc(crtc, plane_id)
                        wm_state->wm.plane[plane_id] = USHRT_MAX;
        }

        if (level <= G4X_WM_LEVEL_SR) {
                wm_state->cxsr = false;
                wm_state->sr.cursor = USHRT_MAX;
                wm_state->sr.plane = USHRT_MAX;
                wm_state->sr.fbc = USHRT_MAX;
        }

        if (level <= G4X_WM_LEVEL_HPLL) {
                wm_state->hpll_en = false;
                wm_state->hpll.cursor = USHRT_MAX;
                wm_state->hpll.plane = USHRT_MAX;
                wm_state->hpll.fbc = USHRT_MAX;
        }
}

1333 static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1334 {
1335         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1336         struct intel_atomic_state *state =
1337                 to_intel_atomic_state(crtc_state->uapi.state);
1338         struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
1339         int num_active_planes = hweight8(crtc_state->active_planes &
1340                                          ~BIT(PLANE_CURSOR));
1341         const struct g4x_pipe_wm *raw;
1342         const struct intel_plane_state *old_plane_state;
1343         const struct intel_plane_state *new_plane_state;
1344         struct intel_plane *plane;
1345         enum plane_id plane_id;
1346         int i, level;
1347         unsigned int dirty = 0;
1348
1349         for_each_oldnew_intel_plane_in_state(state, plane,
1350                                              old_plane_state,
1351                                              new_plane_state, i) {
1352                 if (new_plane_state->hw.crtc != &crtc->base &&
1353                     old_plane_state->hw.crtc != &crtc->base)
1354                         continue;
1355
1356                 if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
1357                         dirty |= BIT(plane->id);
1358         }
1359
1360         if (!dirty)
1361                 return 0;
1362
1363         level = G4X_WM_LEVEL_NORMAL;
1364         if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1365                 goto out;
1366
1367         raw = &crtc_state->wm.g4x.raw[level];
1368         for_each_plane_id_on_crtc(crtc, plane_id)
1369                 wm_state->wm.plane[plane_id] = raw->plane[plane_id];
1370
1371         level = G4X_WM_LEVEL_SR;
1372
1373         if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1374                 goto out;
1375
1376         raw = &crtc_state->wm.g4x.raw[level];
1377         wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
1378         wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
1379         wm_state->sr.fbc = raw->fbc;
1380
1381         wm_state->cxsr = num_active_planes == 1;
1382
1383         level = G4X_WM_LEVEL_HPLL;
1384
1385         if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1386                 goto out;
1387
1388         raw = &crtc_state->wm.g4x.raw[level];
1389         wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
1390         wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
1391         wm_state->hpll.fbc = raw->fbc;
1392
1393         wm_state->hpll_en = wm_state->cxsr;
1394
1395         level++;
1396
1397  out:
1398         if (level == G4X_WM_LEVEL_NORMAL)
1399                 return -EINVAL;
1400
1401         /* invalidate the higher levels */
1402         g4x_invalidate_wms(crtc, wm_state, level);
1403
1404         /*
1405          * Determine if the FBC watermark(s) can be used. If
1406          * this isn't the case we prefer to disable the FBC
1407          * watermark(s) rather than disable the SR/HPLL
1408          * level(s) entirely.
1409          */
1410         wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;
1411
1412         if (level >= G4X_WM_LEVEL_SR &&
1413             wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
1414                 wm_state->fbc_en = false;
1415         else if (level >= G4X_WM_LEVEL_HPLL &&
1416                  wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
1417                 wm_state->fbc_en = false;
1418
1419         return 0;
1420 }
1421
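/*
 * Compute the "intermediate" watermarks: values that are safe while
 * the hardware may still be using the old plane configuration, i.e.
 * until the next vblank. Each value is therefore the max of the
 * active (old) and optimal (new) watermark.
 */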
1422 static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
1423 {
1424         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1425         struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1426         const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1427         struct intel_atomic_state *intel_state =
1428                 to_intel_atomic_state(new_crtc_state->uapi.state);
1429         const struct intel_crtc_state *old_crtc_state =
1430                 intel_atomic_get_old_crtc_state(intel_state, crtc);
1431         const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1432         enum plane_id plane_id;
1433
1434         if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
1435                 *intermediate = *optimal;
1436
1437                 intermediate->cxsr = false;
1438                 intermediate->hpll_en = false;
1439                 goto out;
1440         }
1441
1442         intermediate->cxsr = optimal->cxsr && active->cxsr &&
1443                 !new_crtc_state->disable_cxsr;
1444         intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1445                 !new_crtc_state->disable_cxsr;
1446         intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1447
1448         for_each_plane_id_on_crtc(crtc, plane_id) {
1449                 intermediate->wm.plane[plane_id] =
1450                         max(optimal->wm.plane[plane_id],
1451                             active->wm.plane[plane_id]);
1452
1453                 WARN_ON(intermediate->wm.plane[plane_id] >
1454                         g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1455         }
1456
1457         intermediate->sr.plane = max(optimal->sr.plane,
1458                                      active->sr.plane);
1459         intermediate->sr.cursor = max(optimal->sr.cursor,
1460                                       active->sr.cursor);
1461         intermediate->sr.fbc = max(optimal->sr.fbc,
1462                                    active->sr.fbc);
1463
1464         intermediate->hpll.plane = max(optimal->hpll.plane,
1465                                        active->hpll.plane);
1466         intermediate->hpll.cursor = max(optimal->hpll.cursor,
1467                                         active->hpll.cursor);
1468         intermediate->hpll.fbc = max(optimal->hpll.fbc,
1469                                      active->hpll.fbc);
1470
1471         WARN_ON((intermediate->sr.plane >
1472                  g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1473                  intermediate->sr.cursor >
1474                  g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1475                 intermediate->cxsr);
1476         WARN_ON((intermediate->sr.plane >
1477                  g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1478                  intermediate->sr.cursor >
1479                  g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1480                 intermediate->hpll_en);
1481
1482         WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR) &&
1483                 intermediate->fbc_en && intermediate->cxsr);
1484         WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL) &&
1485                 intermediate->fbc_en && intermediate->hpll_en);
1486
1487 out:
1488         /*
1489          * If our intermediate WM are identical to the final WM, then we can
1490          * omit the post-vblank programming; only update if it's different.
1491          */
1492         if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1493                 new_crtc_state->wm.need_postvbl_update = true;
1494
1495         return 0;
1496 }
1497
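/*
 * Merge the per-pipe watermarks into the global (device wide) values.
 * cxsr/HPLL/FBC can only be enabled when every active pipe allows
 * them, and then only with exactly one active pipe.
 */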
1498 static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1499                          struct g4x_wm_values *wm)
1500 {
1501         struct intel_crtc *crtc;
1502         int num_active_pipes = 0;
1503
1504         wm->cxsr = true;
1505         wm->hpll_en = true;
1506         wm->fbc_en = true;
1507
1508         for_each_intel_crtc(&dev_priv->drm, crtc) {
1509                 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1510
1511                 if (!crtc->active)
1512                         continue;
1513
1514                 if (!wm_state->cxsr)
1515                         wm->cxsr = false;
1516                 if (!wm_state->hpll_en)
1517                         wm->hpll_en = false;
1518                 if (!wm_state->fbc_en)
1519                         wm->fbc_en = false;
1520
1521                 num_active_pipes++;
1522         }
1523
1524         if (num_active_pipes != 1) {
1525                 wm->cxsr = false;
1526                 wm->hpll_en = false;
1527                 wm->fbc_en = false;
1528         }
1529
1530         for_each_intel_crtc(&dev_priv->drm, crtc) {
1531                 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1532                 enum pipe pipe = crtc->pipe;
1533
1534                 wm->pipe[pipe] = wm_state->wm;
1535                 if (crtc->active && wm->cxsr)
1536                         wm->sr = wm_state->sr;
1537                 if (crtc->active && wm->hpll_en)
1538                         wm->hpll = wm_state->hpll;
1539         }
1540 }
1541
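/*
 * Program the merged watermarks: when cxsr is being turned off it is
 * disabled before the new values are written, and when it's being
 * turned on it is enabled only afterwards.
 */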
1542 static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1543 {
1544         struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1545         struct g4x_wm_values new_wm = {};
1546
1547         g4x_merge_wm(dev_priv, &new_wm);
1548
1549         if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1550                 return;
1551
1552         if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1553                 _intel_set_memory_cxsr(dev_priv, false);
1554
1555         g4x_write_wm_values(dev_priv, &new_wm);
1556
1557         if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1558                 _intel_set_memory_cxsr(dev_priv, true);
1559
1560         *old_wm = new_wm;
1561 }
1562
1563 static void g4x_initial_watermarks(struct intel_atomic_state *state,
1564                                    struct intel_crtc *crtc)
1565 {
1566         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1567         const struct intel_crtc_state *crtc_state =
1568                 intel_atomic_get_new_crtc_state(state, crtc);
1569
1570         mutex_lock(&dev_priv->wm.wm_mutex);
1571         crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1572         g4x_program_watermarks(dev_priv);
1573         mutex_unlock(&dev_priv->wm.wm_mutex);
1574 }
1575
1576 static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1577                                     struct intel_crtc *crtc)
1578 {
1579         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1580         const struct intel_crtc_state *crtc_state =
1581                 intel_atomic_get_new_crtc_state(state, crtc);
1582
1583         if (!crtc_state->wm.need_postvbl_update)
1584                 return;
1585
1586         mutex_lock(&dev_priv->wm.wm_mutex);
1587         crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1588         g4x_program_watermarks(dev_priv);
1589         mutex_unlock(&dev_priv->wm.wm_mutex);
1590 }
1591
1592 /* latency must be in 0.1us units. */
1593 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
1594                                    unsigned int htotal,
1595                                    unsigned int width,
1596                                    unsigned int cpp,
1597                                    unsigned int latency)
1598 {
1599         unsigned int ret;
1600
1601         ret = intel_wm_method2(pixel_rate, htotal,
1602                                width, cpp, latency);
1603         ret = DIV_ROUND_UP(ret, 64);
1604
1605         return ret;
1606 }
1607
1608 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1609 {
1610         /* all latencies in usec */
1611         dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1612
1613         dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1614
1615         if (IS_CHERRYVIEW(dev_priv)) {
1616                 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1617                 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1618
1619                 dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1620         }
1621 }
1622
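/*
 * Compute the raw watermark for one plane and level, in FIFO entries.
 * A zero latency marks the level as disabled, returning USHRT_MAX.
 */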
1623 static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1624                                 const struct intel_plane_state *plane_state,
1625                                 int level)
1626 {
1627         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1628         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1629         const struct drm_display_mode *adjusted_mode =
1630                 &crtc_state->hw.adjusted_mode;
1631         unsigned int clock, htotal, cpp, width, wm;
1632
1633         if (dev_priv->wm.pri_latency[level] == 0)
1634                 return USHRT_MAX;
1635
1636         if (!intel_wm_plane_visible(crtc_state, plane_state))
1637                 return 0;
1638
1639         cpp = plane_state->hw.fb->format->cpp[0];
1640         clock = adjusted_mode->crtc_clock;
1641         htotal = adjusted_mode->crtc_htotal;
1642         width = crtc_state->pipe_src_w;
1643
1644         if (plane->id == PLANE_CURSOR) {
1645                 /*
1646                  * FIXME the formula gives values that are
1647                  * too big for the cursor FIFO, and hence we
1648                  * would never be able to use cursors. For
1649                  * now just hardcode the watermark.
1650                  */
1651                 wm = 63;
1652         } else {
1653                 wm = vlv_wm_method2(clock, htotal, width, cpp,
1654                                     dev_priv->wm.pri_latency[level] * 10);
1655         }
1656
1657         return min_t(unsigned int, wm, USHRT_MAX);
1658 }
1659
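/* true iff sprite1 is enabled while sprite0 is not */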
1660 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1661 {
1662         return (active_planes & (BIT(PLANE_SPRITE0) |
1663                                  BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1664 }
1665
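/*
 * Split the 511-entry DSPARB FIFO between the primary and sprite
 * planes, proportionally to their raw PM2 watermarks, spreading any
 * remainder evenly over the active planes. The cursor always gets
 * its own fixed 63-entry FIFO.
 */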
1666 static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
1667 {
1668         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1669         const struct g4x_pipe_wm *raw =
1670                 &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1671         struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1672         unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1673         int num_active_planes = hweight8(active_planes);
1674         const int fifo_size = 511;
1675         int fifo_extra, fifo_left = fifo_size;
1676         int sprite0_fifo_extra = 0;
1677         unsigned int total_rate;
1678         enum plane_id plane_id;
1679
1680         /*
1681          * When enabling sprite0 after sprite1 has already been enabled
1682          * we tend to get an underrun unless sprite0 already has some
1683          * FIFO space allocated. Hence we always allocate at least one
1684          * cacheline for sprite0 whenever sprite1 is enabled.
1685          *
1686          * All other plane enable sequences appear immune to this problem.
1687          */
1688         if (vlv_need_sprite0_fifo_workaround(active_planes))
1689                 sprite0_fifo_extra = 1;
1690
1691         total_rate = raw->plane[PLANE_PRIMARY] +
1692                 raw->plane[PLANE_SPRITE0] +
1693                 raw->plane[PLANE_SPRITE1] +
1694                 sprite0_fifo_extra;
1695
1696         if (total_rate > fifo_size)
1697                 return -EINVAL;
1698
1699         if (total_rate == 0)
1700                 total_rate = 1;
1701
1702         for_each_plane_id_on_crtc(crtc, plane_id) {
1703                 unsigned int rate;
1704
1705                 if ((active_planes & BIT(plane_id)) == 0) {
1706                         fifo_state->plane[plane_id] = 0;
1707                         continue;
1708                 }
1709
1710                 rate = raw->plane[plane_id];
1711                 fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
1712                 fifo_left -= fifo_state->plane[plane_id];
1713         }
1714
1715         fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
1716         fifo_left -= sprite0_fifo_extra;
1717
1718         fifo_state->plane[PLANE_CURSOR] = 63;
1719
1720         fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
1721
1722         /* spread the remainder evenly */
1723         for_each_plane_id_on_crtc(crtc, plane_id) {
1724                 int plane_extra;
1725
1726                 if (fifo_left == 0)
1727                         break;
1728
1729                 if ((active_planes & BIT(plane_id)) == 0)
1730                         continue;
1731
1732                 plane_extra = min(fifo_extra, fifo_left);
1733                 fifo_state->plane[plane_id] += plane_extra;
1734                 fifo_left -= plane_extra;
1735         }
1736
1737         WARN_ON(active_planes != 0 && fifo_left != 0);
1738
1739         /* give it all to the first plane if none are active */
1740         if (active_planes == 0) {
1741                 WARN_ON(fifo_left != fifo_size);
1742                 fifo_state->plane[PLANE_PRIMARY] = fifo_left;
1743         }
1744
1745         return 0;
1746 }
1747
1748 /* mark all levels starting from 'level' as invalid */
1749 static void vlv_invalidate_wms(struct intel_crtc *crtc,
1750                                struct vlv_wm_state *wm_state, int level)
1751 {
1752         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1753
1754         for (; level < intel_wm_num_levels(dev_priv); level++) {
1755                 enum plane_id plane_id;
1756
1757                 for_each_plane_id_on_crtc(crtc, plane_id)
1758                         wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1759
1760                 wm_state->sr[level].cursor = USHRT_MAX;
1761                 wm_state->sr[level].plane = USHRT_MAX;
1762         }
1763 }
1764
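/*
 * vlv watermarks are stored inverted: the value is the number of free
 * FIFO entries rather than the number used. Watermarks that don't fit
 * the FIFO are flagged invalid with USHRT_MAX.
 */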
1765 static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1766 {
1767         if (wm > fifo_size)
1768                 return USHRT_MAX;
1769         else
1770                 return fifo_size - wm;
1771 }
1772
1773 /*
1774  * Set the watermarks for 'level' and all higher
1775  * levels to 'value' in the "raw" watermarks.
1776  */
1777 static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1778                                  int level, enum plane_id plane_id, u16 value)
1779 {
1780         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1781         int num_levels = intel_wm_num_levels(dev_priv);
1782         bool dirty = false;
1783
1784         for (; level < num_levels; level++) {
1785                 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1786
1787                 dirty |= raw->plane[plane_id] != value;
1788                 raw->plane[plane_id] = value;
1789         }
1790
1791         return dirty;
1792 }
1793
1794 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1795                                      const struct intel_plane_state *plane_state)
1796 {
1797         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1798         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1799         enum plane_id plane_id = plane->id;
1800         int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1801         int level;
1802         bool dirty = false;
1803
1804         if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1805                 dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1806                 goto out;
1807         }
1808
1809         for (level = 0; level < num_levels; level++) {
1810                 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1811                 int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1812                 int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1813
1814                 if (wm > max_wm)
1815                         break;
1816
1817                 dirty |= raw->plane[plane_id] != wm;
1818                 raw->plane[plane_id] = wm;
1819         }
1820
1821         /* mark all higher levels as invalid */
1822         dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1823
1824 out:
1825         if (dirty)
1826                 drm_dbg_kms(&dev_priv->drm,
1827                             "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1828                             plane->base.name,
1829                             crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1830                             crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1831                             crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1832
1833         return dirty;
1834 }
1835
1836 static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1837                                       enum plane_id plane_id, int level)
1838 {
1839         const struct g4x_pipe_wm *raw =
1840                 &crtc_state->wm.vlv.raw[level];
1841         const struct vlv_fifo_state *fifo_state =
1842                 &crtc_state->wm.vlv.fifo_state;
1843
1844         return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1845 }
1846
1847 static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1848 {
1849         return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1850                 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1851                 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1852                 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1853 }
1854
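/*
 * vlv counterpart of g4x_compute_pipe_wm(): recompute the raw plane
 * watermarks, redistribute the DSPARB FIFO when a non-cursor plane
 * changed, and derive the per-level inverted values, trimming the
 * levels that no longer fit.
 */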
1855 static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1856 {
1857         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1858         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1859         struct intel_atomic_state *state =
1860                 to_intel_atomic_state(crtc_state->uapi.state);
1861         struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1862         const struct vlv_fifo_state *fifo_state =
1863                 &crtc_state->wm.vlv.fifo_state;
1864         int num_active_planes = hweight8(crtc_state->active_planes &
1865                                          ~BIT(PLANE_CURSOR));
1866         bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
1867         const struct intel_plane_state *old_plane_state;
1868         const struct intel_plane_state *new_plane_state;
1869         struct intel_plane *plane;
1870         enum plane_id plane_id;
1871         int level, ret, i;
1872         unsigned int dirty = 0;
1873
1874         for_each_oldnew_intel_plane_in_state(state, plane,
1875                                              old_plane_state,
1876                                              new_plane_state, i) {
1877                 if (new_plane_state->hw.crtc != &crtc->base &&
1878                     old_plane_state->hw.crtc != &crtc->base)
1879                         continue;
1880
1881                 if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
1882                         dirty |= BIT(plane->id);
1883         }
1884
1885         /*
1886          * DSPARB registers may have been reset due to the
1887          * power well being turned off. Make sure we restore
1888          * them to a consistent state even if no primary/sprite
1889          * planes are initially active.
1890          */
1891         if (needs_modeset)
1892                 crtc_state->fifo_changed = true;
1893
1894         if (!dirty)
1895                 return 0;
1896
1897         /* cursor changes don't warrant a FIFO recompute */
1898         if (dirty & ~BIT(PLANE_CURSOR)) {
1899                 const struct intel_crtc_state *old_crtc_state =
1900                         intel_atomic_get_old_crtc_state(state, crtc);
1901                 const struct vlv_fifo_state *old_fifo_state =
1902                         &old_crtc_state->wm.vlv.fifo_state;
1903
1904                 ret = vlv_compute_fifo(crtc_state);
1905                 if (ret)
1906                         return ret;
1907
1908                 if (needs_modeset ||
1909                     memcmp(old_fifo_state, fifo_state,
1910                            sizeof(*fifo_state)) != 0)
1911                         crtc_state->fifo_changed = true;
1912         }
1913
1914         /* initially allow all levels */
1915         wm_state->num_levels = intel_wm_num_levels(dev_priv);
1916         /*
1917          * Note that enabling cxsr with no primary/sprite planes
1918          * enabled can wedge the pipe. Hence we only allow cxsr
1919          * with exactly one enabled primary/sprite plane.
1920          */
1921         wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
1922
1923         for (level = 0; level < wm_state->num_levels; level++) {
1924                 const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1925                 const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
1926
1927                 if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
1928                         break;
1929
1930                 for_each_plane_id_on_crtc(crtc, plane_id) {
1931                         wm_state->wm[level].plane[plane_id] =
1932                                 vlv_invert_wm_value(raw->plane[plane_id],
1933                                                     fifo_state->plane[plane_id]);
1934                 }
1935
1936                 wm_state->sr[level].plane =
1937                         vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
1938                                                  raw->plane[PLANE_SPRITE0],
1939                                                  raw->plane[PLANE_SPRITE1]),
1940                                             sr_fifo_size);
1941
1942                 wm_state->sr[level].cursor =
1943                         vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
1944                                             63);
1945         }
1946
1947         if (level == 0)
1948                 return -EINVAL;
1949
1950         /* limit to only levels we can actually handle */
1951         wm_state->num_levels = level;
1952
1953         /* invalidate the higher levels */
1954         vlv_invalidate_wms(crtc, wm_state, level);
1955
1956         return 0;
1957 }
1958
1959 #define VLV_FIFO(plane, value) \
1960         (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1961
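/*
 * Repartition the DSPARB FIFO registers for the pipe. This runs after
 * intel_pipe_update_start(), i.e. with interrupts already disabled,
 * hence the raw _fw register accessors.
 */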
1962 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1963                                    struct intel_crtc *crtc)
1964 {
1965         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1966         struct intel_uncore *uncore = &dev_priv->uncore;
1967         const struct intel_crtc_state *crtc_state =
1968                 intel_atomic_get_new_crtc_state(state, crtc);
1969         const struct vlv_fifo_state *fifo_state =
1970                 &crtc_state->wm.vlv.fifo_state;
1971         int sprite0_start, sprite1_start, fifo_size;
1972
1973         if (!crtc_state->fifo_changed)
1974                 return;
1975
1976         sprite0_start = fifo_state->plane[PLANE_PRIMARY];
1977         sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
1978         fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
1979
1980         WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
1981         WARN_ON(fifo_size != 511);
1982
1983         trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
1984
1985         /*
1986          * uncore.lock serves a double purpose here. It allows us to
1987          * use the less expensive intel_uncore_*_fw() accessors, and
1988          * it protects the DSPARB registers from getting clobbered by
1989          * parallel updates from multiple pipes.
1990          *
1991          * intel_pipe_update_start() has already disabled interrupts
1992          * for us, so a plain spin_lock() is sufficient here.
1993          */
1994         spin_lock(&uncore->lock);
1995
1996         switch (crtc->pipe) {
1997                 u32 dsparb, dsparb2, dsparb3;
1998         case PIPE_A:
1999                 dsparb = intel_uncore_read_fw(uncore, DSPARB);
2000                 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2001
2002                 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
2003                             VLV_FIFO(SPRITEB, 0xff));
2004                 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
2005                            VLV_FIFO(SPRITEB, sprite1_start));
2006
2007                 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
2008                              VLV_FIFO(SPRITEB_HI, 0x1));
2009                 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
2010                            VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
2011
2012                 intel_uncore_write_fw(uncore, DSPARB, dsparb);
2013                 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2014                 break;
2015         case PIPE_B:
2016                 dsparb = intel_uncore_read_fw(uncore, DSPARB);
2017                 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2018
2019                 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
2020                             VLV_FIFO(SPRITED, 0xff));
2021                 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
2022                            VLV_FIFO(SPRITED, sprite1_start));
2023
2024                 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
2025                              VLV_FIFO(SPRITED_HI, 0xff));
2026                 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
2027                            VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
2028
2029                 intel_uncore_write_fw(uncore, DSPARB, dsparb);
2030                 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2031                 break;
2032         case PIPE_C:
2033                 dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
2034                 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2035
2036                 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
2037                              VLV_FIFO(SPRITEF, 0xff));
2038                 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
2039                             VLV_FIFO(SPRITEF, sprite1_start));
2040
2041                 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
2042                              VLV_FIFO(SPRITEF_HI, 0xff));
2043                 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
2044                            VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
2045
2046                 intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
2047                 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2048                 break;
2049         default:
2050                 break;
2051         }
2052
2053         intel_uncore_posting_read_fw(uncore, DSPARB);
2054
2055         spin_unlock(&uncore->lock);
2056 }
2057
2058 #undef VLV_FIFO
2059
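/*
 * vlv counterpart of g4x_compute_intermediate_wm(). Since vlv
 * watermark values are stored inverted, taking the min here
 * corresponds to taking the max of the underlying raw watermarks,
 * i.e. the safer of the two.
 */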
2060 static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
2061 {
2062         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2063         struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2064         const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2065         struct intel_atomic_state *intel_state =
2066                 to_intel_atomic_state(new_crtc_state->uapi.state);
2067         const struct intel_crtc_state *old_crtc_state =
2068                 intel_atomic_get_old_crtc_state(intel_state, crtc);
2069         const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
2070         int level;
2071
2072         if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
2073                 *intermediate = *optimal;
2074
2075                 intermediate->cxsr = false;
2076                 goto out;
2077         }
2078
2079         intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2080         intermediate->cxsr = optimal->cxsr && active->cxsr &&
2081                 !new_crtc_state->disable_cxsr;
2082
2083         for (level = 0; level < intermediate->num_levels; level++) {
2084                 enum plane_id plane_id;
2085
2086                 for_each_plane_id_on_crtc(crtc, plane_id) {
2087                         intermediate->wm[level].plane[plane_id] =
2088                                 min(optimal->wm[level].plane[plane_id],
2089                                     active->wm[level].plane[plane_id]);
2090                 }
2091
2092                 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2093                                                     active->sr[level].plane);
2094                 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2095                                                      active->sr[level].cursor);
2096         }
2097
2098         vlv_invalidate_wms(crtc, intermediate, level);
2099
2100 out:
2101         /*
2102          * If our intermediate WM are identical to the final WM, then we can
2103          * omit the post-vblank programming; only update if it's different.
2104          */
2105         if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2106                 new_crtc_state->wm.need_postvbl_update = true;
2107
2108         return 0;
2109 }
2110
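/*
 * Merge the per-pipe watermarks into the global values: the usable
 * level is capped at the lowest level supported by all active pipes,
 * and cxsr again requires a single active pipe.
 */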
2111 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2112                          struct vlv_wm_values *wm)
2113 {
2114         struct intel_crtc *crtc;
2115         int num_active_pipes = 0;
2116
2117         wm->level = dev_priv->wm.max_level;
2118         wm->cxsr = true;
2119
2120         for_each_intel_crtc(&dev_priv->drm, crtc) {
2121                 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2122
2123                 if (!crtc->active)
2124                         continue;
2125
2126                 if (!wm_state->cxsr)
2127                         wm->cxsr = false;
2128
2129                 num_active_pipes++;
2130                 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2131         }
2132
2133         if (num_active_pipes != 1)
2134                 wm->cxsr = false;
2135
2136         if (num_active_pipes > 1)
2137                 wm->level = VLV_WM_LEVEL_PM2;
2138
2139         for_each_intel_crtc(&dev_priv->drm, crtc) {
2140                 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2141                 enum pipe pipe = crtc->pipe;
2142
2143                 wm->pipe[pipe] = wm_state->wm[wm->level];
2144                 if (crtc->active && wm->cxsr)
2145                         wm->sr = wm_state->sr[wm->level];
2146
2147                 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2148                 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2149                 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2150                 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2151         }
2152 }
2153
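/*
 * Program the merged watermarks. Memory power states that the new
 * values can no longer support (DDR DVFS, PM5, cxsr) are disabled
 * before the register write, and newly supported ones are enabled
 * only afterwards.
 */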
2154 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2155 {
2156         struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2157         struct vlv_wm_values new_wm = {};
2158
2159         vlv_merge_wm(dev_priv, &new_wm);
2160
2161         if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2162                 return;
2163
2164         if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2165                 chv_set_memory_dvfs(dev_priv, false);
2166
2167         if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2168                 chv_set_memory_pm5(dev_priv, false);
2169
2170         if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2171                 _intel_set_memory_cxsr(dev_priv, false);
2172
2173         vlv_write_wm_values(dev_priv, &new_wm);
2174
2175         if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2176                 _intel_set_memory_cxsr(dev_priv, true);
2177
2178         if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2179                 chv_set_memory_pm5(dev_priv, true);
2180
2181         if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2182                 chv_set_memory_dvfs(dev_priv, true);
2183
2184         *old_wm = new_wm;
2185 }
2186
2187 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2188                                    struct intel_crtc *crtc)
2189 {
2190         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2191         const struct intel_crtc_state *crtc_state =
2192                 intel_atomic_get_new_crtc_state(state, crtc);
2193
2194         mutex_lock(&dev_priv->wm.wm_mutex);
2195         crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2196         vlv_program_watermarks(dev_priv);
2197         mutex_unlock(&dev_priv->wm.wm_mutex);
2198 }
2199
2200 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2201                                     struct intel_crtc *crtc)
2202 {
2203         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2204         const struct intel_crtc_state *crtc_state =
2205                 intel_atomic_get_new_crtc_state(state, crtc);
2206
2207         if (!crtc_state->wm.need_postvbl_update)
2208                 return;
2209
2210         mutex_lock(&dev_priv->wm.wm_mutex);
2211         crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2212         vlv_program_watermarks(dev_priv);
2213         mutex_unlock(&dev_priv->wm.wm_mutex);
2214 }
2215
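/*
 * Legacy (pre-atomic) watermark update for i965: the per-plane
 * watermarks are fixed at 8, only the self-refresh plane and cursor
 * values are actually computed, and cxsr is only kept enabled with a
 * single active crtc.
 */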
2216 static void i965_update_wm(struct intel_crtc *unused_crtc)
2217 {
2218         struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2219         struct intel_crtc *crtc;
2220         int srwm = 1;
2221         int cursor_sr = 16;
2222         bool cxsr_enabled;
2223
2224         /* Calculate self-refresh entries for single-plane configs */
2225         crtc = single_enabled_crtc(dev_priv);
2226         if (crtc) {
2227                 /* self-refresh has much higher latency */
2228                 static const int sr_latency_ns = 12000;
2229                 const struct drm_display_mode *adjusted_mode =
2230                         &crtc->config->hw.adjusted_mode;
2231                 const struct drm_framebuffer *fb =
2232                         crtc->base.primary->state->fb;
2233                 int clock = adjusted_mode->crtc_clock;
2234                 int htotal = adjusted_mode->crtc_htotal;
2235                 int hdisplay = crtc->config->pipe_src_w;
2236                 int cpp = fb->format->cpp[0];
2237                 int entries;
2238
2239                 entries = intel_wm_method2(clock, htotal,
2240                                            hdisplay, cpp, sr_latency_ns / 100);
2241                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2242                 srwm = I965_FIFO_SIZE - entries;
2243                 if (srwm < 0)
2244                         srwm = 1;
2245                 srwm &= 0x1ff;
2246                 drm_dbg_kms(&dev_priv->drm,
2247                             "self-refresh entries: %d, wm: %d\n",
2248                             entries, srwm);
2249
2250                 entries = intel_wm_method2(clock, htotal,
2251                                            crtc->base.cursor->state->crtc_w, 4,
2252                                            sr_latency_ns / 100);
2253                 entries = DIV_ROUND_UP(entries,
2254                                        i965_cursor_wm_info.cacheline_size) +
2255                         i965_cursor_wm_info.guard_size;
2256
2257                 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2258                 if (cursor_sr > i965_cursor_wm_info.max_wm)
2259                         cursor_sr = i965_cursor_wm_info.max_wm;
2260
2261                 drm_dbg_kms(&dev_priv->drm,
2262                             "self-refresh watermark: display plane %d "
2263                             "cursor %d\n", srwm, cursor_sr);
2264
2265                 cxsr_enabled = true;
2266         } else {
2267                 cxsr_enabled = false;
2268                 /* Turn off self refresh if both pipes are enabled */
2269                 intel_set_memory_cxsr(dev_priv, false);
2270         }
2271
2272         drm_dbg_kms(&dev_priv->drm,
2273                     "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2274                     srwm);
2275
2276         /* 965 has limitations... */
2277         I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2278                    FW_WM(8, CURSORB) |
2279                    FW_WM(8, PLANEB) |
2280                    FW_WM(8, PLANEA));
2281         I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2282                    FW_WM(8, PLANEC_OLD));
2283         /* update cursor SR watermark */
2284         I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2285
2286         if (cxsr_enabled)
2287                 intel_set_memory_cxsr(dev_priv, true);
2288 }
2289
2290 #undef FW_WM
2291
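/*
 * Legacy watermark update for the older pre-g4x platforms: derive the
 * plane A/B watermarks from the FIFO sizes, and compute a self-refresh
 * watermark when only a single crtc is enabled.
 */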
2292 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2293 {
2294         struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2295         const struct intel_watermark_params *wm_info;
2296         u32 fwater_lo;
2297         u32 fwater_hi;
2298         int cwm, srwm = 1;
2299         int fifo_size;
2300         int planea_wm, planeb_wm;
2301         struct intel_crtc *crtc, *enabled = NULL;
2302
2303         if (IS_I945GM(dev_priv))
2304                 wm_info = &i945_wm_info;
2305         else if (!IS_GEN(dev_priv, 2))
2306                 wm_info = &i915_wm_info;
2307         else
2308                 wm_info = &i830_a_wm_info;
2309
2310         fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
2311         crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
2312         if (intel_crtc_active(crtc)) {
2313                 const struct drm_display_mode *adjusted_mode =
2314                         &crtc->config->hw.adjusted_mode;
2315                 const struct drm_framebuffer *fb =
2316                         crtc->base.primary->state->fb;
2317                 int cpp;
2318
2319                 if (IS_GEN(dev_priv, 2))
2320                         cpp = 4;
2321                 else
2322                         cpp = fb->format->cpp[0];
2323
2324                 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2325                                                wm_info, fifo_size, cpp,
2326                                                pessimal_latency_ns);
2327                 enabled = crtc;
2328         } else {
2329                 planea_wm = fifo_size - wm_info->guard_size;
2330                 if (planea_wm > (long)wm_info->max_wm)
2331                         planea_wm = wm_info->max_wm;
2332         }
2333
2334         if (IS_GEN(dev_priv, 2))
2335                 wm_info = &i830_bc_wm_info;
2336
2337         fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
2338         crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
2339         if (intel_crtc_active(crtc)) {
2340                 const struct drm_display_mode *adjusted_mode =
2341                         &crtc->config->hw.adjusted_mode;
2342                 const struct drm_framebuffer *fb =
2343                         crtc->base.primary->state->fb;
2344                 int cpp;
2345
2346                 if (IS_GEN(dev_priv, 2))
2347                         cpp = 4;
2348                 else
2349                         cpp = fb->format->cpp[0];
2350
2351                 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2352                                                wm_info, fifo_size, cpp,
2353                                                pessimal_latency_ns);
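                /*
                 * If a crtc was already marked enabled, both pipes
                 * are active: no single enabled crtc means no
                 * self-refresh.
                 */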
2354                 if (enabled == NULL)
2355                         enabled = crtc;
2356                 else
2357                         enabled = NULL;
2358         } else {
2359                 planeb_wm = fifo_size - wm_info->guard_size;
2360                 if (planeb_wm > (long)wm_info->max_wm)
2361                         planeb_wm = wm_info->max_wm;
2362         }
2363
2364         drm_dbg_kms(&dev_priv->drm,
2365                     "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2366
2367         if (IS_I915GM(dev_priv) && enabled) {
2368                 struct drm_i915_gem_object *obj;
2369
2370                 obj = intel_fb_obj(enabled->base.primary->state->fb);
2371
2372                 /* self-refresh seems busted with untiled */
2373                 if (!i915_gem_object_is_tiled(obj))
2374                         enabled = NULL;
2375         }
2376
2377         /*
2378          * Overlay gets an aggressive default since video jitter is bad.
2379          */
2380         cwm = 2;
2381
2382         /* Play safe and disable self-refresh before adjusting watermarks. */
2383         intel_set_memory_cxsr(dev_priv, false);
2384
2385         /* Calculate self-refresh entries for single-plane configs */
2386         if (HAS_FW_BLC(dev_priv) && enabled) {
2387                 /* self-refresh has much higher latency */
2388                 static const int sr_latency_ns = 6000;
2389                 const struct drm_display_mode *adjusted_mode =
2390                         &enabled->config->hw.adjusted_mode;
2391                 const struct drm_framebuffer *fb =
2392                         enabled->base.primary->state->fb;
2393                 int clock = adjusted_mode->crtc_clock;
2394                 int htotal = adjusted_mode->crtc_htotal;
2395                 int hdisplay = enabled->config->pipe_src_w;
2396                 int cpp;
2397                 int entries;
2398
2399                 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2400                         cpp = 4;
2401                 else
2402                         cpp = fb->format->cpp[0];
2403
2404                 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2405                                            sr_latency_ns / 100);
2406                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2407                 drm_dbg_kms(&dev_priv->drm,
2408                             "self-refresh entries: %d\n", entries);
2409                 srwm = wm_info->fifo_size - entries;
2410                 if (srwm < 0)
2411                         srwm = 1;
2412
2413                 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2414                         I915_WRITE(FW_BLC_SELF,
2415                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2416                 else
2417                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2418         }
2419
2420         drm_dbg_kms(&dev_priv->drm,
2421                     "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2422                      planea_wm, planeb_wm, cwm, srwm);
2423
2424         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2425         fwater_hi = (cwm & 0x1f);
2426
2427         /* Set request length to 8 cachelines per fetch */
2428         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2429         fwater_hi = fwater_hi | (1 << 8);
2430
2431         I915_WRITE(FW_BLC, fwater_lo);
2432         I915_WRITE(FW_BLC2, fwater_hi);
2433
2434         if (enabled)
2435                 intel_set_memory_cxsr(dev_priv, true);
2436 }
2437
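/*
 * Legacy watermark update for 845/865: a single plane A watermark,
 * with no self-refresh handling.
 */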
2438 static void i845_update_wm(struct intel_crtc *unused_crtc)
2439 {
2440         struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2441         struct intel_crtc *crtc;
2442         const struct drm_display_mode *adjusted_mode;
2443         u32 fwater_lo;
2444         int planea_wm;
2445
2446         crtc = single_enabled_crtc(dev_priv);
2447         if (crtc == NULL)
2448                 return;
2449
2450         adjusted_mode = &crtc->config->hw.adjusted_mode;
2451         planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2452                                        &i845_wm_info,
2453                                        dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
2454                                        4, pessimal_latency_ns);
2455         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2456         fwater_lo |= (3 << 8) | planea_wm;
2457
2458         drm_dbg_kms(&dev_priv->drm,
2459                     "Setting FIFO watermarks - A: %d\n", planea_wm);
2460
2461         I915_WRITE(FW_BLC, fwater_lo);
2462 }
2463
2464 /* latency must be in 0.1us units. */
2465 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2466                                    unsigned int cpp,
2467                                    unsigned int latency)
2468 {
2469         unsigned int ret;
2470
2471         ret = intel_wm_method1(pixel_rate, cpp, latency);
2472         ret = DIV_ROUND_UP(ret, 64) + 2;
2473
2474         return ret;
2475 }
2476
2477 /* latency must be in 0.1us units. */
2478 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2479                                    unsigned int htotal,
2480                                    unsigned int width,
2481                                    unsigned int cpp,
2482                                    unsigned int latency)
2483 {
2484         unsigned int ret;
2485
2486         ret = intel_wm_method2(pixel_rate, htotal,
2487                                width, cpp, latency);
2488         ret = DIV_ROUND_UP(ret, 64) + 2;
2489
2490         return ret;
2491 }
2492
2493 static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2494 {
2495         /*
2496          * Neither of these should be possible since this function shouldn't be
2497          * called if the CRTC is off or the plane is invisible.  But let's be
2498          * extra paranoid to avoid a potential divide-by-zero if we screw up
2499          * elsewhere in the driver.
2500          */
2501         if (WARN_ON(!cpp))
2502                 return 0;
2503         if (WARN_ON(!horiz_pixels))
2504                 return 0;
2505
2506         return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2507 }
2508
2509 struct ilk_wm_maximums {
2510         u16 pri;
2511         u16 spr;
2512         u16 cur;
2513         u16 fbc;
2514 };
2515
2516 /*
2517  * For both WM_PIPE and WM_LP.
2518  * mem_value must be in 0.1us units.
2519  */
2520 static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
2521                               const struct intel_plane_state *plane_state,
2522                               u32 mem_value, bool is_lp)
2523 {
2524         u32 method1, method2;
2525         int cpp;
2526
2527         if (mem_value == 0)
2528                 return U32_MAX;
2529
2530         if (!intel_wm_plane_visible(crtc_state, plane_state))
2531                 return 0;
2532
2533         cpp = plane_state->hw.fb->format->cpp[0];
2534
2535         method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2536
2537         if (!is_lp)
2538                 return method1;
2539
2540         method2 = ilk_wm_method2(crtc_state->pixel_rate,
2541                                  crtc_state->hw.adjusted_mode.crtc_htotal,
2542                                  drm_rect_width(&plane_state->uapi.dst),
2543                                  cpp, mem_value);
2544
2545         return min(method1, method2);
2546 }
2547
2548 /*
2549  * For both WM_PIPE and WM_LP.
2550  * mem_value must be in 0.1us units.
2551  */
2552 static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2553                               const struct intel_plane_state *plane_state,
2554                               u32 mem_value)
2555 {
2556         u32 method1, method2;
2557         int cpp;
2558
2559         if (mem_value == 0)
2560                 return U32_MAX;
2561
2562         if (!intel_wm_plane_visible(crtc_state, plane_state))
2563                 return 0;
2564
2565         cpp = plane_state->hw.fb->format->cpp[0];
2566
2567         method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2568         method2 = ilk_wm_method2(crtc_state->pixel_rate,
2569                                  crtc_state->hw.adjusted_mode.crtc_htotal,
2570                                  drm_rect_width(&plane_state->uapi.dst),
2571                                  cpp, mem_value);
2572         return min(method1, method2);
2573 }
2574
2575 /*
2576  * For both WM_PIPE and WM_LP.
2577  * mem_value must be in 0.1us units.
2578  */
2579 static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
2580                               const struct intel_plane_state *plane_state,
2581                               u32 mem_value)
2582 {
2583         int cpp;
2584
2585         if (mem_value == 0)
2586                 return U32_MAX;
2587
2588         if (!intel_wm_plane_visible(crtc_state, plane_state))
2589                 return 0;
2590
2591         cpp = plane_state->hw.fb->format->cpp[0];
2592
2593         return ilk_wm_method2(crtc_state->pixel_rate,
2594                               crtc_state->hw.adjusted_mode.crtc_htotal,
2595                               drm_rect_width(&plane_state->uapi.dst),
2596                               cpp, mem_value);
2597 }
2598
2599 /* Only for WM_LP. */
2600 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2601                               const struct intel_plane_state *plane_state,
2602                               u32 pri_val)
2603 {
2604         int cpp;
2605
2606         if (!intel_wm_plane_visible(crtc_state, plane_state))
2607                 return 0;
2608
2609         cpp = plane_state->hw.fb->format->cpp[0];
2610
2611         return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
2612                           cpp);
2613 }
2614
2615 static unsigned int
2616 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2617 {
2618         if (INTEL_GEN(dev_priv) >= 8)
2619                 return 3072;
2620         else if (INTEL_GEN(dev_priv) >= 7)
2621                 return 768;
2622         else
2623                 return 512;
2624 }
2625
2626 static unsigned int
2627 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2628                      int level, bool is_sprite)
2629 {
2630         if (INTEL_GEN(dev_priv) >= 8)
2631                 /* BDW primary/sprite plane watermarks */
2632                 return level == 0 ? 255 : 2047;
2633         else if (INTEL_GEN(dev_priv) >= 7)
2634                 /* IVB/HSW primary/sprite plane watermarks */
2635                 return level == 0 ? 127 : 1023;
2636         else if (!is_sprite)
2637                 /* ILK/SNB primary plane watermarks */
2638                 return level == 0 ? 127 : 511;
2639         else
2640                 /* ILK/SNB sprite plane watermarks */
2641                 return level == 0 ? 63 : 255;
2642 }
2643
2644 static unsigned int
2645 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2646 {
2647         if (INTEL_GEN(dev_priv) >= 7)
2648                 return level == 0 ? 63 : 255;
2649         else
2650                 return level == 0 ? 31 : 63;
2651 }
2652
2653 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2654 {
2655         if (INTEL_GEN(dev_priv) >= 8)
2656                 return 31;
2657         else
2658                 return 15;
2659 }
2660
2661 /* Calculate the maximum primary/sprite plane watermark */
2662 static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2663                                      int level,
2664                                      const struct intel_wm_config *config,
2665                                      enum intel_ddb_partitioning ddb_partitioning,
2666                                      bool is_sprite)
2667 {
2668         unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2669
2670         /* if sprites aren't enabled, sprites get nothing */
2671         if (is_sprite && !config->sprites_enabled)
2672                 return 0;
2673
2674         /* HSW allows LP1+ watermarks even with multiple pipes */
2675         if (level == 0 || config->num_pipes_active > 1) {
2676                 fifo_size /= INTEL_NUM_PIPES(dev_priv);
2677
2678                 /*
2679                  * For some reason the non self refresh
2680                  * FIFO size is only half of the self
2681                  * refresh FIFO size on ILK/SNB.
2682                  */
2683                 if (INTEL_GEN(dev_priv) <= 6)
2684                         fifo_size /= 2;
2685         }
2686
2687         if (config->sprites_enabled) {
2688                 /* level 0 is always calculated with 1:1 split */
2689                 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2690                         if (is_sprite)
2691                                 fifo_size *= 5;
2692                         fifo_size /= 6;
2693                 } else {
2694                         fifo_size /= 2;
2695                 }
2696         }
2697
2698         /* clamp to max that the registers can hold */
2699         return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2700 }
2701
2702 /* Calculate the maximum cursor plane watermark */
2703 static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2704                                       int level,
2705                                       const struct intel_wm_config *config)
2706 {
2707         /* HSW LP1+ watermarks w/ multiple pipes */
2708         if (level > 0 && config->num_pipes_active > 1)
2709                 return 64;
2710
2711         /* otherwise just report max that registers can hold */
2712         return ilk_cursor_wm_reg_max(dev_priv, level);
2713 }
2714
2715 static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2716                                     int level,
2717                                     const struct intel_wm_config *config,
2718                                     enum intel_ddb_partitioning ddb_partitioning,
2719                                     struct ilk_wm_maximums *max)
2720 {
2721         max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2722         max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2723         max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2724         max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2725 }
2726
2727 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2728                                         int level,
2729                                         struct ilk_wm_maximums *max)
2730 {
2731         max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2732         max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2733         max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2734         max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2735 }
2736
2737 static bool ilk_validate_wm_level(int level,
2738                                   const struct ilk_wm_maximums *max,
2739                                   struct intel_wm_level *result)
2740 {
2741         bool ret;
2742
2743         /* already determined to be invalid? */
2744         if (!result->enable)
2745                 return false;
2746
2747         result->enable = result->pri_val <= max->pri &&
2748                          result->spr_val <= max->spr &&
2749                          result->cur_val <= max->cur;
2750
2751         ret = result->enable;
2752
2753         /*
2754          * HACK until we can pre-compute everything,
2755          * and thus fail gracefully if LP0 watermarks
2756          * are exceeded...
2757          */
2758         if (level == 0 && !result->enable) {
2759                 if (result->pri_val > max->pri)
2760                         DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2761                                       level, result->pri_val, max->pri);
2762                 if (result->spr_val > max->spr)
2763                         DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2764                                       level, result->spr_val, max->spr);
2765                 if (result->cur_val > max->cur)
2766                         DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2767                                       level, result->cur_val, max->cur);
2768
2769                 result->pri_val = min_t(u32, result->pri_val, max->pri);
2770                 result->spr_val = min_t(u32, result->spr_val, max->spr);
2771                 result->cur_val = min_t(u32, result->cur_val, max->cur);
2772                 result->enable = true;
2773         }
2774
2775         return ret;
2776 }
2777
2778 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2779                                  const struct intel_crtc *intel_crtc,
2780                                  int level,
2781                                  struct intel_crtc_state *crtc_state,
2782                                  const struct intel_plane_state *pristate,
2783                                  const struct intel_plane_state *sprstate,
2784                                  const struct intel_plane_state *curstate,
2785                                  struct intel_wm_level *result)
2786 {
2787         u16 pri_latency = dev_priv->wm.pri_latency[level];
2788         u16 spr_latency = dev_priv->wm.spr_latency[level];
2789         u16 cur_latency = dev_priv->wm.cur_latency[level];
2790
2791         /* WM1+ latency values are stored in 0.5us units; convert to 0.1us */
2792         if (level > 0) {
2793                 pri_latency *= 5;
2794                 spr_latency *= 5;
2795                 cur_latency *= 5;
2796         }
2797
2798         if (pristate) {
2799                 result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
2800                                                      pri_latency, level);
2801                 result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
2802         }
2803
2804         if (sprstate)
2805                 result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
2806
2807         if (curstate)
2808                 result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
2809
2810         result->enable = true;
2811 }
2812
2813 static u32
2814 hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
2815 {
2816         const struct intel_atomic_state *intel_state =
2817                 to_intel_atomic_state(crtc_state->uapi.state);
2818         const struct drm_display_mode *adjusted_mode =
2819                 &crtc_state->hw.adjusted_mode;
2820         u32 linetime, ips_linetime;
2821
2822         if (!crtc_state->hw.active)
2823                 return 0;
2824         if (WARN_ON(adjusted_mode->crtc_clock == 0))
2825                 return 0;
2826         if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
2827                 return 0;
2828
2829         /* The watermarks are computed based on how long it takes to
2830          * fill a single row at the given clock rate, multiplied by 8.
2831          */
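             /*
              * Worked example: htotal 2200 at a 148.5 MHz dot clock gives
              * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e.
              * 14.875 us per line in the register's 1/8 us units.
              */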
2832         linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2833                                      adjusted_mode->crtc_clock);
2834         ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2835                                          intel_state->cdclk.logical.cdclk);
2836
2837         return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2838                PIPE_WM_LINETIME_TIME(linetime);
2839 }
2840
2841 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2842                                   u16 wm[8])
2843 {
2844         struct intel_uncore *uncore = &dev_priv->uncore;
2845
2846         if (INTEL_GEN(dev_priv) >= 9) {
2847                 u32 val;
2848                 int ret, i;
2849                 int level, max_level = ilk_wm_max_level(dev_priv);
2850
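                     /*
                      * Each GEN9_PCODE_READ_MEM_LATENCY reply packs four
                      * 8-bit latency values (in usecs) into one dword:
                      * bits 7:0 hold level 0/4, 15:8 level 1/5, 23:16
                      * level 2/6 and 31:24 level 3/7.
                      */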
2851                 /* read the first set of memory latencies[0:3] */
2852                 val = 0; /* data0 to be programmed to 0 for first set */
2853                 ret = sandybridge_pcode_read(dev_priv,
2854                                              GEN9_PCODE_READ_MEM_LATENCY,
2855                                              &val, NULL);
2856
2857                 if (ret) {
2858                         drm_err(&dev_priv->drm,
2859                                 "SKL Mailbox read error = %d\n", ret);
2860                         return;
2861                 }
2862
2863                 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2864                 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2865                                 GEN9_MEM_LATENCY_LEVEL_MASK;
2866                 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2867                                 GEN9_MEM_LATENCY_LEVEL_MASK;
2868                 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2869                                 GEN9_MEM_LATENCY_LEVEL_MASK;
2870
2871                 /* read the second set of memory latencies[4:7] */
2872                 val = 1; /* data0 to be programmed to 1 for second set */
2873                 ret = sandybridge_pcode_read(dev_priv,
2874                                              GEN9_PCODE_READ_MEM_LATENCY,
2875                                              &val, NULL);
2876                 if (ret) {
2877                         drm_err(&dev_priv->drm,
2878                                 "SKL Mailbox read error = %d\n", ret);
2879                         return;
2880                 }
2881
2882                 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2883                 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2884                                 GEN9_MEM_LATENCY_LEVEL_MASK;
2885                 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2886                                 GEN9_MEM_LATENCY_LEVEL_MASK;
2887                 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2888                                 GEN9_MEM_LATENCY_LEVEL_MASK;
2889
2890                 /*
2891                  * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
2892                  * need to be disabled. We sanitize the values read out of the
2893                  * punit to satisfy this requirement.
2894                  */
2895                 for (level = 1; level <= max_level; level++) {
2896                         if (wm[level] == 0) {
2897                                 for (i = level + 1; i <= max_level; i++)
2898                                         wm[i] = 0;
2899                                 break;
2900                         }
2901                 }
2902
2903                 /*
2904                  * WaWmMemoryReadLatency:skl+,glk
2905                  *
2906                  * punit doesn't take into account the read latency so we need
2907                  * to add 2us to the various latency levels we retrieve from the
2908                  * punit when the level 0 response data is 0us.
2909                  */
2910                 if (wm[0] == 0) {
2911                         wm[0] += 2;
2912                         for (level = 1; level <= max_level; level++) {
2913                                 if (wm[level] == 0)
2914                                         break;
2915                                 wm[level] += 2;
2916                         }
2917                 }
2918
2919                 /*
2920                  * WA Level-0 adjustment for 16GB DIMMs: SKL+
2921                  * If we could not get DIMM info, enable this WA to prevent
2922                  * any underrun: assume a 16GB DIMM whenever DIMM info is
2923                  * unavailable.
2924                  */
2925                 if (dev_priv->dram_info.is_16gb_dimm)
2926                         wm[0] += 1;
2927
2928         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2929                 u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
2930
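                     /*
                      * The 64-bit SSKPD carries the WM0 latency in bits
                      * 63:56, with the legacy 3:0 field as a fallback when
                      * that reads zero; WM1-WM4 follow in the fields
                      * extracted below.
                      */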
2931                 wm[0] = (sskpd >> 56) & 0xFF;
2932                 if (wm[0] == 0)
2933                         wm[0] = sskpd & 0xF;
2934                 wm[1] = (sskpd >> 4) & 0xFF;
2935                 wm[2] = (sskpd >> 12) & 0xFF;
2936                 wm[3] = (sskpd >> 20) & 0x1FF;
2937                 wm[4] = (sskpd >> 32) & 0x1FF;
2938         } else if (INTEL_GEN(dev_priv) >= 6) {
2939                 u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
2940
2941                 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2942                 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2943                 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2944                 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2945         } else if (INTEL_GEN(dev_priv) >= 5) {
2946                 u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
2947
2948                 /* ILK primary LP0 latency is 700 ns */
2949                 wm[0] = 7;
2950                 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2951                 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2952         } else {
2953                 MISSING_CASE(INTEL_DEVID(dev_priv));
2954         }
2955 }
2956
2957 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2958                                        u16 wm[5])
2959 {
2960         /* ILK sprite LP0 latency is 1300 ns */
2961         if (IS_GEN(dev_priv, 5))
2962                 wm[0] = 13;
2963 }
2964
2965 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2966                                        u16 wm[5])
2967 {
2968         /* ILK cursor LP0 latency is 1300 ns */
2969         if (IS_GEN(dev_priv, 5))
2970                 wm[0] = 13;
2971 }
2972
2973 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2974 {
2975         /* how many WM levels are we expecting */
2976         if (INTEL_GEN(dev_priv) >= 9)
2977                 return 7;
2978         else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2979                 return 4;
2980         else if (INTEL_GEN(dev_priv) >= 6)
2981                 return 3;
2982         else
2983                 return 2;
2984 }
2985
2986 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2987                                    const char *name,
2988                                    const u16 wm[8])
2989 {
2990         int level, max_level = ilk_wm_max_level(dev_priv);
2991
2992         for (level = 0; level <= max_level; level++) {
2993                 unsigned int latency = wm[level];
2994
2995                 if (latency == 0) {
2996                         drm_dbg_kms(&dev_priv->drm,
2997                                     "%s WM%d latency not provided\n",
2998                                     name, level);
2999                         continue;
3000                 }
3001
3002                 /*
3003                  * - latencies are in 1us units on gen9+.
3004                  * - before that, WM1+ latency values are in 0.5us units.
3005                  */
3006                 if (INTEL_GEN(dev_priv) >= 9)
3007                         latency *= 10;
3008                 else if (level > 0)
3009                         latency *= 5;
3010
3011                 drm_dbg_kms(&dev_priv->drm,
3012                             "%s WM%d latency %u (%u.%u usec)\n", name, level,
3013                             wm[level], latency / 10, latency % 10);
3014         }
3015 }
3016
3017 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
3018                                     u16 wm[5], u16 min)
3019 {
3020         int level, max_level = ilk_wm_max_level(dev_priv);
3021
3022         if (wm[0] >= min)
3023                 return false;
3024
3025         wm[0] = max(wm[0], min);
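             /*
              * min is in the 0.1us units used for WM0; WM1+ latencies are
              * stored in 0.5us units, hence the DIV_ROUND_UP(min, 5) below.
              */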
3026         for (level = 1; level <= max_level; level++)
3027                 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
3028
3029         return true;
3030 }
3031
3032 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3033 {
3034         bool changed;
3035
3036         /*
3037          * The BIOS provided WM memory latency values are often
3038          * inadequate for high resolution displays. Adjust them.
3039          */
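             /*
              * Note the bitwise |, not ||: all three latency tables must be
              * bumped even when an earlier call already reported a change.
              */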
3040         changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
3041                 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
3042                 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3043
3044         if (!changed)
3045                 return;
3046
3047         drm_dbg_kms(&dev_priv->drm,
3048                     "WM latency values increased to avoid potential underruns\n");
3049         intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3050         intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3051         intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3052 }
3053
3054 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3055 {
3056         /*
3057          * On some SNB machines (Thinkpad X220 Tablet at least)
3058          * LP3 usage can cause vblank interrupts to be lost.
3059          * The DEIIR bit will go high but it looks like the CPU
3060          * never gets interrupted.
3061          *
3062          * It's not clear whether other interrupt sources could
3063          * be affected or if this is somehow limited to vblank
3064          * interrupts only. To play it safe we disable LP3
3065          * watermarks entirely.
3066          */
3067         if (dev_priv->wm.pri_latency[3] == 0 &&
3068             dev_priv->wm.spr_latency[3] == 0 &&
3069             dev_priv->wm.cur_latency[3] == 0)
3070                 return;
3071
3072         dev_priv->wm.pri_latency[3] = 0;
3073         dev_priv->wm.spr_latency[3] = 0;
3074         dev_priv->wm.cur_latency[3] = 0;
3075
3076         drm_dbg_kms(&dev_priv->drm,
3077                     "LP3 watermarks disabled due to potential for lost interrupts\n");
3078         intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3079         intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3080         intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3081 }
3082
3083 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3084 {
3085         intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3086
3087         memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3088                sizeof(dev_priv->wm.pri_latency));
3089         memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3090                sizeof(dev_priv->wm.pri_latency));
3091
3092         intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3093         intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3094
3095         intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3096         intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3097         intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3098
3099         if (IS_GEN(dev_priv, 6)) {
3100                 snb_wm_latency_quirk(dev_priv);
3101                 snb_wm_lp3_irq_quirk(dev_priv);
3102         }
3103 }
3104
3105 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3106 {
3107         intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3108         intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3109 }
3110
3111 static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3112                                  struct intel_pipe_wm *pipe_wm)
3113 {
3114         /* LP0 watermark maximums depend on this pipe alone */
3115         const struct intel_wm_config config = {
3116                 .num_pipes_active = 1,
3117                 .sprites_enabled = pipe_wm->sprites_enabled,
3118                 .sprites_scaled = pipe_wm->sprites_scaled,
3119         };
3120         struct ilk_wm_maximums max;
3121
3122         /* LP0 watermarks always use 1/2 DDB partitioning */
3123         ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3124
3125         /* At least LP0 must be valid */
3126         if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3127                 drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
3128                 return false;
3129         }
3130
3131         return true;
3132 }
3133
3134 /* Compute new watermarks for the pipe */
3135 static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
3136 {
3137         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3138         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
3139         struct intel_pipe_wm *pipe_wm;
3140         struct intel_plane *plane;
3141         const struct intel_plane_state *plane_state;
3142         const struct intel_plane_state *pristate = NULL;
3143         const struct intel_plane_state *sprstate = NULL;
3144         const struct intel_plane_state *curstate = NULL;
3145         int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3146         struct ilk_wm_maximums max;
3147
3148         pipe_wm = &crtc_state->wm.ilk.optimal;
3149
3150         intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
3151                 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
3152                         pristate = plane_state;
3153                 else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
3154                         sprstate = plane_state;
3155                 else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
3156                         curstate = plane_state;
3157         }
3158
3159         pipe_wm->pipe_enabled = crtc_state->hw.active;
3160         if (sprstate) {
3161                 pipe_wm->sprites_enabled = sprstate->uapi.visible;
3162                 pipe_wm->sprites_scaled = sprstate->uapi.visible &&
3163                         (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
3164                          drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
3165         }
3166
3167         usable_level = max_level;
3168
3169         /* ILK/SNB: LP2+ watermarks only w/o sprites */
3170         if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3171                 usable_level = 1;
3172
3173         /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3174         if (pipe_wm->sprites_scaled)
3175                 usable_level = 0;
3176
3177         memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3178         ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
3179                              pristate, sprstate, curstate, &pipe_wm->wm[0]);
3180
3181         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3182                 pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
3183
3184         if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3185                 return -EINVAL;
3186
3187         ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3188
3189         for (level = 1; level <= usable_level; level++) {
3190                 struct intel_wm_level *wm = &pipe_wm->wm[level];
3191
3192                 ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
3193                                      pristate, sprstate, curstate, wm);
3194
3195                 /*
3196                  * Disable any watermark level that exceeds the
3197                  * register maximums since such watermarks are
3198                  * always invalid.
3199                  */
3200                 if (!ilk_validate_wm_level(level, &max, wm)) {
3201                         memset(wm, 0, sizeof(*wm));
3202                         break;
3203                 }
3204         }
3205
3206         return 0;
3207 }
3208
3209 /*
3210  * Build a set of 'intermediate' watermark values that satisfy both the old
3211  * state and the new state.  These can be programmed to the hardware
3212  * immediately.
3213  */
3214 static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
3215 {
3216         struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
3217         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3218         struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3219         struct intel_atomic_state *intel_state =
3220                 to_intel_atomic_state(newstate->uapi.state);
3221         const struct intel_crtc_state *oldstate =
3222                 intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
3223         const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
3224         int level, max_level = ilk_wm_max_level(dev_priv);
3225
3226         /*
3227          * Start with the final, target watermarks, then combine with the
3228          * currently active watermarks to get values that are safe both before
3229          * and after the vblank.
3230          */
3231         *a = newstate->wm.ilk.optimal;
3232         if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
3233             intel_state->skip_intermediate_wm)
3234                 return 0;
3235
3236         a->pipe_enabled |= b->pipe_enabled;
3237         a->sprites_enabled |= b->sprites_enabled;
3238         a->sprites_scaled |= b->sprites_scaled;
3239
3240         for (level = 0; level <= max_level; level++) {
3241                 struct intel_wm_level *a_wm = &a->wm[level];
3242                 const struct intel_wm_level *b_wm = &b->wm[level];
3243
3244                 a_wm->enable &= b_wm->enable;
3245                 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3246                 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3247                 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3248                 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3249         }
3250
3251         /*
3252          * We need to make sure that these merged watermark values are
3253          * actually a valid configuration themselves.  If they're not,
3254          * there's no safe way to transition from the old state to
3255          * the new state, so we need to fail the atomic transaction.
3256          */
3257         if (!ilk_validate_pipe_wm(dev_priv, a))
3258                 return -EINVAL;
3259
3260         /*
3261          * If our intermediate WM are identical to the final WM, then we can
3262          * omit the post-vblank programming; only update if it's different.
3263          */
3264         if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3265                 newstate->wm.need_postvbl_update = true;
3266
3267         return 0;
3268 }
3269
3270 /*
3271  * Merge the watermarks from all active pipes for a specific level.
3272  */
3273 static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3274                                int level,
3275                                struct intel_wm_level *ret_wm)
3276 {
3277         const struct intel_crtc *intel_crtc;
3278
3279         ret_wm->enable = true;
3280
3281         for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3282                 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3283                 const struct intel_wm_level *wm = &active->wm[level];
3284
3285                 if (!active->pipe_enabled)
3286                         continue;
3287
3288                 /*
3289                  * The watermark values may have been used in the past,
3290                  * so we must maintain them in the registers for some
3291                  * time even if the level is now disabled.
3292                  */
3293                 if (!wm->enable)
3294                         ret_wm->enable = false;
3295
3296                 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3297                 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3298                 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3299                 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3300         }
3301 }
3302
3303 /*
3304  * Merge all low power watermarks for all active pipes.
3305  */
3306 static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3307                          const struct intel_wm_config *config,
3308                          const struct ilk_wm_maximums *max,
3309                          struct intel_pipe_wm *merged)
3310 {
3311         int level, max_level = ilk_wm_max_level(dev_priv);
3312         int last_enabled_level = max_level;
3313
3314         /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3315         if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3316             config->num_pipes_active > 1)
3317                 last_enabled_level = 0;
3318
3319         /* ILK: FBC WM must always be disabled */
3320         merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3321
3322         /* merge each WM1+ level */
3323         for (level = 1; level <= max_level; level++) {
3324                 struct intel_wm_level *wm = &merged->wm[level];
3325
3326                 ilk_merge_wm_level(dev_priv, level, wm);
3327
3328                 if (level > last_enabled_level)
3329                         wm->enable = false;
3330                 else if (!ilk_validate_wm_level(level, max, wm))
3331                         /* make sure all following levels get disabled */
3332                         last_enabled_level = level - 1;
3333
3334                 /*
3335                  * The spec says it is preferred to disable
3336                  * FBC WMs instead of disabling a WM level.
3337                  */
3338                 if (wm->fbc_val > max->fbc) {
3339                         if (wm->enable)
3340                                 merged->fbc_wm_enabled = false;
3341                         wm->fbc_val = 0;
3342                 }
3343         }
3344
3345         /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3346         /*
3347          * FIXME this is racy. FBC might get enabled later.
3348          * What we should check here is whether FBC can be
3349          * enabled sometime later.
3350          */
3351         if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
3352             intel_fbc_is_active(dev_priv)) {
3353                 for (level = 2; level <= max_level; level++) {
3354                         struct intel_wm_level *wm = &merged->wm[level];
3355
3356                         wm->enable = false;
3357                 }
3358         }
3359 }
3360
3361 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3362 {
3363         /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
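             /*
              * HSW/BDW expose a WM4 level; when it is enabled, LP2/LP3 skip
              * ahead so the deepest latency level stays reachable.
              */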
3364         return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3365 }
3366
3367 /* The value we need to program into the WM_LPx latency field */
3368 static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3369                                       int level)
3370 {
3371         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3372                 return 2 * level;
3373         else
3374                 return dev_priv->wm.pri_latency[level];
3375 }
3376
3377 static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3378                                    const struct intel_pipe_wm *merged,
3379                                    enum intel_ddb_partitioning partitioning,
3380                                    struct ilk_wm_values *results)
3381 {
3382         struct intel_crtc *intel_crtc;
3383         int level, wm_lp;
3384
3385         results->enable_fbc_wm = merged->fbc_wm_enabled;
3386         results->partitioning = partitioning;
3387
3388         /* LP1+ register values */
3389         for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3390                 const struct intel_wm_level *r;
3391
3392                 level = ilk_wm_lp_to_level(wm_lp, merged);
3393
3394                 r = &merged->wm[level];
3395
3396                 /*
3397                  * Maintain the watermark values even if the level is
3398                  * disabled. Doing otherwise could cause underruns.
3399                  */
3400                 results->wm_lp[wm_lp - 1] =
3401                         (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3402                         (r->pri_val << WM1_LP_SR_SHIFT) |
3403                         r->cur_val;
3404
3405                 if (r->enable)
3406                         results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3407
3408                 if (INTEL_GEN(dev_priv) >= 8)
3409                         results->wm_lp[wm_lp - 1] |=
3410                                 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3411                 else
3412                         results->wm_lp[wm_lp - 1] |=
3413                                 r->fbc_val << WM1_LP_FBC_SHIFT;
3414
3415                 /*
3416                  * Always set WM1S_LP_EN when spr_val != 0, even if the
3417                  * level is disabled. Doing otherwise could cause underruns.
3418                  */
3419                 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3420                         WARN_ON(wm_lp != 1);
3421                         results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3422                 } else
3423                         results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3424         }
3425
3426         /* LP0 register values */
3427         for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3428                 enum pipe pipe = intel_crtc->pipe;
3429                 const struct intel_wm_level *r =
3430                         &intel_crtc->wm.active.ilk.wm[0];
3431
3432                 if (WARN_ON(!r->enable))
3433                         continue;
3434
3435                 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3436
3437                 results->wm_pipe[pipe] =
3438                         (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3439                         (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3440                         r->cur_val;
3441         }
3442 }
3443
3444 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
3445  * case both are at the same level. Prefer r1 in case they're the same. */
3446 static struct intel_pipe_wm *
3447 ilk_find_best_result(struct drm_i915_private *dev_priv,
3448                      struct intel_pipe_wm *r1,
3449                      struct intel_pipe_wm *r2)
3450 {
3451         int level, max_level = ilk_wm_max_level(dev_priv);
3452         int level1 = 0, level2 = 0;
3453
3454         for (level = 1; level <= max_level; level++) {
3455                 if (r1->wm[level].enable)
3456                         level1 = level;
3457                 if (r2->wm[level].enable)
3458                         level2 = level;
3459         }
3460
3461         if (level1 == level2) {
3462                 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3463                         return r2;
3464                 else
3465                         return r1;
3466         } else if (level1 > level2) {
3467                 return r1;
3468         } else {
3469                 return r2;
3470         }
3471 }
3472
3473 /* dirty bits used to track which watermarks need changes */
3474 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3475 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
3476 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3477 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3478 #define WM_DIRTY_FBC (1 << 24)
3479 #define WM_DIRTY_DDB (1 << 25)
3480
3481 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3482                                          const struct ilk_wm_values *old,
3483                                          const struct ilk_wm_values *new)
3484 {
3485         unsigned int dirty = 0;
3486         enum pipe pipe;
3487         int wm_lp;
3488
3489         for_each_pipe(dev_priv, pipe) {
3490                 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
3491                         dirty |= WM_DIRTY_LINETIME(pipe);
3492                         /* Must disable LP1+ watermarks too */
3493                         dirty |= WM_DIRTY_LP_ALL;
3494                 }
3495
3496                 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3497                         dirty |= WM_DIRTY_PIPE(pipe);
3498                         /* Must disable LP1+ watermarks too */
3499                         dirty |= WM_DIRTY_LP_ALL;
3500                 }
3501         }
3502
3503         if (old->enable_fbc_wm != new->enable_fbc_wm) {
3504                 dirty |= WM_DIRTY_FBC;
3505                 /* Must disable LP1+ watermarks too */
3506                 dirty |= WM_DIRTY_LP_ALL;
3507         }
3508
3509         if (old->partitioning != new->partitioning) {
3510                 dirty |= WM_DIRTY_DDB;
3511                 /* Must disable LP1+ watermarks too */
3512                 dirty |= WM_DIRTY_LP_ALL;
3513         }
3514
3515         /* LP1+ watermarks already deemed dirty, no need to continue */
3516         if (dirty & WM_DIRTY_LP_ALL)
3517                 return dirty;
3518
3519         /* Find the lowest numbered LP1+ watermark in need of an update... */
3520         for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3521                 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3522                     old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3523                         break;
3524         }
3525
3526         /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3527         for (; wm_lp <= 3; wm_lp++)
3528                 dirty |= WM_DIRTY_LP(wm_lp);
3529
3530         return dirty;
3531 }
3532
3533 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3534                                unsigned int dirty)
3535 {
3536         struct ilk_wm_values *previous = &dev_priv->wm.hw;
3537         bool changed = false;
3538
3539         if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3540                 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3541                 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3542                 changed = true;
3543         }
3544         if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3545                 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3546                 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3547                 changed = true;
3548         }
3549         if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3550                 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3551                 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3552                 changed = true;
3553         }
3554
3555         /*
3556          * Don't touch WM1S_LP_EN here.
3557          * Doing so could cause underruns.
3558          */
3559
3560         return changed;
3561 }
3562
3563 /*
3564  * The spec says we shouldn't write when we don't need to, because every write
3565  * causes WMs to be re-evaluated, expending some power.
3566  */
3567 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3568                                 struct ilk_wm_values *results)
3569 {
3570         struct ilk_wm_values *previous = &dev_priv->wm.hw;
3571         unsigned int dirty;
3572         u32 val;
3573
3574         dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3575         if (!dirty)
3576                 return;
3577
3578         _ilk_disable_lp_wm(dev_priv, dirty);
3579
3580         if (dirty & WM_DIRTY_PIPE(PIPE_A))
3581                 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3582         if (dirty & WM_DIRTY_PIPE(PIPE_B))
3583                 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3584         if (dirty & WM_DIRTY_PIPE(PIPE_C))
3585                 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3586
3587         if (dirty & WM_DIRTY_LINETIME(PIPE_A))
3588                 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
3589         if (dirty & WM_DIRTY_LINETIME(PIPE_B))
3590                 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
3591         if (dirty & WM_DIRTY_LINETIME(PIPE_C))
3592                 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
3593
3594         if (dirty & WM_DIRTY_DDB) {
3595                 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3596                         val = I915_READ(WM_MISC);
3597                         if (results->partitioning == INTEL_DDB_PART_1_2)
3598                                 val &= ~WM_MISC_DATA_PARTITION_5_6;
3599                         else
3600                                 val |= WM_MISC_DATA_PARTITION_5_6;
3601                         I915_WRITE(WM_MISC, val);
3602                 } else {
3603                         val = I915_READ(DISP_ARB_CTL2);
3604                         if (results->partitioning == INTEL_DDB_PART_1_2)
3605                                 val &= ~DISP_DATA_PARTITION_5_6;
3606                         else
3607                                 val |= DISP_DATA_PARTITION_5_6;
3608                         I915_WRITE(DISP_ARB_CTL2, val);
3609                 }
3610         }
3611
3612         if (dirty & WM_DIRTY_FBC) {
3613                 val = I915_READ(DISP_ARB_CTL);
3614                 if (results->enable_fbc_wm)
3615                         val &= ~DISP_FBC_WM_DIS;
3616                 else
3617                         val |= DISP_FBC_WM_DIS;
3618                 I915_WRITE(DISP_ARB_CTL, val);
3619         }
3620
3621         if (dirty & WM_DIRTY_LP(1) &&
3622             previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3623                 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3624
3625         if (INTEL_GEN(dev_priv) >= 7) {
3626                 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3627                         I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3628                 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3629                         I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3630         }
3631
3632         if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3633                 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3634         if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3635                 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3636         if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3637                 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3638
3639         dev_priv->wm.hw = *results;
3640 }
3641
3642 bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
3643 {
3644         return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3645 }
3646
3647 static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
3648 {
3649         u8 enabled_slices;
3650
3651         /* Slice 1 will always be enabled */
3652         enabled_slices = 1;
3653
3654         /* Gens prior to GEN11 have only one DBuf slice */
3655         if (INTEL_GEN(dev_priv) < 11)
3656                 return enabled_slices;
3657
3658         /*
3659          * FIXME: for now we'll only ever use 1 slice; pretend that we have
3660          * only that 1 slice enabled until we have a proper way for on-demand
3661          * toggling of the second slice.
3662          */
3663         if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
3664                 enabled_slices++;
3665
3666         return enabled_slices;
3667 }
3668
3669 /*
3670  * FIXME: We still don't have the proper code to detect if we need to apply the WA,
3671  * so assume we'll always need it in order to avoid underruns.
3672  */
3673 static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3674 {
3675         return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
3676 }
3677
3678 static bool
3679 intel_has_sagv(struct drm_i915_private *dev_priv)
3680 {
3681         /* HACK! */
3682         if (IS_GEN(dev_priv, 12))
3683                 return false;
3684
3685         return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3686                 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3687 }
3688
3689 static void
3690 skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
3691 {
3692         if (INTEL_GEN(dev_priv) >= 12) {
3693                 u32 val = 0;
3694                 int ret;
3695
3696                 ret = sandybridge_pcode_read(dev_priv,
3697                                              GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
3698                                              &val, NULL);
3699                 if (!ret) {
3700                         dev_priv->sagv_block_time_us = val;
3701                         return;
3702                 }
3703
3704                 drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
3705         } else if (IS_GEN(dev_priv, 11)) {
3706                 dev_priv->sagv_block_time_us = 10;
3707                 return;
3708         } else if (IS_GEN(dev_priv, 10)) {
3709                 dev_priv->sagv_block_time_us = 20;
3710                 return;
3711         } else if (IS_GEN(dev_priv, 9)) {
3712                 dev_priv->sagv_block_time_us = 30;
3713                 return;
3714         } else {
3715                 MISSING_CASE(INTEL_GEN(dev_priv));
3716         }
3717
3718         /* Default to an unusable block time */
3719         dev_priv->sagv_block_time_us = -1;
3720 }
3721
3722 /*
3723  * SAGV dynamically adjusts the system agent voltage and clock frequencies
3724  * depending on power and performance requirements. The display engine access
3725  * to system memory is blocked during the adjustment time. Because of the
3726  * blocking time, having this enabled can cause full system hangs and/or pipe
3727  * underruns if we don't meet all of the following requirements:
3728  *
3729  *  - <= 1 pipe enabled
3730  *  - All planes can enable watermarks for latencies >= SAGV engine block time
3731  *  - We're not using an interlaced display configuration
3732  */
3733 int
3734 intel_enable_sagv(struct drm_i915_private *dev_priv)
3735 {
3736         int ret;
3737
3738         if (!intel_has_sagv(dev_priv))
3739                 return 0;
3740
3741         if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3742                 return 0;
3743
3744         drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
3745         ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3746                                       GEN9_SAGV_ENABLE);
3747
3748         /* We don't need to wait for SAGV when enabling */
3749
3750         /*
3751          * Some skl systems, pre-release machines in particular,
3752          * don't actually have SAGV.
3753          */
3754         if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3755                 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3756                 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3757                 return 0;
3758         } else if (ret < 0) {
3759                 drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
3760                 return ret;
3761         }
3762
3763         dev_priv->sagv_status = I915_SAGV_ENABLED;
3764         return 0;
3765 }
3766
3767 int
3768 intel_disable_sagv(struct drm_i915_private *dev_priv)
3769 {
3770         int ret;
3771
3772         if (!intel_has_sagv(dev_priv))
3773                 return 0;
3774
3775         if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3776                 return 0;
3777
3778         drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
3779         /* bspec says to keep retrying for at least 1 ms */
3780         ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3781                                 GEN9_SAGV_DISABLE,
3782                                 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3783                                 1);
3784         /*
3785          * Some skl systems, pre-release machines in particular,
3786          * don't actually have SAGV.
3787          */
3788         if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3789                 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3790                 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3791                 return 0;
3792         } else if (ret < 0) {
3793                 drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
3794                 return ret;
3795         }
3796
3797         dev_priv->sagv_status = I915_SAGV_DISABLED;
3798         return 0;
3799 }
3800
3801 bool intel_can_enable_sagv(struct intel_atomic_state *state)
3802 {
3803         struct drm_device *dev = state->base.dev;
3804         struct drm_i915_private *dev_priv = to_i915(dev);
3805         struct intel_crtc *crtc;
3806         struct intel_plane *plane;
3807         struct intel_crtc_state *crtc_state;
3808         enum pipe pipe;
3809         int level, latency;
3810
3811         if (!intel_has_sagv(dev_priv))
3812                 return false;
3813
3814         /*
3815          * If there are no active CRTCs, no additional checks need be performed
3816          */
3817         if (hweight8(state->active_pipes) == 0)
3818                 return true;
3819
3820         /*
3821          * SKL+ workaround: bspec recommends we disable SAGV when we have
3822          * more than one pipe enabled.
3823          */
3824         if (hweight8(state->active_pipes) > 1)
3825                 return false;
3826
3827         /* Since we're now guaranteed to only have one active CRTC... */
3828         pipe = ffs(state->active_pipes) - 1;
3829         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3830         crtc_state = to_intel_crtc_state(crtc->base.state);
3831
3832         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3833                 return false;
3834
3835         for_each_intel_plane_on_crtc(dev, crtc, plane) {
3836                 struct skl_plane_wm *wm =
3837                         &crtc_state->wm.skl.optimal.planes[plane->id];
3838
3839                 /* Skip this plane if it's not enabled */
3840                 if (!wm->wm[0].plane_en)
3841                         continue;
3842
3843                 /* Find the highest enabled wm level for this plane */
3844                 for (level = ilk_wm_max_level(dev_priv);
3845                      !wm->wm[level].plane_en; --level)
3846                      { }
3847
3848                 latency = dev_priv->wm.skl_latency[level];
3849
3850                 if (skl_needs_memory_bw_wa(dev_priv) &&
3851                     plane->base.state->fb->modifier ==
3852                     I915_FORMAT_MOD_X_TILED)
3853                         latency += 15;
3854
3855                 /*
3856                  * If any plane on this pipe doesn't enable a wm level that
3857                  * incurs memory latencies higher than sagv_block_time_us, we
3858                  * can't enable SAGV.
3859                  */
3860                 if (latency < dev_priv->sagv_block_time_us)
3861                         return false;
3862         }
3863
3864         return true;
3865 }
3866
3867 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3868                               const struct intel_crtc_state *crtc_state,
3869                               const u64 total_data_rate,
3870                               const int num_active,
3871                               struct skl_ddb_allocation *ddb)
3872 {
3873         const struct drm_display_mode *adjusted_mode;
3874         u64 total_data_bw;
3875         u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3876
3877         WARN_ON(ddb_size == 0);
3878
3879         if (INTEL_GEN(dev_priv) < 11)
3880                 return ddb_size - 4; /* 4 blocks for bypass path allocation */
3881
3882         adjusted_mode = &crtc_state->hw.adjusted_mode;
3883         total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
3884
3885         /*
3886          * 12GB/s is the maximum BW supported by a single DBuf slice.
3887          *
3888          * FIXME dbuf slice code is broken:
3889          * - must wait for planes to stop using the slice before powering it off
3890          * - plane straddling both slices is illegal in multi-pipe scenarios
3891          * - should validate we stay within the hw bandwidth limits
3892          */
3893         if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
3894                 ddb->enabled_slices = 2;
3895         } else {
3896                 ddb->enabled_slices = 1;
3897                 ddb_size /= 2;
3898         }
3899
3900         return ddb_size;
3901 }
3902
3903 static void
3904 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
3905                                    const struct intel_crtc_state *crtc_state,
3906                                    const u64 total_data_rate,
3907                                    struct skl_ddb_allocation *ddb,
3908                                    struct skl_ddb_entry *alloc, /* out */
3909                                    int *num_active /* out */)
3910 {
3911         struct drm_atomic_state *state = crtc_state->uapi.state;
3912         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3913         struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
3914         const struct intel_crtc *crtc;
3915         u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
3916         enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
3917         u16 ddb_size;
3918         u32 i;
3919
3920         if (WARN_ON(!state) || !crtc_state->hw.active) {
3921                 alloc->start = 0;
3922                 alloc->end = 0;
3923                 *num_active = hweight8(dev_priv->active_pipes);
3924                 return;
3925         }
3926
3927         if (intel_state->active_pipe_changes)
3928                 *num_active = hweight8(intel_state->active_pipes);
3929         else
3930                 *num_active = hweight8(dev_priv->active_pipes);
3931
3932         ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
3933                                       *num_active, ddb);
3934
3935         /*
3936          * If the state doesn't change the active CRTCs or there is no
3937          * modeset request, then there's no need to recalculate;
3938          * the existing pipe allocation limits should remain unchanged.
3939          * Note that we're safe from racing commits since any racing commit
3940          * that changes the active CRTC list or does a modeset would need to
3941          * grab _all_ crtc locks, including the one we currently hold.
3942          */
3943         if (!intel_state->active_pipe_changes && !intel_state->modeset) {
3944                 /*
3945                  * alloc may be cleared by clear_intel_crtc_state,
3946                  * copy from old state to be sure
3947                  */
3948                 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3949                 return;
3950         }
3951
3952         /*
3953          * Watermark/ddb requirements depend heavily on the width of the
3954          * framebuffer, so instead of allocating DDB equally among pipes,
3955          * distribute it based on the resolution/width of each display.
3956          */
3957         for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
3958                 const struct drm_display_mode *adjusted_mode =
3959                         &crtc_state->hw.adjusted_mode;
3960                 enum pipe pipe = crtc->pipe;
3961                 int hdisplay, vdisplay;
3962
3963                 if (!crtc_state->hw.enable)
3964                         continue;
3965
3966                 drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
3967                 total_width += hdisplay;
3968
3969                 if (pipe < for_pipe)
3970                         width_before_pipe += hdisplay;
3971                 else if (pipe == for_pipe)
3972                         pipe_width = hdisplay;
3973         }
3974
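             /*
              * E.g. (hypothetical numbers) pipes 1920 and 1280 pixels wide
              * sharing an 896-block DDB: the first pipe is allocated
              * [0, 537) and the second [537, 896).
              */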
3975         alloc->start = ddb_size * width_before_pipe / total_width;
3976         alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
3977 }
3978
3979 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
3980                                  int width, const struct drm_format_info *format,
3981                                  u64 modifier, unsigned int rotation,
3982                                  u32 plane_pixel_rate, struct skl_wm_params *wp,
3983                                  int color_plane);
3984 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
3985                                  int level,
3986                                  const struct skl_wm_params *wp,
3987                                  const struct skl_wm_level *result_prev,
3988                                  struct skl_wm_level *result /* out */);
3989
3990 static unsigned int
3991 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
3992                       int num_active)
3993 {
3994         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3995         int level, max_level = ilk_wm_max_level(dev_priv);
3996         struct skl_wm_level wm = {};
3997         int ret, min_ddb_alloc = 0;
3998         struct skl_wm_params wp;
3999
4000         ret = skl_compute_wm_params(crtc_state, 256,
4001                                     drm_format_info(DRM_FORMAT_ARGB8888),
4002                                     DRM_FORMAT_MOD_LINEAR,
4003                                     DRM_MODE_ROTATE_0,
4004                                     crtc_state->pixel_rate, &wp, 0);
4005         WARN_ON(ret);
4006
4007         for (level = 0; level <= max_level; level++) {
4008                 skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
4009                 if (wm.min_ddb_alloc == U16_MAX)
4010                         break;
4011
4012                 min_ddb_alloc = wm.min_ddb_alloc;
4013         }
4014
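             /* Minimum cursor allocation: 32 blocks with a single pipe, else 8 */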
4015         return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
4016 }
4017
4018 static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
4019                                        struct skl_ddb_entry *entry, u32 reg)
4020 {
4022         entry->start = reg & DDB_ENTRY_MASK;
4023         entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
4024
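             /*
              * The hw programs the end as an inclusive value; a non-zero
              * entry is converted to the exclusive form used internally.
              */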
4025         if (entry->end)
4026                 entry->end += 1;
4027 }
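
/*
 * Illustration (not driver code): decoding a BUF_CFG value as above. The
 * 0x7ff mask and the shift of 16 are hard-coded stand-ins for
 * DDB_ENTRY_MASK/DDB_ENTRY_END_SHIFT. The hardware stores an inclusive
 * end; the +1 converts it to the exclusive form used throughout this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg = (159u << 16) | 32u;      /* hw entry: start 32, end 159 */
        unsigned int start = reg & 0x7ff;
        unsigned int end = (reg >> 16) & 0x7ff;

        if (end)
                end += 1;                       /* exclusive end: [32, 160) */

        printf("ddb entry [%u, %u), size %u\n", start, end, end - start);
        return 0;
}
#endif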
4028
4029 static void
4030 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
4031                            const enum pipe pipe,
4032                            const enum plane_id plane_id,
4033                            struct skl_ddb_entry *ddb_y,
4034                            struct skl_ddb_entry *ddb_uv)
4035 {
4036         u32 val, val2;
4037         u32 fourcc = 0;
4038
4039         /* Cursor doesn't support NV12/planar, so no extra calculation needed */
4040         if (plane_id == PLANE_CURSOR) {
4041                 val = I915_READ(CUR_BUF_CFG(pipe));
4042                 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4043                 return;
4044         }
4045
4046         val = I915_READ(PLANE_CTL(pipe, plane_id));
4047
4048         /* No DDB allocated for disabled planes */
4049         if (val & PLANE_CTL_ENABLE)
4050                 fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
4051                                               val & PLANE_CTL_ORDER_RGBX,
4052                                               val & PLANE_CTL_ALPHA_MASK);
4053
4054         if (INTEL_GEN(dev_priv) >= 11) {
4055                 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4056                 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4057         } else {
4058                 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4059                 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
4060
4061                 if (fourcc &&
4062                     drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
4063                         swap(val, val2);
4064
4065                 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4066                 skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
4067         }
4068 }
4069
4070 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
4071                                struct skl_ddb_entry *ddb_y,
4072                                struct skl_ddb_entry *ddb_uv)
4073 {
4074         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4075         enum intel_display_power_domain power_domain;
4076         enum pipe pipe = crtc->pipe;
4077         intel_wakeref_t wakeref;
4078         enum plane_id plane_id;
4079
4080         power_domain = POWER_DOMAIN_PIPE(pipe);
4081         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4082         if (!wakeref)
4083                 return;
4084
4085         for_each_plane_id_on_crtc(crtc, plane_id)
4086                 skl_ddb_get_hw_plane_state(dev_priv, pipe,
4087                                            plane_id,
4088                                            &ddb_y[plane_id],
4089                                            &ddb_uv[plane_id]);
4090
4091         intel_display_power_put(dev_priv, power_domain, wakeref);
4092 }
4093
4094 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
4095                           struct skl_ddb_allocation *ddb /* out */)
4096 {
4097         ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
4098 }
4099
4100 /*
4101  * Determines the downscale amount of a plane for the purposes of watermark calculations.
4102  * The bspec defines downscale amount as:
4103  *
4104  * """
4105  * Horizontal down scale amount = maximum[1, Horizontal source size /
4106  *                                           Horizontal destination size]
4107  * Vertical down scale amount = maximum[1, Vertical source size /
4108  *                                         Vertical destination size]
4109  * Total down scale amount = Horizontal down scale amount *
4110  *                           Vertical down scale amount
4111  * """
4112  *
4113  * Return value is provided in 16.16 fixed point form to retain fractional part.
4114  * Caller should take care of dividing & rounding off the value.
4115  */
4116 static uint_fixed_16_16_t
4117 skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
4118                            const struct intel_plane_state *plane_state)
4119 {
4120         u32 src_w, src_h, dst_w, dst_h;
4121         uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4122         uint_fixed_16_16_t downscale_h, downscale_w;
4123
4124         if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
4125                 return u32_to_fixed16(0);
4126
4127         /*
4128          * Src coordinates are already rotated by 270 degrees for
4129          * the 90/270 degree plane rotation cases (to match the
4130          * GTT mapping), hence no need to account for rotation here.
4131          *
4132          * n.b., src is 16.16 fixed point, dst is whole integer.
4133          */
4134         src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4135         src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4136         dst_w = drm_rect_width(&plane_state->uapi.dst);
4137         dst_h = drm_rect_height(&plane_state->uapi.dst);
4138
4139         fp_w_ratio = div_fixed16(src_w, dst_w);
4140         fp_h_ratio = div_fixed16(src_h, dst_h);
4141         downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4142         downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4143
4144         return mul_fixed16(downscale_w, downscale_h);
4145 }
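
/*
 * Illustration (not driver code): the 16.16 downscale math above with the
 * fixed-point helpers open-coded as 64-bit shifts. A 3840x2160 source in a
 * 1920x1080 window yields a total downscale of 4.0 (0x40000).
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t div_fp(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a << 16) / b); }
static uint32_t mul_fp(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) >> 16); }

int main(void)
{
        const uint32_t one = 1u << 16;
        uint32_t w = div_fp(3840, 1920);        /* 2.0 */
        uint32_t h = div_fp(2160, 1080);        /* 2.0 */
        uint32_t total;

        if (w < one) w = one;                   /* max_fixed16(ratio, 1.0) */
        if (h < one) h = one;

        total = mul_fp(w, h);
        printf("total downscale = 0x%x (%u.%02u)\n", total, total >> 16,
               (unsigned int)(((uint64_t)(total & 0xffffu) * 100) >> 16));
        return 0;
}
#endif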
4146
4147 static u64
4148 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
4149                              const struct intel_plane_state *plane_state,
4150                              int color_plane)
4151 {
4152         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4153         const struct drm_framebuffer *fb = plane_state->hw.fb;
4154         u32 data_rate;
4155         u32 width = 0, height = 0;
4156         uint_fixed_16_16_t down_scale_amount;
4157         u64 rate;
4158
4159         if (!plane_state->uapi.visible)
4160                 return 0;
4161
4162         if (plane->id == PLANE_CURSOR)
4163                 return 0;
4164
4165         if (color_plane == 1 &&
4166             !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
4167                 return 0;
4168
4169         /*
4170          * Src coordinates are already rotated by 270 degrees for
4171          * the 90/270 degree plane rotation cases (to match the
4172          * GTT mapping), hence no need to account for rotation here.
4173          */
4174         width = drm_rect_width(&plane_state->uapi.src) >> 16;
4175         height = drm_rect_height(&plane_state->uapi.src) >> 16;
4176
4177         /* UV plane does 1/2 pixel sub-sampling */
4178         if (color_plane == 1) {
4179                 width /= 2;
4180                 height /= 2;
4181         }
4182
4183         data_rate = width * height;
4184
4185         down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
4186
4187         rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4188
4189         rate *= fb->format->cpp[color_plane];
4190         return rate;
4191 }
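
/*
 * Worked example of the relative data rate above: a non-scaled 1920x1080
 * ARGB8888 plane (cpp = 4) contributes 1920 * 1080 * 1.0 * 4 = 8294400.
 * For the UV plane of an NV12 framebuffer both dimensions are halved
 * first, so the same source adds 960 * 540 * 1.0 * cpp[1] on top of the
 * Y-plane rate.
 */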
4192
4193 static u64
4194 skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
4195                                  u64 *plane_data_rate,
4196                                  u64 *uv_plane_data_rate)
4197 {
4198         struct drm_atomic_state *state = crtc_state->uapi.state;
4199         struct intel_plane *plane;
4200         const struct intel_plane_state *plane_state;
4201         u64 total_data_rate = 0;
4202
4203         if (WARN_ON(!state))
4204                 return 0;
4205
4206         /* Calculate and cache data rate for each plane */
4207         intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
4208                 enum plane_id plane_id = plane->id;
4209                 u64 rate;
4210
4211                 /* packed/y */
4212                 rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4213                 plane_data_rate[plane_id] = rate;
4214                 total_data_rate += rate;
4215
4216                 /* uv-plane */
4217                 rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
4218                 uv_plane_data_rate[plane_id] = rate;
4219                 total_data_rate += rate;
4220         }
4221
4222         return total_data_rate;
4223 }
4224
4225 static u64
4226 icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
4227                                  u64 *plane_data_rate)
4228 {
4229         struct intel_plane *plane;
4230         const struct intel_plane_state *plane_state;
4231         u64 total_data_rate = 0;
4232
4233         if (WARN_ON(!crtc_state->uapi.state))
4234                 return 0;
4235
4236         /* Calculate and cache data rate for each plane */
4237         intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
4238                 enum plane_id plane_id = plane->id;
4239                 u64 rate;
4240
4241                 if (!plane_state->planar_linked_plane) {
4242                         rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4243                         plane_data_rate[plane_id] = rate;
4244                         total_data_rate += rate;
4245                 } else {
4246                         enum plane_id y_plane_id;
4247
4248                         /*
4249                          * The slave plane might not be iterated by
4250                          * intel_atomic_crtc_state_for_each_plane_state(),
4251                          * and needs the master plane state which may be
4252                          * NULL if we try get_new_plane_state(), so we
4253                          * always calculate from the master.
4254                          */
4255                         if (plane_state->planar_slave)
4256                                 continue;
4257
4258                         /* Y plane rate is calculated on the slave */
4259                         rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4260                         y_plane_id = plane_state->planar_linked_plane->id;
4261                         plane_data_rate[y_plane_id] = rate;
4262                         total_data_rate += rate;
4263
4264                         rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
4265                         plane_data_rate[plane_id] = rate;
4266                         total_data_rate += rate;
4267                 }
4268         }
4269
4270         return total_data_rate;
4271 }
4272
4273 static int
4274 skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
4275                       struct skl_ddb_allocation *ddb /* out */)
4276 {
4277         struct drm_atomic_state *state = crtc_state->uapi.state;
4278         struct drm_crtc *crtc = crtc_state->uapi.crtc;
4279         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4280         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4281         struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
4282         u16 alloc_size, start = 0;
4283         u16 total[I915_MAX_PLANES] = {};
4284         u16 uv_total[I915_MAX_PLANES] = {};
4285         u64 total_data_rate;
4286         enum plane_id plane_id;
4287         int num_active;
4288         u64 plane_data_rate[I915_MAX_PLANES] = {};
4289         u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4290         u32 blocks;
4291         int level;
4292
4293         /* Clear the partitioning for disabled planes. */
4294         memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
4295         memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
4296
4297         if (WARN_ON(!state))
4298                 return 0;
4299
4300         if (!crtc_state->hw.active) {
4301                 alloc->start = alloc->end = 0;
4302                 return 0;
4303         }
4304
4305         if (INTEL_GEN(dev_priv) >= 11)
4306                 total_data_rate =
4307                         icl_get_total_relative_data_rate(crtc_state,
4308                                                          plane_data_rate);
4309         else
4310                 total_data_rate =
4311                         skl_get_total_relative_data_rate(crtc_state,
4312                                                          plane_data_rate,
4313                                                          uv_plane_data_rate);
4314
4316         skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
4317                                            ddb, alloc, &num_active);
4318         alloc_size = skl_ddb_entry_size(alloc);
4319         if (alloc_size == 0)
4320                 return 0;
4321
4322         /* Allocate fixed number of blocks for cursor. */
4323         total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
4324         alloc_size -= total[PLANE_CURSOR];
4325         crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
4326                 alloc->end - total[PLANE_CURSOR];
4327         crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4328
4329         if (total_data_rate == 0)
4330                 return 0;
4331
4332         /*
4333          * Find the highest watermark level for which we can satisfy the block
4334          * requirement of active planes.
4335          */
4336         for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
4337                 blocks = 0;
4338                 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4339                         const struct skl_plane_wm *wm =
4340                                 &crtc_state->wm.skl.optimal.planes[plane_id];
4341
4342                         if (plane_id == PLANE_CURSOR) {
4343                                 if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
4344                                         WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
4345                                         blocks = U32_MAX;
4346                                         break;
4347                                 }
4348                                 continue;
4349                         }
4350
4351                         blocks += wm->wm[level].min_ddb_alloc;
4352                         blocks += wm->uv_wm[level].min_ddb_alloc;
4353                 }
4354
4355                 if (blocks <= alloc_size) {
4356                         alloc_size -= blocks;
4357                         break;
4358                 }
4359         }
4360
4361         if (level < 0) {
4362                 drm_dbg_kms(&dev_priv->drm,
4363                             "Requested display configuration exceeds system DDB limitations\n");
4364                 drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
4365                             blocks, alloc_size);
4366                 return -EINVAL;
4367         }
4368
4369         /*
4370          * Grant each plane the blocks it requires at the highest achievable
4371          * watermark level, plus an extra share of the leftover blocks
4372          * proportional to its relative data rate.
4373          */
4374         for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4375                 const struct skl_plane_wm *wm =
4376                         &crtc_state->wm.skl.optimal.planes[plane_id];
4377                 u64 rate;
4378                 u16 extra;
4379
4380                 if (plane_id == PLANE_CURSOR)
4381                         continue;
4382
4383                 /*
4384                  * We've accounted for all active planes; remaining planes are
4385                  * all disabled.
4386                  */
4387                 if (total_data_rate == 0)
4388                         break;
4389
4390                 rate = plane_data_rate[plane_id];
4391                 extra = min_t(u16, alloc_size,
4392                               DIV64_U64_ROUND_UP(alloc_size * rate,
4393                                                  total_data_rate));
4394                 total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
4395                 alloc_size -= extra;
4396                 total_data_rate -= rate;
4397
4398                 if (total_data_rate == 0)
4399                         break;
4400
4401                 rate = uv_plane_data_rate[plane_id];
4402                 extra = min_t(u16, alloc_size,
4403                               DIV64_U64_ROUND_UP(alloc_size * rate,
4404                                                  total_data_rate));
4405                 uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
4406                 alloc_size -= extra;
4407                 total_data_rate -= rate;
4408         }
4409         WARN_ON(alloc_size != 0 || total_data_rate != 0);
4410
4411         /* Set the actual DDB start/end points for each plane */
4412         start = alloc->start;
4413         for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4414                 struct skl_ddb_entry *plane_alloc =
4415                         &crtc_state->wm.skl.plane_ddb_y[plane_id];
4416                 struct skl_ddb_entry *uv_plane_alloc =
4417                         &crtc_state->wm.skl.plane_ddb_uv[plane_id];
4418
4419                 if (plane_id == PLANE_CURSOR)
4420                         continue;
4421
4422                 /* Gen11+ uses a separate plane for UV watermarks */
4423                 WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
4424
4425                 /* Leave disabled planes at (0,0) */
4426                 if (total[plane_id]) {
4427                         plane_alloc->start = start;
4428                         start += total[plane_id];
4429                         plane_alloc->end = start;
4430                 }
4431
4432                 if (uv_total[plane_id]) {
4433                         uv_plane_alloc->start = start;
4434                         start += uv_total[plane_id];
4435                         uv_plane_alloc->end = start;
4436                 }
4437         }
4438
4439         /*
4440          * When we calculated watermark values we didn't know how high
4441          * of a level we'd actually be able to hit, so we just marked
4442          * all levels as "enabled."  Go back now and disable the ones
4443          * that aren't actually possible.
4444          */
4445         for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
4446                 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4447                         struct skl_plane_wm *wm =
4448                                 &crtc_state->wm.skl.optimal.planes[plane_id];
4449
4450                         /*
4451                          * We only disable the watermarks for each plane if
4452                          * they exceed the ddb allocation of said plane. This
4453                          * is done so that we don't end up touching cursor
4454                          * watermarks needlessly when some other plane reduces
4455                          * our max possible watermark level.
4456                          *
4457                          * Bspec has this to say about the PLANE_WM enable bit:
4458                          * "All the watermarks at this level for all enabled
4459                          *  planes must be enabled before the level will be used."
4460                          * So this is actually safe to do.
4461                          */
4462                         if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
4463                             wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
4464                                 memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
4465
4466                         /*
4467                          * Wa_1408961008:icl, ehl
4468                          * Underruns with WM1+ disabled
4469                          */
4470                         if (IS_GEN(dev_priv, 11) &&
4471                             level == 1 && wm->wm[0].plane_en) {
4472                                 wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
4473                                 wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
4474                                 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
4475                         }
4476                 }
4477         }
4478
4479         /*
4480          * Go back and disable the transition watermark if it turns out we
4481          * don't have enough DDB blocks for it.
4482          */
4483         for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4484                 struct skl_plane_wm *wm =
4485                         &crtc_state->wm.skl.optimal.planes[plane_id];
4486
4487                 if (wm->trans_wm.plane_res_b >= total[plane_id])
4488                         memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
4489         }
4490
4491         return 0;
4492 }
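
/*
 * Illustration (not driver code): a toy model of the leftover-block
 * distribution above, with two planes and made-up rates and minimums;
 * with these numbers the planes end up with 115 and 45 blocks. Each plane
 * gets its level-N minimum plus a rate-proportional share of what is left;
 * decrementing alloc_size/total_rate as we go lets the last plane absorb
 * any rounding slack, which is what the WARN_ON above checks.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t min[2] = { 40, 20 };           /* min_ddb_alloc at the chosen level */
        uint64_t rate[2] = { 300000, 100000 };  /* relative data rates */
        uint16_t alloc_size = 100;              /* blocks left after the minimums */
        uint64_t total_rate = rate[0] + rate[1];
        int i;

        for (i = 0; i < 2; i++) {
                /* DIV64_U64_ROUND_UP(alloc_size * rate, total_rate), open-coded */
                uint16_t extra = (uint16_t)((alloc_size * rate[i] + total_rate - 1) / total_rate);

                if (extra > alloc_size)
                        extra = alloc_size;     /* min_t(u16, alloc_size, ...) */
                printf("plane %d: %u blocks\n", i, (unsigned int)(min[i] + extra));
                alloc_size -= extra;
                total_rate -= rate[i];
        }
        return 0;
}
#endif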
4493
4494 /*
4495  * The max latency should be 257 (max the punit can code is 255 and we add 2us
4496  * for the read latency) and cpp should always be <= 8, so that
4497  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
4498  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
4499  */
4500 static uint_fixed_16_16_t
4501 skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
4502                u8 cpp, u32 latency, u32 dbuf_block_size)
4503 {
4504         u32 wm_intermediate_val;
4505         uint_fixed_16_16_t ret;
4506
4507         if (latency == 0)
4508                 return FP_16_16_MAX;
4509
4510         wm_intermediate_val = latency * pixel_rate * cpp;
4511         ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
4512
4513         if (INTEL_GEN(dev_priv) >= 10)
4514                 ret = add_fixed16_u32(ret, 1);
4515
4516         return ret;
4517 }
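
/*
 * Worked method-1 example (made-up numbers): latency = 10 us, plane pixel
 * rate = 148500 kHz (1080p@60), cpp = 4, 512-byte blocks:
 * 10 * 148500 * 4 / (1000 * 512) = ~11.6 blocks, i.e. the bytes fetched
 * during the latency window divided by the DBUF block size, with gen10+
 * adding one more block on top.
 */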
4518
4519 static uint_fixed_16_16_t
4520 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
4521                uint_fixed_16_16_t plane_blocks_per_line)
4522 {
4523         u32 wm_intermediate_val;
4524         uint_fixed_16_16_t ret;
4525
4526         if (latency == 0)
4527                 return FP_16_16_MAX;
4528
4529         wm_intermediate_val = latency * pixel_rate;
4530         wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
4531                                            pipe_htotal * 1000);
4532         ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
4533         return ret;
4534 }
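
/*
 * Worked method-2 example with the same numbers plus crtc_htotal = 2200:
 * DIV_ROUND_UP(10 * 148500, 2200 * 1000) = 1 line of latency, so method 2
 * returns 1 * plane_blocks_per_line: "lines of latency times blocks per
 * line".
 */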
4535
4536 static uint_fixed_16_16_t
4537 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
4538 {
4539         u32 pixel_rate;
4540         u32 crtc_htotal;
4541         uint_fixed_16_16_t linetime_us;
4542
4543         if (!crtc_state->hw.active)
4544                 return u32_to_fixed16(0);
4545
4546         pixel_rate = crtc_state->pixel_rate;
4547
4548         if (WARN_ON(pixel_rate == 0))
4549                 return u32_to_fixed16(0);
4550
4551         crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
4552         linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
4553
4554         return linetime_us;
4555 }
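
/*
 * Worked example of the linetime above, matching the method-2 numbers:
 * 2200 * 1000 / 148500 = ~14.81 us in 16.16 fixed point, which is why the
 * 10 us latency there fits within a single line.
 */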
4556
4557 static u32
4558 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
4559                               const struct intel_plane_state *plane_state)
4560 {
4561         u64 adjusted_pixel_rate;
4562         uint_fixed_16_16_t downscale_amount;
4563
4564         /* Shouldn't reach here on disabled planes... */
4565         if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
4566                 return 0;
4567
4568         /*
4569          * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
4570          * with additional adjustments for plane-specific scaling.
4571          */
4572         adjusted_pixel_rate = crtc_state->pixel_rate;
4573         downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
4574
4575         return mul_round_up_u32_fixed16(adjusted_pixel_rate,
4576                                             downscale_amount);
4577 }
4578
4579 static int
4580 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4581                       int width, const struct drm_format_info *format,
4582                       u64 modifier, unsigned int rotation,
4583                       u32 plane_pixel_rate, struct skl_wm_params *wp,
4584                       int color_plane)
4585 {
4586         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4587         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4588         u32 interm_pbpl;
4589
4590         /* only planar formats have two planes */
4591         if (color_plane == 1 &&
4592             !intel_format_info_is_yuv_semiplanar(format, modifier)) {
4593                 drm_dbg_kms(&dev_priv->drm,
4594                             "Non-planar formats have a single plane\n");
4595                 return -EINVAL;
4596         }
4597
4598         wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
4599                       modifier == I915_FORMAT_MOD_Yf_TILED ||
4600                       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4601                       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4602         wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
4603         wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4604                          modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4605         wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
4606
4607         wp->width = width;
4608         if (color_plane == 1 && wp->is_planar)
4609                 wp->width /= 2;
4610
4611         wp->cpp = format->cpp[color_plane];
4612         wp->plane_pixel_rate = plane_pixel_rate;
4613
4614         if (INTEL_GEN(dev_priv) >= 11 &&
4615             modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
4616                 wp->dbuf_block_size = 256;
4617         else
4618                 wp->dbuf_block_size = 512;
4619
4620         if (drm_rotation_90_or_270(rotation)) {
4621                 switch (wp->cpp) {
4622                 case 1:
4623                         wp->y_min_scanlines = 16;
4624                         break;
4625                 case 2:
4626                         wp->y_min_scanlines = 8;
4627                         break;
4628                 case 4:
4629                         wp->y_min_scanlines = 4;
4630                         break;
4631                 default:
4632                         MISSING_CASE(wp->cpp);
4633                         return -EINVAL;
4634                 }
4635         } else {
4636                 wp->y_min_scanlines = 4;
4637         }
4638
4639         if (skl_needs_memory_bw_wa(dev_priv))
4640                 wp->y_min_scanlines *= 2;
4641
4642         wp->plane_bytes_per_line = wp->width * wp->cpp;
4643         if (wp->y_tiled) {
4644                 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
4645                                            wp->y_min_scanlines,
4646                                            wp->dbuf_block_size);
4647
4648                 if (INTEL_GEN(dev_priv) >= 10)
4649                         interm_pbpl++;
4650
4651                 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
4652                                                         wp->y_min_scanlines);
4653         } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
4654                 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4655                                            wp->dbuf_block_size);
4656                 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4657         } else {
4658                 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4659                                            wp->dbuf_block_size) + 1;
4660                 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4661         }
4662
4663         wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
4664                                              wp->plane_blocks_per_line);
4665
4666         wp->linetime_us = fixed16_to_u32_round_up(
4667                                         intel_get_linetime_us(crtc_state));
4668
4669         return 0;
4670 }
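
/*
 * Worked example of the blocks-per-line math above: a 1920-wide y-tiled
 * plane with cpp = 4 has plane_bytes_per_line = 7680. With
 * y_min_scanlines = 4 and 512-byte blocks,
 * interm_pbpl = DIV_ROUND_UP(7680 * 4, 512) = 60 (61 on gen10+), giving
 * plane_blocks_per_line = 61 / 4 = 15.25 in 16.16 fixed point.
 */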
4671
4672 static int
4673 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
4674                             const struct intel_plane_state *plane_state,
4675                             struct skl_wm_params *wp, int color_plane)
4676 {
4677         const struct drm_framebuffer *fb = plane_state->hw.fb;
4678         int width;
4679
4680         /*
4681          * Src coordinates are already rotated by 270 degrees for
4682          * the 90/270 degree plane rotation cases (to match the
4683          * GTT mapping), hence no need to account for rotation here.
4684          */
4685         width = drm_rect_width(&plane_state->uapi.src) >> 16;
4686
4687         return skl_compute_wm_params(crtc_state, width,
4688                                      fb->format, fb->modifier,
4689                                      plane_state->hw.rotation,
4690                                      skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
4691                                      wp, color_plane);
4692 }
4693
4694 static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
4695 {
4696         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4697                 return true;
4698
4699         /* The number of lines is ignored for the level 0 watermark. */
4700         return level > 0;
4701 }
4702
4703 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
4704                                  int level,
4705                                  const struct skl_wm_params *wp,
4706                                  const struct skl_wm_level *result_prev,
4707                                  struct skl_wm_level *result /* out */)
4708 {
4709         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4710         u32 latency = dev_priv->wm.skl_latency[level];
4711         uint_fixed_16_16_t method1, method2;
4712         uint_fixed_16_16_t selected_result;
4713         u32 res_blocks, res_lines, min_ddb_alloc = 0;
4714
4715         if (latency == 0) {
4716                 /* reject it */
4717                 result->min_ddb_alloc = U16_MAX;
4718                 return;
4719         }
4720
4721         /*
4722          * WaIncreaseLatencyIPCEnabled: kbl,cfl
4723          * Display WA #1141: kbl,cfl
4724          */
4725         if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
4726             dev_priv->ipc_enabled)
4727                 latency += 4;
4728
4729         if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
4730                 latency += 15;
4731
4732         method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
4733                                  wp->cpp, latency, wp->dbuf_block_size);
4734         method2 = skl_wm_method2(wp->plane_pixel_rate,
4735                                  crtc_state->hw.adjusted_mode.crtc_htotal,
4736                                  latency,
4737                                  wp->plane_blocks_per_line);
4738
4739         if (wp->y_tiled) {
4740                 selected_result = max_fixed16(method2, wp->y_tile_minimum);
4741         } else {
4742                 if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
4743                      wp->dbuf_block_size < 1) &&
4744                      (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
4745                         selected_result = method2;
4746                 } else if (latency >= wp->linetime_us) {
4747                         if (IS_GEN(dev_priv, 9) &&
4748                             !IS_GEMINILAKE(dev_priv))
4749                                 selected_result = min_fixed16(method1, method2);
4750                         else
4751                                 selected_result = method2;
4752                 } else {
4753                         selected_result = method1;
4754                 }
4755         }
4756
4757         res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
4758         res_lines = div_round_up_fixed16(selected_result,
4759                                          wp->plane_blocks_per_line);
4760
4761         if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
4762                 /* Display WA #1125: skl,bxt,kbl */
4763                 if (level == 0 && wp->rc_surface)
4764                         res_blocks +=
4765                                 fixed16_to_u32_round_up(wp->y_tile_minimum);
4766
4767                 /* Display WA #1126: skl,bxt,kbl */
4768                 if (level >= 1 && level <= 7) {
4769                         if (wp->y_tiled) {
4770                                 res_blocks +=
4771                                     fixed16_to_u32_round_up(wp->y_tile_minimum);
4772                                 res_lines += wp->y_min_scanlines;
4773                         } else {
4774                                 res_blocks++;
4775                         }
4776
4777                         /*
4778                          * Make sure the result blocks for higher latency levels
4779                          * are at least as high as for the level below the current
4780                          * one. This is an assumption in the DDB algorithm
4781                          * optimization for special cases. Also covers Display WA #1125 for RC.
4782                          */
4783                         if (result_prev->plane_res_b > res_blocks)
4784                                 res_blocks = result_prev->plane_res_b;
4785                 }
4786         }
4787
4788         if (INTEL_GEN(dev_priv) >= 11) {
4789                 if (wp->y_tiled) {
4790                         int extra_lines;
4791
4792                         if (res_lines % wp->y_min_scanlines == 0)
4793                                 extra_lines = wp->y_min_scanlines;
4794                         else
4795                                 extra_lines = wp->y_min_scanlines * 2 -
4796                                         res_lines % wp->y_min_scanlines;
4797
4798                         min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
4799                                                                  wp->plane_blocks_per_line);
4800                 } else {
4801                         min_ddb_alloc = res_blocks +
4802                                 DIV_ROUND_UP(res_blocks, 10);
4803                 }
4804         }
4805
4806         if (!skl_wm_has_lines(dev_priv, level))
4807                 res_lines = 0;
4808
4809         if (res_lines > 31) {
4810                 /* reject it */
4811                 result->min_ddb_alloc = U16_MAX;
4812                 return;
4813         }
4814
4815         /*
4816          * If res_lines is valid, assume we can use this watermark level
4817          * for now.  We'll come back and disable it after we calculate the
4818          * DDB allocation if it turns out we don't actually have enough
4819          * blocks to satisfy it.
4820          */
4821         result->plane_res_b = res_blocks;
4822         result->plane_res_l = res_lines;
4823         /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
4824         result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
4825         result->plane_en = true;
4826 }
4827
4828 static void
4829 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
4830                       const struct skl_wm_params *wm_params,
4831                       struct skl_wm_level *levels)
4832 {
4833         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4834         int level, max_level = ilk_wm_max_level(dev_priv);
4835         struct skl_wm_level *result_prev = &levels[0];
4836
4837         for (level = 0; level <= max_level; level++) {
4838                 struct skl_wm_level *result = &levels[level];
4839
4840                 skl_compute_plane_wm(crtc_state, level, wm_params,
4841                                      result_prev, result);
4842
4843                 result_prev = result;
4844         }
4845 }
4846
4847 static u32
4848 skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
4849 {
4850         struct drm_atomic_state *state = crtc_state->uapi.state;
4851         struct drm_i915_private *dev_priv = to_i915(state->dev);
4852         uint_fixed_16_16_t linetime_us;
4853         u32 linetime_wm;
4854
4855         linetime_us = intel_get_linetime_us(crtc_state);
4856         linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
4857
4858         /* Display WA #1135: BXT:ALL GLK:ALL */
4859         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
4860                 linetime_wm /= 2;
4861
4862         return linetime_wm;
4863 }
4864
4865 static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
4866                                       const struct skl_wm_params *wp,
4867                                       struct skl_plane_wm *wm)
4868 {
4869         struct drm_device *dev = crtc_state->uapi.crtc->dev;
4870         const struct drm_i915_private *dev_priv = to_i915(dev);
4871         u16 trans_min, trans_y_tile_min;
4872         const u16 trans_amount = 10; /* This is a configurable amount */
4873         u16 wm0_sel_res_b, trans_offset_b, res_blocks;
4874
4875         /* Transition WMs are not recommended by the HW team for GEN9 */
4876         if (INTEL_GEN(dev_priv) <= 9)
4877                 return;
4878
4879         /* Transition WMs don't make any sense if IPC is disabled */
4880         if (!dev_priv->ipc_enabled)
4881                 return;
4882
4883         trans_min = 14;
4884         if (INTEL_GEN(dev_priv) >= 11)
4885                 trans_min = 4;
4886
4887         trans_offset_b = trans_min + trans_amount;
4888
4889         /*
4890          * The spec asks for Selected Result Blocks for wm0 (the real value),
4891          * not Result Blocks (the integer value). Pay attention to the capital
4892          * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
4893          * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
4894          * and since we later will have to get the ceiling of the sum in the
4895          * transition watermarks calculation, we can just pretend Selected
4896          * Result Blocks is Result Blocks minus 1 and it should work for the
4897          * current platforms.
4898          */
4899         wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
4900
4901         if (wp->y_tiled) {
4902                 trans_y_tile_min =
4903                         (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
4904                 res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
4905                                 trans_offset_b;
4906         } else {
4907                 res_blocks = wm0_sel_res_b + trans_offset_b;
4908
4909                 /* WA BUG:1938466 add one block for non y-tile planes */
4910                 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
4911                         res_blocks += 1;
4913         }
4914
4915         /*
4916          * Just assume we can enable the transition watermark.  After
4917          * computing the DDB we'll come back and disable it if that
4918          * assumption turns out to be false.
4919          */
4920         wm->trans_wm.plane_res_b = res_blocks + 1;
4921         wm->trans_wm.plane_en = true;
4922 }
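
/*
 * Worked example of the transition watermark above, assuming an ICL
 * y-tiled plane with wm0 plane_res_b = 12 and y_tile_minimum = 8.0:
 * wm0_sel_res_b = 11, trans_offset_b = 4 + 10 = 14, trans_y_tile_min = 16,
 * res_blocks = max(11, 16) + 14 = 30, and the programmed
 * trans_wm.plane_res_b = 31.
 */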
4923
4924 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
4925                                      const struct intel_plane_state *plane_state,
4926                                      enum plane_id plane_id, int color_plane)
4927 {
4928         struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4929         struct skl_wm_params wm_params;
4930         int ret;
4931
4932         ret = skl_compute_plane_wm_params(crtc_state, plane_state,
4933                                           &wm_params, color_plane);
4934         if (ret)
4935                 return ret;
4936
4937         skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
4938         skl_compute_transition_wm(crtc_state, &wm_params, wm);
4939
4940         return 0;
4941 }
4942
4943 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
4944                                  const struct intel_plane_state *plane_state,
4945                                  enum plane_id plane_id)
4946 {
4947         struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4948         struct skl_wm_params wm_params;
4949         int ret;
4950
4951         wm->is_planar = true;
4952
4953         /* uv plane watermarks must also be validated for NV12/Planar */
4954         ret = skl_compute_plane_wm_params(crtc_state, plane_state,
4955                                           &wm_params, 1);
4956         if (ret)
4957                 return ret;
4958
4959         skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
4960
4961         return 0;
4962 }
4963
4964 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
4965                               const struct intel_plane_state *plane_state)
4966 {
4967         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4968         const struct drm_framebuffer *fb = plane_state->hw.fb;
4969         enum plane_id plane_id = plane->id;
4970         int ret;
4971
4972         if (!intel_wm_plane_visible(crtc_state, plane_state))
4973                 return 0;
4974
4975         ret = skl_build_plane_wm_single(crtc_state, plane_state,
4976                                         plane_id, 0);
4977         if (ret)
4978                 return ret;
4979
4980         if (fb->format->is_yuv && fb->format->num_planes > 1) {
4981                 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
4982                                             plane_id);
4983                 if (ret)
4984                         return ret;
4985         }
4986
4987         return 0;
4988 }
4989
4990 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
4991                               const struct intel_plane_state *plane_state)
4992 {
4993         enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
4994         int ret;
4995
4996         /* Watermarks calculated in master */
4997         if (plane_state->planar_slave)
4998                 return 0;
4999
5000         if (plane_state->planar_linked_plane) {
5001                 const struct drm_framebuffer *fb = plane_state->hw.fb;
5002                 enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
5003
5004                 WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
5005                 WARN_ON(!fb->format->is_yuv ||
5006                         fb->format->num_planes == 1);
5007
5008                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5009                                                 y_plane_id, 0);
5010                 if (ret)
5011                         return ret;
5012
5013                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5014                                                 plane_id, 1);
5015                 if (ret)
5016                         return ret;
5017         } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
5018                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5019                                                 plane_id, 0);
5020                 if (ret)
5021                         return ret;
5022         }
5023
5024         return 0;
5025 }
5026
5027 static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
5028 {
5029         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5030         struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5031         struct intel_plane *plane;
5032         const struct intel_plane_state *plane_state;
5033         int ret;
5034
5035         /*
5036          * We'll only calculate watermarks for planes that are actually
5037          * enabled, so make sure all other planes are set as disabled.
5038          */
5039         memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
5040
5041         intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
5042                                                      crtc_state) {
5043
5044                 if (INTEL_GEN(dev_priv) >= 11)
5045                         ret = icl_build_plane_wm(crtc_state, plane_state);
5046                 else
5047                         ret = skl_build_plane_wm(crtc_state, plane_state);
5048                 if (ret)
5049                         return ret;
5050         }
5051
5052         pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
5053
5054         return 0;
5055 }
5056
5057 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5058                                 i915_reg_t reg,
5059                                 const struct skl_ddb_entry *entry)
5060 {
5061         if (entry->end)
5062                 I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
5063         else
5064                 I915_WRITE_FW(reg, 0);
5065 }
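
/*
 * This is the inverse of skl_ddb_entry_init_from_hw() above: the
 * exclusive-end entry [32, 160) is written back as
 * (160 - 1) << 16 | 32 = 0x009f0020, restoring the inclusive end the
 * hardware expects, while an empty entry (end == 0) is written as zero.
 */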
5066
5067 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5068                                i915_reg_t reg,
5069                                const struct skl_wm_level *level)
5070 {
5071         u32 val = 0;
5072
5073         if (level->plane_en)
5074                 val |= PLANE_WM_EN;
5075         if (level->ignore_lines)
5076                 val |= PLANE_WM_IGNORE_LINES;
5077         val |= level->plane_res_b;
5078         val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
5079
5080         I915_WRITE_FW(reg, val);
5081 }
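
/*
 * Worked example of the PLANE_WM packing above: an enabled level with
 * plane_res_b = 42 and plane_res_l = 3 is programmed as
 * PLANE_WM_EN | (3 << PLANE_WM_LINES_SHIFT) | 42; the blocks field lives
 * in the low bits and needs no shift, and the ignore-lines bit stays
 * clear.
 */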
5082
5083 void skl_write_plane_wm(struct intel_plane *plane,
5084                         const struct intel_crtc_state *crtc_state)
5085 {
5086         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5087         int level, max_level = ilk_wm_max_level(dev_priv);
5088         enum plane_id plane_id = plane->id;
5089         enum pipe pipe = plane->pipe;
5090         const struct skl_plane_wm *wm =
5091                 &crtc_state->wm.skl.optimal.planes[plane_id];
5092         const struct skl_ddb_entry *ddb_y =
5093                 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5094         const struct skl_ddb_entry *ddb_uv =
5095                 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
5096
5097         for (level = 0; level <= max_level; level++) {
5098                 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
5099                                    &wm->wm[level]);
5100         }
5101         skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5102                            &wm->trans_wm);
5103
5104         if (INTEL_GEN(dev_priv) >= 11) {
5105                 skl_ddb_entry_write(dev_priv,
5106                                     PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5107                 return;
5108         }
5109
5110         if (wm->is_planar)
5111                 swap(ddb_y, ddb_uv);
5112
5113         skl_ddb_entry_write(dev_priv,
5114                             PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5115         skl_ddb_entry_write(dev_priv,
5116                             PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
5117 }
5118
5119 void skl_write_cursor_wm(struct intel_plane *plane,
5120                          const struct intel_crtc_state *crtc_state)
5121 {
5122         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5123         int level, max_level = ilk_wm_max_level(dev_priv);
5124         enum plane_id plane_id = plane->id;
5125         enum pipe pipe = plane->pipe;
5126         const struct skl_plane_wm *wm =
5127                 &crtc_state->wm.skl.optimal.planes[plane_id];
5128         const struct skl_ddb_entry *ddb =
5129                 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5130
5131         for (level = 0; level <= max_level; level++) {
5132                 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5133                                    &wm->wm[level]);
5134         }
5135         skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
5136
5137         skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
5138 }
5139
5140 bool skl_wm_level_equals(const struct skl_wm_level *l1,
5141                          const struct skl_wm_level *l2)
5142 {
5143         return l1->plane_en == l2->plane_en &&
5144                 l1->ignore_lines == l2->ignore_lines &&
5145                 l1->plane_res_l == l2->plane_res_l &&
5146                 l1->plane_res_b == l2->plane_res_b;
5147 }
5148
5149 static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5150                                 const struct skl_plane_wm *wm1,
5151                                 const struct skl_plane_wm *wm2)
5152 {
5153         int level, max_level = ilk_wm_max_level(dev_priv);
5154
5155         for (level = 0; level <= max_level; level++) {
5156                 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
5157                     !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
5158                         return false;
5159         }
5160
5161         return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
5162 }
5163
5164 static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
5165                                const struct skl_pipe_wm *wm1,
5166                                const struct skl_pipe_wm *wm2)
5167 {
5168         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5169         enum plane_id plane_id;
5170
5171         for_each_plane_id_on_crtc(crtc, plane_id) {
5172                 if (!skl_plane_wm_equals(dev_priv,
5173                                          &wm1->planes[plane_id],
5174                                          &wm2->planes[plane_id]))
5175                         return false;
5176         }
5177
5178         return wm1->linetime == wm2->linetime;
5179 }
5180
5181 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5182                                            const struct skl_ddb_entry *b)
5183 {
5184         return a->start < b->end && b->start < a->end;
5185 }
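
/*
 * Since DDB entries are half-open intervals [start, end), adjacent entries
 * do not count as overlapping here: [0, 100) vs [100, 200) fails
 * b->start < a->end, while [0, 100) vs [99, 200) overlaps.
 */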
5186
5187 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5188                                  const struct skl_ddb_entry *entries,
5189                                  int num_entries, int ignore_idx)
5190 {
5191         int i;
5192
5193         for (i = 0; i < num_entries; i++) {
5194                 if (i != ignore_idx &&
5195                     skl_ddb_entries_overlap(ddb, &entries[i]))
5196                         return true;
5197         }
5198
5199         return false;
5200 }
5201
5202 static int
5203 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
5204                             struct intel_crtc_state *new_crtc_state)
5205 {
5206         struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
5207         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5208         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5209         struct intel_plane *plane;
5210
5211         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5212                 struct intel_plane_state *plane_state;
5213                 enum plane_id plane_id = plane->id;
5214
5215                 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
5216                                         &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
5217                     skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
5218                                         &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
5219                         continue;
5220
5221                 plane_state = intel_atomic_get_plane_state(state, plane);
5222                 if (IS_ERR(plane_state))
5223                         return PTR_ERR(plane_state);
5224
5225                 new_crtc_state->update_planes |= BIT(plane_id);
5226         }
5227
5228         return 0;
5229 }
5230
5231 static int
5232 skl_compute_ddb(struct intel_atomic_state *state)
5233 {
5234         const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5235         struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
5236         struct intel_crtc_state *old_crtc_state;
5237         struct intel_crtc_state *new_crtc_state;
5238         struct intel_crtc *crtc;
5239         int ret, i;
5240
5241         memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
5242
5243         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5244                                             new_crtc_state, i) {
5245                 ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
5246                 if (ret)
5247                         return ret;
5248
5249                 ret = skl_ddb_add_affected_planes(old_crtc_state,
5250                                                   new_crtc_state);
5251                 if (ret)
5252                         return ret;
5253         }
5254
        return 0;
}

static char enast(bool enable)
{
        return enable ? '*' : ' ';
}

static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state;
        const struct intel_crtc_state *new_crtc_state;
        struct intel_plane *plane;
        struct intel_crtc *crtc;
        int i;

        if (!drm_debug_enabled(DRM_UT_KMS))
                return;

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;

                old_pipe_wm = &old_crtc_state->wm.skl.optimal;
                new_pipe_wm = &new_crtc_state->wm.skl.optimal;

                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                        enum plane_id plane_id = plane->id;
                        const struct skl_ddb_entry *old, *new;

                        old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
                        new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];

                        if (skl_ddb_entry_equal(old, new))
                                continue;

                        drm_dbg_kms(&dev_priv->drm,
                                    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
                                    plane->base.base.id, plane->base.name,
                                    old->start, old->end, new->start, new->end,
                                    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
                }

                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                        enum plane_id plane_id = plane->id;
                        const struct skl_plane_wm *old_wm, *new_wm;

                        old_wm = &old_pipe_wm->planes[plane_id];
                        new_wm = &new_pipe_wm->planes[plane_id];

                        if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
                                continue;

                        drm_dbg_kms(&dev_priv->drm,
                                    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
                                    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
                                    plane->base.base.id, plane->base.name,
                                    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
                                    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
                                    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
                                    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
                                    enast(old_wm->trans_wm.plane_en),
                                    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
                                    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
                                    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
                                    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
                                    enast(new_wm->trans_wm.plane_en));

                        drm_dbg_kms(&dev_priv->drm,
                                    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
                                    " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
                                    plane->base.base.id, plane->base.name,
                                    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
                                    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
                                    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
                                    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
                                    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
                                    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
                                    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
                                    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
                                    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,

                                    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
                                    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
                                    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
                                    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
                                    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
                                    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
                                    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
                                    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
                                    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);

                        drm_dbg_kms(&dev_priv->drm,
                                    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
                                    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
                                    plane->base.base.id, plane->base.name,
                                    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
                                    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
                                    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
                                    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
                                    old_wm->trans_wm.plane_res_b,
                                    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
                                    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
                                    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
                                    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
                                    new_wm->trans_wm.plane_res_b);

                        drm_dbg_kms(&dev_priv->drm,
                                    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
                                    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
                                    plane->base.base.id, plane->base.name,
                                    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
                                    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
                                    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
                                    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
                                    old_wm->trans_wm.min_ddb_alloc,
                                    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
                                    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
                                    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
                                    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
                                    new_wm->trans_wm.min_ddb_alloc);
                }
        }
}
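
/*
 * Example of the debug output produced above (values are hypothetical;
 * enast() renders an enabled watermark level as '*' and a disabled one
 * as ' '):
 *
 *   [PLANE:31:plane 1A] ddb (   0 -  159) -> (   0 -  191), size  160 ->  192
 *   [PLANE:31:plane 1A]   level *wm0,*wm1,*wm2, wm3, ... -> *wm0,*wm1,*wm2,*wm3, ...
 */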

static int intel_add_all_pipes(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state;

                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
        }

        return 0;
}

static int
skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        int ret;

        /*
         * If this is our first atomic update following hardware readout,
         * we can't trust the DDB that the BIOS programmed for us.  Let's
         * pretend that all pipes switched active status so that we'll
         * ensure a full DDB recompute.
         */
        if (dev_priv->wm.distrust_bios_wm) {
                ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
                                       state->base.acquire_ctx);
                if (ret)
                        return ret;

                state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;

                /*
                 * We usually only initialize state->active_pipes if we're
                 * doing a modeset; make sure this field is always
                 * initialized during the sanitization process that happens
                 * on the first commit too.
                 */
                if (!state->modeset)
                        state->active_pipes = dev_priv->active_pipes;
        }

        /*
         * If the modeset changes which CRTC's are active, we need to
         * recompute the DDB allocation for *all* active pipes, even
         * those that weren't otherwise being modified in any way by this
         * atomic commit.  Due to the shrinking of the per-pipe allocations
         * when new active CRTC's are added, it's possible for a pipe that
         * we were already using and aren't changing at all here to suddenly
         * become invalid if its DDB needs exceed its new allocation.
         *
         * Note that if we wind up doing a full DDB recompute, we can't let
         * any other display updates race with this transaction, so we need
         * to grab the lock on *all* CRTC's.
         */
        if (state->active_pipe_changes || state->modeset) {
                state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;

                ret = intel_add_all_pipes(state);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * To make sure the cursor watermark registers are always consistent
 * with our computed state the following scenario needs special
 * treatment:
 *
 * 1. enable cursor
 * 2. move cursor entirely offscreen
 * 3. disable cursor
 *
 * Step 2. does call .disable_plane() but does not zero the watermarks
 * (since we consider an offscreen cursor still active for the purposes
 * of watermarks). Step 3. would not normally call .disable_plane()
 * because the actual plane visibility isn't changing, and we don't
 * deallocate the cursor ddb until the pipe gets disabled. So we must
 * force step 3. to call .disable_plane() to update the watermark
 * registers properly.
 *
 * Other planes do not suffer from this issue as their watermarks are
 * calculated based on the actual plane visibility. The only time this
 * can trigger for the other planes is during the initial readout as the
 * default value of the watermark registers is not zero.
 *
 * An illustrative timeline of this scenario is sketched after the
 * function below.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
                                      struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                struct intel_plane_state *plane_state;
                enum plane_id plane_id = plane->id;

                /*
                 * Force a full wm update for every plane on modeset.
                 * Required because the reset value of the wm registers
                 * is non-zero, whereas we want all disabled planes to
                 * have zero watermarks. So if we turn off the relevant
                 * power well the hardware state will go out of sync
                 * with the software state.
                 */
                if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
                    skl_plane_wm_equals(dev_priv,
                                        &old_crtc_state->wm.skl.optimal.planes[plane_id],
                                        &new_crtc_state->wm.skl.optimal.planes[plane_id]))
                        continue;

                plane_state = intel_atomic_get_plane_state(state, plane);
                if (IS_ERR(plane_state))
                        return PTR_ERR(plane_state);

                new_crtc_state->update_planes |= BIT(plane_id);
        }

        return 0;
}
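
/*
 * Illustrative timeline of the cursor scenario described in the comment
 * above skl_wm_add_affected_planes() (coordinates are hypothetical):
 *
 *   commit 1: cursor enabled at (100, 100)  -> watermarks computed and
 *             programmed as usual
 *   commit 2: cursor moved fully offscreen  -> .disable_plane() runs, but
 *             the watermarks stay non-zero (an offscreen cursor still
 *             counts as active)
 *   commit 3: cursor disabled               -> visibility is unchanged, so
 *             only the forced .disable_plane() described above brings the
 *             watermark registers back to zero
 */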

static int
skl_compute_wm(struct intel_atomic_state *state)
{
        struct intel_crtc *crtc;
        struct intel_crtc_state *new_crtc_state;
        struct intel_crtc_state *old_crtc_state;
        struct skl_ddb_values *results = &state->wm_results;
        int ret, i;

        /* Clear all dirty flags */
        results->dirty_pipes = 0;

        ret = skl_ddb_add_affected_pipes(state);
        if (ret)
                return ret;

        /*
         * Calculate WM's for all pipes that are part of this transaction.
         * Note that skl_ddb_add_affected_pipes may have added CRTC's that
         * weren't otherwise being modified (and set bits in dirty_pipes)
         * if pipe allocations had to change.
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                ret = skl_build_pipe_wm(new_crtc_state);
                if (ret)
                        return ret;

                ret = skl_wm_add_affected_planes(state, crtc);
                if (ret)
                        return ret;

                if (!skl_pipe_wm_equals(crtc,
                                        &old_crtc_state->wm.skl.optimal,
                                        &new_crtc_state->wm.skl.optimal))
                        results->dirty_pipes |= BIT(crtc->pipe);
        }

        ret = skl_compute_ddb(state);
        if (ret)
                return ret;

        skl_print_wm_changes(state);

        return 0;
}

static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
                                      struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
        enum pipe pipe = crtc->pipe;

        if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
                return;

        I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
}

static void skl_initial_wm(struct intel_atomic_state *state,
                           struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct skl_ddb_values *results = &state->wm_results;

        if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
                return;

        mutex_lock(&dev_priv->wm.wm_mutex);

        if (crtc_state->uapi.active_changed)
                skl_atomic_update_crtc_wm(state, crtc);

        mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
                                  struct intel_wm_config *config)
{
        struct intel_crtc *crtc;

        /* Compute the currently _active_ config */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

                if (!wm->pipe_enabled)
                        continue;

                config->sprites_enabled |= wm->sprites_enabled;
                config->sprites_scaled |= wm->sprites_scaled;
                config->num_pipes_active++;
        }
}

static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
        struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
        struct ilk_wm_maximums max;
        struct intel_wm_config config = {};
        struct ilk_wm_values results = {};
        enum intel_ddb_partitioning partitioning;

        ilk_compute_wm_config(dev_priv, &config);

        ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
        ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);

        /* 5/6 split only in single pipe config on IVB+ */
        if (INTEL_GEN(dev_priv) >= 7 &&
            config.num_pipes_active == 1 && config.sprites_enabled) {
                ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
                ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);

                best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
        } else {
                best_lp_wm = &lp_wm_1_2;
        }

        partitioning = (best_lp_wm == &lp_wm_1_2) ?
                       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

        ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);

        ilk_write_wm_values(dev_priv, &results);
}

static void ilk_initial_watermarks(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);

        mutex_lock(&dev_priv->wm.wm_mutex);
        crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
        ilk_program_watermarks(dev_priv);
        mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_atomic_state *state,
                                    struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);

        if (!crtc_state->wm.need_postvbl_update)
                return;

        mutex_lock(&dev_priv->wm.wm_mutex);
        crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
        ilk_program_watermarks(dev_priv);
        mutex_unlock(&dev_priv->wm.wm_mutex);
}

static inline void skl_wm_level_from_reg_val(u32 val,
                                             struct skl_wm_level *level)
{
        level->plane_en = val & PLANE_WM_EN;
        level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
        level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
        level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
                PLANE_WM_LINES_MASK;
}
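
/*
 * For example (purely illustrative), a PLANE_WM value with the enable
 * bit set, 4 lines and 128 blocks would decode to:
 *
 *   level->plane_en = true, level->ignore_lines = false,
 *   level->plane_res_l = 4, level->plane_res_b = 128
 */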

void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
                              struct skl_pipe_wm *out)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        int level, max_level;
        enum plane_id plane_id;
        u32 val;

        max_level = ilk_wm_max_level(dev_priv);

        for_each_plane_id_on_crtc(crtc, plane_id) {
                struct skl_plane_wm *wm = &out->planes[plane_id];

                for (level = 0; level <= max_level; level++) {
                        if (plane_id != PLANE_CURSOR)
                                val = I915_READ(PLANE_WM(pipe, plane_id, level));
                        else
                                val = I915_READ(CUR_WM(pipe, level));

                        skl_wm_level_from_reg_val(val, &wm->wm[level]);
                }

                if (plane_id != PLANE_CURSOR)
                        val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
                else
                        val = I915_READ(CUR_WM_TRANS(pipe));

                skl_wm_level_from_reg_val(val, &wm->trans_wm);
        }

        if (!crtc->active)
                return;

        out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}

void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
        struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
        struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
        struct intel_crtc *crtc;
        struct intel_crtc_state *crtc_state;

        skl_ddb_get_hw_state(dev_priv, ddb);
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                crtc_state = to_intel_crtc_state(crtc->base.state);

                skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);

                if (crtc->active)
                        hw->dirty_pipes |= BIT(crtc->pipe);
        }

        if (dev_priv->active_pipes) {
                /* Fully recompute DDB on first atomic commit */
                dev_priv->wm.distrust_bios_wm = true;
        }
}

static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
        struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
        enum pipe pipe = crtc->pipe;
        static const i915_reg_t wm0_pipe_reg[] = {
                [PIPE_A] = WM0_PIPEA_ILK,
                [PIPE_B] = WM0_PIPEB_ILK,
                [PIPE_C] = WM0_PIPEC_IVB,
        };

        hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

        memset(active, 0, sizeof(*active));

        active->pipe_enabled = crtc->active;

        if (active->pipe_enabled) {
                u32 tmp = hw->wm_pipe[pipe];

                /*
                 * For active pipes the LP0 watermark is marked as
                 * enabled, and the LP1+ watermarks as disabled since
                 * we can't really reverse compute them in case
                 * multiple pipes are active.
                 */
                active->wm[0].enable = true;
                active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
                active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
                active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
                active->linetime = hw->wm_linetime[pipe];
        } else {
                int level, max_level = ilk_wm_max_level(dev_priv);

                /*
                 * For inactive pipes, all watermark levels
                 * should be marked as enabled but zeroed,
                 * which is what we'd compute them to.
                 */
                for (level = 0; level <= max_level; level++)
                        active->wm[level].enable = true;
        }

        crtc->wm.active.ilk = *active;
}

#define _FW_WM(value, plane) \
        (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
        (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
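
/*
 * E.g. _FW_WM(tmp, PLANEA) expands via token pasting to
 * ((tmp) & DSPFW_PLANEA_MASK) >> DSPFW_PLANEA_SHIFT, while the _VLV
 * variant selects the wider DSPFW_PLANEA_MASK_VLV field instead.
 */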

static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
                               struct g4x_wm_values *wm)
{
        u32 tmp;

        tmp = I915_READ(DSPFW1);
        wm->sr.plane = _FW_WM(tmp, SR);
        wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
        wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
        wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

        tmp = I915_READ(DSPFW2);
        wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
        wm->sr.fbc = _FW_WM(tmp, FBC_SR);
        wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
        wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
        wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
        wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

        tmp = I915_READ(DSPFW3);
        wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
        wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
        wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
        wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}

static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
                               struct vlv_wm_values *wm)
{
        enum pipe pipe;
        u32 tmp;

        for_each_pipe(dev_priv, pipe) {
                tmp = I915_READ(VLV_DDL(pipe));

                wm->ddl[pipe].plane[PLANE_PRIMARY] =
                        (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
                wm->ddl[pipe].plane[PLANE_CURSOR] =
                        (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
                wm->ddl[pipe].plane[PLANE_SPRITE0] =
                        (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
                wm->ddl[pipe].plane[PLANE_SPRITE1] =
                        (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
        }

        tmp = I915_READ(DSPFW1);
        wm->sr.plane = _FW_WM(tmp, SR);
        wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
        wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
        wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

        tmp = I915_READ(DSPFW2);
        wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
        wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
        wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

        tmp = I915_READ(DSPFW3);
        wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

        if (IS_CHERRYVIEW(dev_priv)) {
                tmp = I915_READ(DSPFW7_CHV);
                wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
                wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

                tmp = I915_READ(DSPFW8_CHV);
                wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
                wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

                tmp = I915_READ(DSPFW9_CHV);
                wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
                wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

                tmp = I915_READ(DSPHOWM);
                wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
                wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
                wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
                wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
                wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
                wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
                wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
                wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
                wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
                wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
        } else {
                tmp = I915_READ(DSPFW7);
                wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
                wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

                tmp = I915_READ(DSPHOWM);
                wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
                wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
                wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
                wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
                wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
                wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
                wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
        }
}

#undef _FW_WM
#undef _FW_WM_VLV

void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
        struct g4x_wm_values *wm = &dev_priv->wm.g4x;
        struct intel_crtc *crtc;

        g4x_read_wm_values(dev_priv, wm);

        wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct g4x_wm_state *active = &crtc->wm.active.g4x;
                struct g4x_pipe_wm *raw;
                enum pipe pipe = crtc->pipe;
                enum plane_id plane_id;
                int level, max_level;

                active->cxsr = wm->cxsr;
                active->hpll_en = wm->hpll_en;
                active->fbc_en = wm->fbc_en;

                active->sr = wm->sr;
                active->hpll = wm->hpll;

                for_each_plane_id_on_crtc(crtc, plane_id) {
                        active->wm.plane[plane_id] =
                                wm->pipe[pipe].plane[plane_id];
                }

                if (wm->cxsr && wm->hpll_en)
                        max_level = G4X_WM_LEVEL_HPLL;
                else if (wm->cxsr)
                        max_level = G4X_WM_LEVEL_SR;
                else
                        max_level = G4X_WM_LEVEL_NORMAL;

                level = G4X_WM_LEVEL_NORMAL;
                raw = &crtc_state->wm.g4x.raw[level];
                for_each_plane_id_on_crtc(crtc, plane_id)
                        raw->plane[plane_id] = active->wm.plane[plane_id];

                if (++level > max_level)
                        goto out;

                raw = &crtc_state->wm.g4x.raw[level];
                raw->plane[PLANE_PRIMARY] = active->sr.plane;
                raw->plane[PLANE_CURSOR] = active->sr.cursor;
                raw->plane[PLANE_SPRITE0] = 0;
                raw->fbc = active->sr.fbc;

                if (++level > max_level)
                        goto out;

                raw = &crtc_state->wm.g4x.raw[level];
                raw->plane[PLANE_PRIMARY] = active->hpll.plane;
                raw->plane[PLANE_CURSOR] = active->hpll.cursor;
                raw->plane[PLANE_SPRITE0] = 0;
                raw->fbc = active->hpll.fbc;

        out:
                for_each_plane_id_on_crtc(crtc, plane_id)
                        g4x_raw_plane_wm_set(crtc_state, level,
                                             plane_id, USHRT_MAX);
                g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

                crtc_state->wm.g4x.optimal = *active;
                crtc_state->wm.g4x.intermediate = *active;

                drm_dbg_kms(&dev_priv->drm,
                            "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
                            pipe_name(pipe),
                            wm->pipe[pipe].plane[PLANE_PRIMARY],
                            wm->pipe[pipe].plane[PLANE_CURSOR],
                            wm->pipe[pipe].plane[PLANE_SPRITE0]);
        }

        drm_dbg_kms(&dev_priv->drm,
                    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
                    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
        drm_dbg_kms(&dev_priv->drm,
                    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
                    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
        drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
                    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}

void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        mutex_lock(&dev_priv->wm.wm_mutex);

        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_crtc *crtc =
                        intel_get_crtc_for_pipe(dev_priv, plane->pipe);
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
                enum plane_id plane_id = plane->id;
                int level;

                if (plane_state->uapi.visible)
                        continue;

                for (level = 0; level < 3; level++) {
                        struct g4x_pipe_wm *raw =
                                &crtc_state->wm.g4x.raw[level];

                        raw->plane[plane_id] = 0;
                        wm_state->wm.plane[plane_id] = 0;
                }

                if (plane_id == PLANE_PRIMARY) {
                        for (level = 0; level < 3; level++) {
                                struct g4x_pipe_wm *raw =
                                        &crtc_state->wm.g4x.raw[level];
                                raw->fbc = 0;
                        }

                        wm_state->sr.fbc = 0;
                        wm_state->hpll.fbc = 0;
                        wm_state->fbc_en = false;
                }
        }

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                crtc_state->wm.g4x.intermediate =
                        crtc_state->wm.g4x.optimal;
                crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
        }

        g4x_program_watermarks(dev_priv);

        mutex_unlock(&dev_priv->wm.wm_mutex);
}

void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
        struct vlv_wm_values *wm = &dev_priv->wm.vlv;
        struct intel_crtc *crtc;
        u32 val;

        vlv_read_wm_values(dev_priv, wm);

        wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
        wm->level = VLV_WM_LEVEL_PM2;

        if (IS_CHERRYVIEW(dev_priv)) {
                vlv_punit_get(dev_priv);

                val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
                if (val & DSP_MAXFIFO_PM5_ENABLE)
                        wm->level = VLV_WM_LEVEL_PM5;

                /*
                 * If DDR DVFS is disabled in the BIOS, Punit
                 * will never ack the request. So if that happens,
                 * assume we don't have to enable/disable DDR DVFS
                 * dynamically. To test that, just set the REQ_ACK
                 * bit to poke the Punit, but don't change the
                 * HIGH/LOW bits so that we don't actually change
                 * the current state.
                 */
                val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
                val |= FORCE_DDR_FREQ_REQ_ACK;
                vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

                if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
                              FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Punit not acking DDR DVFS request, "
                                    "assuming DDR DVFS is disabled\n");
                        dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
                } else {
                        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
                        if ((val & FORCE_DDR_HIGH_FREQ) == 0)
                                wm->level = VLV_WM_LEVEL_DDR_DVFS;
                }

                vlv_punit_put(dev_priv);
        }

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct vlv_wm_state *active = &crtc->wm.active.vlv;
                const struct vlv_fifo_state *fifo_state =
                        &crtc_state->wm.vlv.fifo_state;
                enum pipe pipe = crtc->pipe;
                enum plane_id plane_id;
                int level;

                vlv_get_fifo_size(crtc_state);

                active->num_levels = wm->level + 1;
                active->cxsr = wm->cxsr;

                for (level = 0; level < active->num_levels; level++) {
                        struct g4x_pipe_wm *raw =
                                &crtc_state->wm.vlv.raw[level];

                        active->sr[level].plane = wm->sr.plane;
                        active->sr[level].cursor = wm->sr.cursor;

                        for_each_plane_id_on_crtc(crtc, plane_id) {
                                active->wm[level].plane[plane_id] =
                                        wm->pipe[pipe].plane[plane_id];

                                raw->plane[plane_id] =
                                        vlv_invert_wm_value(active->wm[level].plane[plane_id],
                                                            fifo_state->plane[plane_id]);
                        }
                }

                for_each_plane_id_on_crtc(crtc, plane_id)
                        vlv_raw_plane_wm_set(crtc_state, level,
                                             plane_id, USHRT_MAX);
                vlv_invalidate_wms(crtc, active, level);

                crtc_state->wm.vlv.optimal = *active;
                crtc_state->wm.vlv.intermediate = *active;

                drm_dbg_kms(&dev_priv->drm,
                            "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
                            pipe_name(pipe),
                            wm->pipe[pipe].plane[PLANE_PRIMARY],
                            wm->pipe[pipe].plane[PLANE_CURSOR],
                            wm->pipe[pipe].plane[PLANE_SPRITE0],
                            wm->pipe[pipe].plane[PLANE_SPRITE1]);
        }

        drm_dbg_kms(&dev_priv->drm,
                    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
                    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}

void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        mutex_lock(&dev_priv->wm.wm_mutex);

        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_crtc *crtc =
                        intel_get_crtc_for_pipe(dev_priv, plane->pipe);
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
                const struct vlv_fifo_state *fifo_state =
                        &crtc_state->wm.vlv.fifo_state;
                enum plane_id plane_id = plane->id;
                int level;

                if (plane_state->uapi.visible)
                        continue;

                for (level = 0; level < wm_state->num_levels; level++) {
                        struct g4x_pipe_wm *raw =
                                &crtc_state->wm.vlv.raw[level];

                        raw->plane[plane_id] = 0;

                        wm_state->wm[level].plane[plane_id] =
                                vlv_invert_wm_value(raw->plane[plane_id],
                                                    fifo_state->plane[plane_id]);
                }
        }

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                crtc_state->wm.vlv.intermediate =
                        crtc_state->wm.vlv.optimal;
                crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
        }

        vlv_program_watermarks(dev_priv);

        mutex_unlock(&dev_priv->wm.wm_mutex);
}

/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitization instead
 */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
        I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
        I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
        I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

        /*
         * Don't touch WM1S_LP_EN here.
         * Doing so could cause underruns.
         */
}

void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct intel_crtc *crtc;

        ilk_init_lp_watermarks(dev_priv);

        for_each_intel_crtc(&dev_priv->drm, crtc)
                ilk_pipe_wm_get_hw_state(crtc);

        hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
        hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
        hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

        hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
        if (INTEL_GEN(dev_priv) >= 7) {
                hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
                hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
        }

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
                        INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
        else if (IS_IVYBRIDGE(dev_priv))
                hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
                        INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

        hw->enable_fbc_wm =
                !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the #intel_crtc on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8. A worked numeric example of these
 * formulas follows the function below.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (dev_priv->display.update_wm)
                dev_priv->display.update_wm(crtc);
}
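
/*
 * Worked example of the formulas documented above (illustrative numbers
 * only, not taken from any real platform):
 *
 * Normal: dotclock 148,500 kHz, 4 bytes per pixel, latency 12 us:
 *   148,500,000 px/s * 4 B/px * 0.000012 s = 7,128 bytes
 *
 * SR: htotal 2200, hdisplay 1920, same dotclock and latency:
 *   line time = 2200 / 148,500,000 s ~ 14.8 us
 *   watermark = (trunc(12 / 14.8) + 1) * 1920 * 4 = 7,680 bytes
 *
 * Either result would then be rounded up and padded with the extra
 * 2 FIFO entries for clock crossings before being programmed.
 */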

void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
        u32 val;

        if (!HAS_IPC(dev_priv))
                return;

        val = I915_READ(DISP_ARB_CTL2);

        if (dev_priv->ipc_enabled)
                val |= DISP_IPC_ENABLE;
        else
                val &= ~DISP_IPC_ENABLE;

        I915_WRITE(DISP_ARB_CTL2, val);
}

static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
{
        /* Display WA #0477 WaDisableIPC: skl */
        if (IS_SKYLAKE(dev_priv))
                return false;

        /* Display WA #1141: SKL:all KBL:all CFL */
        if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
                return dev_priv->dram_info.symmetric_memory;

        return true;
}

void intel_init_ipc(struct drm_i915_private *dev_priv)
{
        if (!HAS_IPC(dev_priv))
                return;

        dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);

        intel_enable_ipc(dev_priv);
}

static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
                           DISPPLANE_TRICKLE_FEED_DISABLE);

                I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
                POSTING_READ(DSPSURF(pipe));
        }
}

static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
        u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

        /*
         * Required for FBC
         * WaFbcDisableDpfcClockGating:ilk
         */
        dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

        I915_WRITE(PCH_3DCGDIS0,
                   MARIUNIT_CLOCK_GATE_DISABLE |
                   SVSMUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(PCH_3DCGDIS1,
                   VFMUNIT_CLOCK_GATE_DISABLE);

        /*
         * According to the spec the following bits should be set in
         * order to enable memory self-refresh:
         * bit 22/21 of 0x42004
         * bit 5 of 0x42020
         * bit 15 of 0x45000
         */
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   (I915_READ(ILK_DISPLAY_CHICKEN2) |
                    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
        dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
        I915_WRITE(DISP_ARB_CTL,
                   (I915_READ(DISP_ARB_CTL) |
                    DISP_FBC_WM_DIS));

        /*
         * According to the hardware documentation, the following bits
         * should be set unconditionally in order to enable FBC:
         * bit 22 of 0x42000
         * bit 22 of 0x42004
         * bits 7, 8 and 9 of 0x42020
         */
        if (IS_IRONLAKE_M(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ilk */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
                I915_WRITE(ILK_DISPLAY_CHICKEN2,
                           I915_READ(ILK_DISPLAY_CHICKEN2) |
                           ILK_DPARB_GATE);
        }

        I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
        I915_WRITE(_3D_CHICKEN2,
                   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
                   _3D_CHICKEN2_WM_READ_PIPELINED);

        /* WaDisableRenderCachePipelinedFlush:ilk */
        I915_WRITE(CACHE_MODE_0,
                   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

        /* WaDisable_RenderCache_OperationalFlush:ilk */
        I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        g4x_disable_trickle_feed(dev_priv);

        ibx_init_clock_gating(dev_priv);
}

static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;
        u32 val;

        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
                   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
                   PCH_CPUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /*
         * The below fixes a weird display corruption (a few pixels
         * shifted downward) seen only on the LVDS panels of some
         * HP laptops with Ivy Bridge.
         */
        for_each_pipe(dev_priv, pipe) {
                val = I915_READ(TRANS_CHICKEN2(pipe));
                val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
                val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
                if (dev_priv->vbt.fdi_rx_polarity_inverted)
                        val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
                val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
                val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
                I915_WRITE(TRANS_CHICKEN2(pipe), val);
        }
        /* WADP0ClockGatingDisable */
        for_each_pipe(dev_priv, pipe) {
                I915_WRITE(TRANS_CHICKEN1(pipe),
                           TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
        }
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = I915_READ(MCH_SSKPD);
        if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
                drm_dbg_kms(&dev_priv->drm,
                            "Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
                            tmp);
}

static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
        u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

        I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);

        /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
        I915_WRITE(_3D_CHICKEN,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

        /* WaDisable_RenderCache_OperationalFlush:snb */
        I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN6_GT_MODE,
                   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

        I915_WRITE(CACHE_MODE_0,
                   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

        I915_WRITE(GEN6_UCGCTL1,
                   I915_READ(GEN6_UCGCTL1) |
                   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
                   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

        /*
         * According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
         * gating disable must be set.  Failure to set it results in
         * flickering pixels due to Z write ordering failures after
         * some amount of runtime in the Mesa "fire" demo, and Unigine
         * Sanctuary and Tropics, and apparently anything else with
         * alpha test or pixel discard.
         *
         * According to the spec, bit 11 (RCCUNIT) must also be set,
         * but we didn't debug actual testcases to find it out.
         *
         * WaDisableRCCUnitClockGating:snb
         * WaDisableRCPBUnitClockGating:snb
         */
        I915_WRITE(GEN6_UCGCTL2,
                   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
                   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

        /* WaStripsFansDisableFastClipPerformanceFix:snb */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

        /*
         * Bspec says:
         * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
         * 3DSTATE_SF number of SF output attributes is more than 16."
         */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

        /*
         * According to the spec the following bits should be
         * set in order to enable memory self-refresh and fbc:
         * bit 21 and bit 22 of 0x42000
         * bit 21 and bit 22 of 0x42004
         * bit 5 and bit 7 of 0x42020
         * bit 14 of 0x70180
         * bit 14 of 0x71180
         *
         * WaFbcAsynchFlipDisableFbcQueue:snb
         */
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
                   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
        I915_WRITE(ILK_DSPCLK_GATE_D,
                   I915_READ(ILK_DSPCLK_GATE_D) |
                   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
                   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

        g4x_disable_trickle_feed(dev_priv);

        cpt_init_clock_gating(dev_priv);

        gen6_check_mch_setup(dev_priv);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
        u32 reg = I915_READ(GEN7_FF_THREAD_MODE);

        /*
         * WaVSThreadDispatchOverride:ivb,vlv
         *
         * This actually overrides the dispatch
         * mode for all thread types.
         */
        reg &= ~GEN7_FF_SCHED_MASK;
        reg |= GEN7_FF_TS_SCHED_HW;
        reg |= GEN7_FF_VS_SCHED_HW;
        reg |= GEN7_FF_DS_SCHED_HW;

        I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
        /*
         * TODO: this bit should only be enabled when really needed, then
         * disabled when not needed anymore in order to save power.
         */
        if (HAS_PCH_LPT_LP(dev_priv))
                I915_WRITE(SOUTH_DSPCLK_GATE_D,
                           I915_READ(SOUTH_DSPCLK_GATE_D) |
                           PCH_LP_PARTITION_LEVEL_DISABLE);

        /* WADPOClockGatingDisable:hsw */
        I915_WRITE(TRANS_CHICKEN1(PIPE_A),
                   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
                   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
        if (HAS_PCH_LPT_LP(dev_priv)) {
                u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);

                val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
        }
}
6599
6600 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
6601                                    int general_prio_credits,
6602                                    int high_prio_credits)
6603 {
6604         u32 misccpctl;
6605         u32 val;
6606
6607         /* WaTempDisableDOPClkGating:bdw */
6608         misccpctl = I915_READ(GEN7_MISCCPCTL);
6609         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6610
6611         val = I915_READ(GEN8_L3SQCREG1);
6612         val &= ~L3_PRIO_CREDITS_MASK;
6613         val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
6614         val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
6615         I915_WRITE(GEN8_L3SQCREG1, val);
6616
6617         /*
6618          * Wait at least 100 clocks before re-enabling clock gating.
6619          * See the definition of L3SQCREG1 in BSpec.
6620          */
6621         POSTING_READ(GEN8_L3SQCREG1);
6622         udelay(1);
6623         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6624 }
6625
6626 static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
6627 {
6628         /* This is not a Wa. Enable to reduce Sampler power */
6629         I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
6630                    I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
6631
6632         /* WaEnable32PlaneMode:icl */
6633         I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
6634                    _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
6635
6636         /*
6637          * Wa_1408615072:icl,ehl  (vsunit)
6638          * Wa_1407596294:icl,ehl  (hsunit)
6639          */
6640         intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
6641                          0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
6642
6643         /* Wa_1407352427:icl,ehl */
6644         intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
6645                          0, PSDUNIT_CLKGATE_DIS);
6646 }
6647
6648 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
6649 {
6650         u32 vd_pg_enable = 0;
6651         unsigned int i;
6652
6653         /* Wa_1408615072:tgl */
6654         intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
6655                          0, VSUNIT_CLKGATE_DIS_TGL);
6656
6657         /* This is not a WA. Enable VD HCP & MFX_ENC power gating. */
6658         for (i = 0; i < I915_MAX_VCS; i++) {
6659                 if (HAS_ENGINE(dev_priv, _VCS(i)))
6660                         vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
6661                                         VDN_MFX_POWERGATE_ENABLE(i);
6662         }
6663
6664         I915_WRITE(POWERGATE_ENABLE,
6665                    I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
6666 }
6667
6668 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
6669 {
6670         if (!HAS_PCH_CNP(dev_priv))
6671                 return;
6672
6673         /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
6674         I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
6675                    CNP_PWM_CGE_GATING_DISABLE);
6676 }
6677
6678 static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
6679 {
6680         u32 val;
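
        /* Apply the CNP PCH workarounds first (a no-op without a CNP PCH). */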
6681         cnp_init_clock_gating(dev_priv);
6682
6683         /* This is not a Wa. Enable for better image quality */
6684         I915_WRITE(_3D_CHICKEN3,
6685                    _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
6686
6687         /* WaEnableChickenDCPR:cnl */
6688         I915_WRITE(GEN8_CHICKEN_DCPR_1,
6689                    I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
6690
6691         /* WaFbcWakeMemOn:cnl */
6692         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
6693                    DISP_FBC_MEMORY_WAKE);
6694
6695         val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
6696         /* ReadHitWriteOnlyDisable:cnl */
6697         val |= RCCUNIT_CLKGATE_DIS;
6698         /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
6699         if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
6700                 val |= SARBUNIT_CLKGATE_DIS;
6701         I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
6702
6703         /* Wa_2201832410:cnl */
6704         val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
6705         val |= GWUNIT_CLKGATE_DIS;
6706         I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
6707
6708         /* WaDisableVFclkgate:cnl */
6709         /* WaVFUnitClockGatingDisable:cnl */
6710         val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
6711         val |= VFUNIT_CLKGATE_DIS;
6712         I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
6713 }
6714
6715 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
6716 {
6717         cnp_init_clock_gating(dev_priv);
6718         gen9_init_clock_gating(dev_priv);
6719
6720         /* WaFbcNukeOnHostModify:cfl */
6721         I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
6722                    ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
6723 }
6724
6725 static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
6726 {
6727         gen9_init_clock_gating(dev_priv);
6728
6729         /* WaDisableSDEUnitClockGating:kbl */
6730         if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
6731                 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6732                            GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6733
6734         /* WaDisableGamClockGating:kbl */
6735         if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
6736                 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6737                            GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
6738
6739         /* WaFbcNukeOnHostModify:kbl */
6740         I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
6741                    ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
6742 }
6743
6744 static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
6745 {
6746         gen9_init_clock_gating(dev_priv);
6747
6748         /* WAC6entrylatency:skl */
6749         I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
6750                    FBC_LLC_FULLY_OPEN);
6751
6752         /* WaFbcNukeOnHostModify:skl */
6753         I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
6754                    ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
6755 }
6756
6757 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
6758 {
6759         enum pipe pipe;
6760
6761         /* WaSwitchSolVfFArbitrationPriority:bdw */
6762         I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6763
6764         /* WaPsrDPAMaskVBlankInSRD:bdw */
6765         I915_WRITE(CHICKEN_PAR1_1,
6766                    I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6767
6768         /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
6769         for_each_pipe(dev_priv, pipe) {
6770                 I915_WRITE(CHICKEN_PIPESL_1(pipe),
6771                            I915_READ(CHICKEN_PIPESL_1(pipe)) |
6772                            BDW_DPRS_MASK_VBLANK_SRD);
6773         }
6774
6775         /* WaVSRefCountFullforceMissDisable:bdw */
6776         /* WaDSRefCountFullforceMissDisable:bdw */
6777         I915_WRITE(GEN7_FF_THREAD_MODE,
6778                    I915_READ(GEN7_FF_THREAD_MODE) &
6779                    ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6780
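        /*
         * Disable RC semaphore idle messages; this is the same write that
         * WaDisableSemaphoreAndSyncFlipWait:chv performs below.
         */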
6781         I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6782                    _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6783
6784         /* WaDisableSDEUnitClockGating:bdw */
6785         I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6786                    GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6787
6788         /* WaProgramL3SqcReg1Default:bdw */
6789         gen8_set_l3sqc_credits(dev_priv, 30, 2);
6790
6791         /* WaKVMNotificationOnConfigChange:bdw */
6792         I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
6793                    | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
6794
6795         lpt_init_clock_gating(dev_priv);
6796
6797         /* WaDisableDopClockGating:bdw
6798          *
6799          * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
6800          * clock gating.
6801          */
6802         I915_WRITE(GEN6_UCGCTL1,
6803                    I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
6804 }
6805
6806 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
6807 {
6808         /* L3 caching of data atomics doesn't work -- disable it. */
6809         I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6810         I915_WRITE(HSW_ROW_CHICKEN3,
6811                    _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6812
6813         /* This is required by WaCatErrorRejectionIssue:hsw */
6814         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6815                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6816                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6817
6818         /* WaVSRefCountFullforceMissDisable:hsw */
6819         I915_WRITE(GEN7_FF_THREAD_MODE,
6820                    I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
6821
6822         /* WaDisable_RenderCache_OperationalFlush:hsw */
6823         I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6824
6825         /* enable HiZ Raw Stall Optimization */
6826         I915_WRITE(CACHE_MODE_0_GEN7,
6827                    _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6828
6829         /* WaDisable4x2SubspanOptimization:hsw */
6830         I915_WRITE(CACHE_MODE_1,
6831                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6832
6833         /*
6834          * BSpec recommends 8x4 when MSAA is used,
6835          * however in practice 16x4 seems fastest.
6836          *
6837          * Note that PS/WM thread counts depend on the WIZ hashing
6838          * disable bit, which we don't touch here, but it's good
6839          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6840          */
6841         I915_WRITE(GEN7_GT_MODE,
6842                    _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6843
6844         /* WaSampleCChickenBitEnable:hsw */
6845         I915_WRITE(HALF_SLICE_CHICKEN3,
6846                    _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
6847
6848         /* WaSwitchSolVfFArbitrationPriority:hsw */
6849         I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6850
6851         lpt_init_clock_gating(dev_priv);
6852 }
6853
6854 static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
6855 {
6856         u32 snpcr;
6857
6858         I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
6859
6860         /* WaDisableEarlyCull:ivb */
6861         I915_WRITE(_3D_CHICKEN3,
6862                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6863
6864         /* WaDisableBackToBackFlipFix:ivb */
6865         I915_WRITE(IVB_CHICKEN3,
6866                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6867                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
6868
6869         /* WaDisablePSDDualDispatchEnable:ivb */
6870         if (IS_IVB_GT1(dev_priv))
6871                 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6872                            _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
6873
6874         /* WaDisable_RenderCache_OperationalFlush:ivb */
6875         I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6876
6877         /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
6878         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
6879                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
6880
6881         /* WaApplyL3ControlAndL3ChickenMode:ivb */
6882         I915_WRITE(GEN7_L3CNTLREG1,
6883                         GEN7_WA_FOR_GEN7_L3_CONTROL);
6884         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
6885                    GEN7_WA_L3_CHICKEN_MODE);
6886         if (IS_IVB_GT1(dev_priv)) {
6887                 I915_WRITE(GEN7_ROW_CHICKEN2,
6888                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6889         } else {
6890                 /* must write both registers */
6891                 I915_WRITE(GEN7_ROW_CHICKEN2,
6892                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6893                 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
6894                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6895         }
6896
6897         /* WaForceL3Serialization:ivb */
6898         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6899                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6900
6901         /*
6902          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
6903          * This implements the WaDisableRCZUnitClockGating:ivb workaround.
6904          */
6905         I915_WRITE(GEN6_UCGCTL2,
6906                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
6907
6908         /* This is required by WaCatErrorRejectionIssue:ivb */
6909         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6910                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6911                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6912
6913         g4x_disable_trickle_feed(dev_priv);
6914
6915         gen7_setup_fixed_func_scheduler(dev_priv);
6916
6917         if (0) { /* causes HiZ corruption on ivb:gt1 */
6918                 /* enable HiZ Raw Stall Optimization */
6919                 I915_WRITE(CACHE_MODE_0_GEN7,
6920                            _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6921         }
6922
6923         /* WaDisable4x2SubspanOptimization:ivb */
6924         I915_WRITE(CACHE_MODE_1,
6925                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6926
6927         /*
6928          * BSpec recommends 8x4 when MSAA is used,
6929          * however in practice 16x4 seems fastest.
6930          *
6931          * Note that PS/WM thread counts depend on the WIZ hashing
6932          * disable bit, which we don't touch here, but it's good
6933          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6934          */
6935         I915_WRITE(GEN7_GT_MODE,
6936                    _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6937
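        /* Set the MBC unit's snoop arbitration priority to medium. */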
6938         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
6939         snpcr &= ~GEN6_MBC_SNPCR_MASK;
6940         snpcr |= GEN6_MBC_SNPCR_MED;
6941         I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
6942
6943         if (!HAS_PCH_NOP(dev_priv))
6944                 cpt_init_clock_gating(dev_priv);
6945
6946         gen6_check_mch_setup(dev_priv);
6947 }
6948
6949 static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
6950 {
6951         /* WaDisableEarlyCull:vlv */
6952         I915_WRITE(_3D_CHICKEN3,
6953                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6954
6955         /* WaDisableBackToBackFlipFix:vlv */
6956         I915_WRITE(IVB_CHICKEN3,
6957                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6958                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
6959
6960         /* WaPsdDispatchEnable:vlv */
6961         /* WaDisablePSDDualDispatchEnable:vlv */
6962         I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6963                    _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
6964                                       GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
6965
6966         /* WaDisable_RenderCache_OperationalFlush:vlv */
6967         I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6968
6969         /* WaForceL3Serialization:vlv */
6970         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6971                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6972
6973         /* WaDisableDopClockGating:vlv */
6974         I915_WRITE(GEN7_ROW_CHICKEN2,
6975                    _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6976
6977         /* This is required by WaCatErrorRejectionIssue:vlv */
6978         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6979                    I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6980                    GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6981
6982         gen7_setup_fixed_func_scheduler(dev_priv);
6983
6984         /*
6985          * According to the spec, bit 13 (RCZUNIT) must be set on VLV too.
6986          * This implements the WaDisableRCZUnitClockGating:vlv workaround.
6987          */
6988         I915_WRITE(GEN6_UCGCTL2,
6989                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
6990
6991         /* WaDisableL3Bank2xClockGate:vlv
6992          * Disable L3 clock gating - MMIO 940c[25] = 1
6993          * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
6994         I915_WRITE(GEN7_UCGCTL4,
6995                    I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
6996
6997         /*
6998          * BSpec says this must be set, even though
6999          * WaDisable4x2SubspanOptimization isn't listed for VLV.
7000          */
7001         I915_WRITE(CACHE_MODE_1,
7002                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7003
7004         /*
7005          * BSpec recommends 8x4 when MSAA is used,
7006          * however in practice 16x4 seems fastest.
7007          *
7008          * Note that PS/WM thread counts depend on the WIZ hashing
7009          * disable bit, which we don't touch here, but it's good
7010          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7011          */
7012         I915_WRITE(GEN7_GT_MODE,
7013                    _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7014
7015         /*
7016          * WaIncreaseL3CreditsForVLVB0:vlv
7017          * This is actually the hardware default.
7018          */
7019         I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
7020
7021         /*
7022          * WaDisableVLVClockGating_VBIIssue:vlv
7023          * Disable clock gating on the GCFG unit to prevent a delay
7024          * in the reporting of vblank events.
7025          */
7026         I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7027 }
7028
7029 static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
7030 {
7031         /* WaVSRefCountFullforceMissDisable:chv */
7032         /* WaDSRefCountFullforceMissDisable:chv */
7033         I915_WRITE(GEN7_FF_THREAD_MODE,
7034                    I915_READ(GEN7_FF_THREAD_MODE) &
7035                    ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7036
7037         /* WaDisableSemaphoreAndSyncFlipWait:chv */
7038         I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7039                    _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7040
7041         /* WaDisableCSUnitClockGating:chv */
7042         I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7043                    GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7044
7045         /* WaDisableSDEUnitClockGating:chv */
7046         I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7047                    GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7048
7049         /*
7050          * WaProgramL3SqcReg1Default:chv
7051          * See gfxspecs/Related Documents/Performance Guide/
7052          * LSQC Setting Recommendations.
7053          */
7054         gen8_set_l3sqc_credits(dev_priv, 38, 2);
7055 }
7056
7057 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7058 {
7059         u32 dspclk_gate;
7060
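        /* Keep render clock gating enabled except for the VF, GS and CL units. */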
7061         I915_WRITE(RENCLK_GATE_D1, 0);
7062         I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7063                    GS_UNIT_CLOCK_GATE_DISABLE |
7064                    CL_UNIT_CLOCK_GATE_DISABLE);
7065         I915_WRITE(RAMCLK_GATE_D, 0);
7066         dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7067                 OVRUNIT_CLOCK_GATE_DISABLE |
7068                 OVCUNIT_CLOCK_GATE_DISABLE;
7069         if (IS_GM45(dev_priv))
7070                 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7071         I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7072
7073         /* WaDisableRenderCachePipelinedFlush */
7074         I915_WRITE(CACHE_MODE_0,
7075                    _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7076
7077         /* WaDisable_RenderCache_OperationalFlush:g4x */
7078         I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7079
7080         g4x_disable_trickle_feed(dev_priv);
7081 }
7082
7083 static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
7084 {
7085         struct intel_uncore *uncore = &dev_priv->uncore;
7086
7087         intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7088         intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
7089         intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
7090         intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
7091         intel_uncore_write16(uncore, DEUC, 0);
7092         intel_uncore_write(uncore,
7093                            MI_ARB_STATE,
7094                            _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7095
7096         /* WaDisable_RenderCache_OperationalFlush:gen4 */
7097         intel_uncore_write(uncore,
7098                            CACHE_MODE_0,
7099                            _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7100 }
7101
7102 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
7103 {
7104         I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7105                    I965_RCC_CLOCK_GATE_DISABLE |
7106                    I965_RCPB_CLOCK_GATE_DISABLE |
7107                    I965_ISC_CLOCK_GATE_DISABLE |
7108                    I965_FBC_CLOCK_GATE_DISABLE);
7109         I915_WRITE(RENCLK_GATE_D2, 0);
7110         I915_WRITE(MI_ARB_STATE,
7111                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7112
7113         /* WaDisable_RenderCache_OperationalFlush:gen4 */
7114         I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7115 }
7116
7117 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
7118 {
7119         u32 dstate = I915_READ(D_STATE);
7120
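        /* Let the PLL turn off in D3 and enable gfx & dot clock gating. */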
7121         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7122                 DSTATE_DOT_CLOCK_GATING;
7123         I915_WRITE(D_STATE, dstate);
7124
7125         if (IS_PINEVIEW(dev_priv))
7126                 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7127
7128         /* IIR "flip pending" means done if this bit is set */
7129         I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7130
7131         /* interrupts should cause a wake up from C3 */
7132         I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7133
7134         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7135         I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7136
7137         I915_WRITE(MI_ARB_STATE,
7138                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7139 }
7140
7141 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
7142 {
7143         I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7144
7145         /* interrupts should cause a wake up from C3 */
7146         I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7147                    _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7148
7149         I915_WRITE(MEM_MODE,
7150                    _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7151 }
7152
7153 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
7154 {
7155         I915_WRITE(MEM_MODE,
7156                    _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7157                    _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7158 }
7159
7160 void intel_init_clock_gating(struct drm_i915_private *dev_priv)
7161 {
7162         dev_priv->display.init_clock_gating(dev_priv);
7163 }
7164
7165 void intel_suspend_hw(struct drm_i915_private *dev_priv)
7166 {
7167         if (HAS_PCH_LPT(dev_priv))
7168                 lpt_suspend_hw(dev_priv);
7169 }
7170
7171 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
7172 {
7173         drm_dbg_kms(&dev_priv->drm,
7174                     "No clock gating settings or workarounds applied.\n");
7175 }
7176
7177 /**
7178  * intel_init_clock_gating_hooks - setup the clock gating hooks
7179  * @dev_priv: device private
7180  *
7181  * Set up the hooks that configure which clocks of a given platform can be
7182  * gated and also apply various GT and display specific workarounds for these
7183  * platforms. Note that some GT specific workarounds are applied separately
7184  * when GPU contexts or batchbuffers start their execution.
7185  */
7186 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7187 {
7188         if (IS_GEN(dev_priv, 12))
7189                 dev_priv->display.init_clock_gating = tgl_init_clock_gating;
7190         else if (IS_GEN(dev_priv, 11))
7191                 dev_priv->display.init_clock_gating = icl_init_clock_gating;
7192         else if (IS_CANNONLAKE(dev_priv))
7193                 dev_priv->display.init_clock_gating = cnl_init_clock_gating;
7194         else if (IS_COFFEELAKE(dev_priv))
7195                 dev_priv->display.init_clock_gating = cfl_init_clock_gating;
7196         else if (IS_SKYLAKE(dev_priv))
7197                 dev_priv->display.init_clock_gating = skl_init_clock_gating;
7198         else if (IS_KABYLAKE(dev_priv))
7199                 dev_priv->display.init_clock_gating = kbl_init_clock_gating;
7200         else if (IS_BROXTON(dev_priv))
7201                 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7202         else if (IS_GEMINILAKE(dev_priv))
7203                 dev_priv->display.init_clock_gating = glk_init_clock_gating;
7204         else if (IS_BROADWELL(dev_priv))
7205                 dev_priv->display.init_clock_gating = bdw_init_clock_gating;
7206         else if (IS_CHERRYVIEW(dev_priv))
7207                 dev_priv->display.init_clock_gating = chv_init_clock_gating;
7208         else if (IS_HASWELL(dev_priv))
7209                 dev_priv->display.init_clock_gating = hsw_init_clock_gating;
7210         else if (IS_IVYBRIDGE(dev_priv))
7211                 dev_priv->display.init_clock_gating = ivb_init_clock_gating;
7212         else if (IS_VALLEYVIEW(dev_priv))
7213                 dev_priv->display.init_clock_gating = vlv_init_clock_gating;
7214         else if (IS_GEN(dev_priv, 6))
7215                 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7216         else if (IS_GEN(dev_priv, 5))
7217                 dev_priv->display.init_clock_gating = ilk_init_clock_gating;
7218         else if (IS_G4X(dev_priv))
7219                 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7220         else if (IS_I965GM(dev_priv))
7221                 dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
7222         else if (IS_I965G(dev_priv))
7223                 dev_priv->display.init_clock_gating = i965g_init_clock_gating;
7224         else if (IS_GEN(dev_priv, 3))
7225                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7226         else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7227                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7228         else if (IS_GEN(dev_priv, 2))
7229                 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7230         else {
7231                 MISSING_CASE(INTEL_DEVID(dev_priv));
7232                 dev_priv->display.init_clock_gating = nop_init_clock_gating;
7233         }
7234 }
7235
7236 /* Set up chip-specific power management-related functions */
7237 void intel_init_pm(struct drm_i915_private *dev_priv)
7238 {
7239         /* For CxSR */
7240         if (IS_PINEVIEW(dev_priv))
7241                 pnv_get_mem_freq(dev_priv);
7242         else if (IS_GEN(dev_priv, 5))
7243                 ilk_get_mem_freq(dev_priv);
7244
7245         if (intel_has_sagv(dev_priv))
7246                 skl_setup_sagv_block_time(dev_priv);
7247
7248         /* For FIFO watermark updates */
7249         if (INTEL_GEN(dev_priv) >= 9) {
7250                 skl_setup_wm_latency(dev_priv);
7251                 dev_priv->display.initial_watermarks = skl_initial_wm;
7252                 dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
7253                 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7254         } else if (HAS_PCH_SPLIT(dev_priv)) {
7255                 ilk_setup_wm_latency(dev_priv);
7256
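                /*
                 * Hook up the ILK watermark functions only if the latency
                 * values read back look valid (slot 1 on gen5, slot 0
                 * otherwise); see the drm_dbg_kms() below for the fallback.
                 */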
7257                 if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
7258                      dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7259                     (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
7260                      dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7261                         dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7262                         dev_priv->display.compute_intermediate_wm =
7263                                 ilk_compute_intermediate_wm;
7264                         dev_priv->display.initial_watermarks =
7265                                 ilk_initial_watermarks;
7266                         dev_priv->display.optimize_watermarks =
7267                                 ilk_optimize_watermarks;
7268                 } else {
7269                         drm_dbg_kms(&dev_priv->drm,
7270                                     "Failed to read display plane latency. "
7271                                     "Disabling CxSR\n");
7272                 }
7273         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7274                 vlv_setup_wm_latency(dev_priv);
7275                 dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
7276                 dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
7277                 dev_priv->display.initial_watermarks = vlv_initial_watermarks;
7278                 dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
7279                 dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
7280         } else if (IS_G4X(dev_priv)) {
7281                 g4x_setup_wm_latency(dev_priv);
7282                 dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
7283                 dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
7284                 dev_priv->display.initial_watermarks = g4x_initial_watermarks;
7285                 dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
7286         } else if (IS_PINEVIEW(dev_priv)) {
7287                 if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
7288                                             dev_priv->is_ddr3,
7289                                             dev_priv->fsb_freq,
7290                                             dev_priv->mem_freq)) {
7291                         drm_info(&dev_priv->drm,
7292                                  "failed to find known CxSR latency "
7293                                  "(found ddr%s fsb freq %d, mem freq %d), "
7294                                  "disabling CxSR\n",
7295                                  (dev_priv->is_ddr3 == 1) ? "3" : "2",
7296                                  dev_priv->fsb_freq, dev_priv->mem_freq);
7297                         /* Disable CxSR and never update its watermark again */
7298                         intel_set_memory_cxsr(dev_priv, false);
7299                         dev_priv->display.update_wm = NULL;
7300                 } else
7301                         dev_priv->display.update_wm = pnv_update_wm;
7302         } else if (IS_GEN(dev_priv, 4)) {
7303                 dev_priv->display.update_wm = i965_update_wm;
7304         } else if (IS_GEN(dev_priv, 3)) {
7305                 dev_priv->display.update_wm = i9xx_update_wm;
7306                 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7307         } else if (IS_GEN(dev_priv, 2)) {
7308                 if (INTEL_NUM_PIPES(dev_priv) == 1) {
7309                         dev_priv->display.update_wm = i845_update_wm;
7310                         dev_priv->display.get_fifo_size = i845_get_fifo_size;
7311                 } else {
7312                         dev_priv->display.update_wm = i9xx_update_wm;
7313                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
7314                 }
7315         } else {
7316                 drm_err(&dev_priv->drm,
7317                         "unexpected fall-through in %s\n", __func__);
7318         }
7319 }
7320
7321 void intel_pm_setup(struct drm_i915_private *dev_priv)
7322 {
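        /* Runtime PM starts out active, with no wakerefs held. */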
7323         dev_priv->runtime_pm.suspended = false;
7324         atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
7325 }