OSDN Git Service

drm/radeon/dpm: add smc fan control for CI (v2)
[android-x86/kernel.git] / drivers / gpu / drm / radeon / ci_dpm.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include "drmP.h"
26 #include "radeon.h"
27 #include "radeon_asic.h"
28 #include "radeon_ucode.h"
29 #include "cikd.h"
30 #include "r600_dpm.h"
31 #include "ci_dpm.h"
32 #include "atom.h"
33 #include <linux/seq_file.h>
34
/* MC arbiter register set indices (F0-F3) used when switching memory timings
 * via ni_copy_and_switch_arb_sets() */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

/* Upper bound for SMC SRAM accesses (presumably the end of SMC address
 * space — used as the sram_end limit for SMC reads/writes) */
#define SMC_RAM_END 0x40000

/* Voltage scaling factors; VOLTAGE_SCALE is applied in ci_convert_to_vid() */
#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100
/*
 * Per-ASIC powertune default tables, selected by PCI device id in
 * ci_initialize_powertune_defaults().  They supply the SVI load-line
 * settings, TDC throttle values, TdcWaterfallCtl, DTE ambient temp base,
 * BAPM temperature gradient and the two flattened BAPMTI_R/BAPMTI_RC
 * tables consumed by ci_populate_bapm_parameters_in_dpm_table().
 * Field order follows struct ci_pt_defaults — see ci_dpm.h for the layout.
 */
static const struct ci_pt_defaults defaults_hawaii_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

/* Also the fallback table for unrecognized device ids */
static const struct ci_pt_defaults defaults_bonaire_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

/* NOTE(review): defaults_saturn_pro is defined but not selected by any case
 * in ci_initialize_powertune_defaults() in this chunk — confirm whether a
 * device id should map to it. */
static const struct ci_pt_defaults defaults_saturn_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
87
/*
 * DIDT (di/dt throttling) register programming list, applied by
 * ci_program_pt_config_registers() when DIDT is enabled.  Each entry
 * carries an offset, mask, shift, value and address-space type (here all
 * CISLANDS_CONFIGREG_DIDT_IND, i.e. written via WREG32_DIDT); the list is
 * terminated by an entry with offset 0xFFFFFFFF.  Four identical groups
 * are programmed at offsets 0x0x/0x2x/0x4x/0x6x — presumably one bank per
 * DIDT block (SQ/DB/TD/TCP), matching the four control registers touched
 * in ci_do_enable_didt(); confirm against the register spec.
 */
static const struct ci_pt_config_reg didt_config_ci[] =
{
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF }  /* list terminator */
};
164
/* Shared DPM helpers implemented in other radeon source files */
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
                                       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
                                                     u32 max_voltage_steps,
                                                     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
                          u32 block, bool enable);

/* Forward declarations for functions defined later in this file */
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
                                         struct atom_voltage_table_entry *voltage_table,
                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
                                       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
                                                      PPSMC_Msg msg, u32 parameter);
190 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
191 {
192         struct ci_power_info *pi = rdev->pm.dpm.priv;
193
194         return pi;
195 }
196
197 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
198 {
199         struct ci_ps *ps = rps->ps_priv;
200
201         return ps;
202 }
203
/*
 * Select the per-ASIC powertune default table by PCI device id and set up
 * the powertune capability flags (CAC, BAPM, TDC, package power tracking;
 * the DIDT ramping caps are left disabled here).
 */
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);

        switch (rdev->pdev->device) {
        /* Bonaire XT ids — also the fallback for unknown devices */
        case 0x6649:
        case 0x6650:
        case 0x6651:
        case 0x6658:
        case 0x665C:
        case 0x665D:
        default:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        case 0x6640:
        case 0x6641:
        case 0x6646:
        case 0x6647:
                pi->powertune_defaults = &defaults_saturn_xt;
                break;
        case 0x67B8:
        case 0x67B0:
                pi->powertune_defaults = &defaults_hawaii_xt;
                break;
        case 0x67BA:
        case 0x67B1:
                pi->powertune_defaults = &defaults_hawaii_pro;
                break;
        case 0x67A0:
        case 0x67A1:
        case 0x67A2:
        case 0x67A8:
        case 0x67A9:
        case 0x67AA:
        case 0x67B9:
        case 0x67BE:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        }

        pi->dte_tj_offset = 0;

        pi->caps_power_containment = true;
        pi->caps_cac = false;
        /* DIDT ramping left off for all blocks */
        pi->caps_sq_ramping = false;
        pi->caps_db_ramping = false;
        pi->caps_td_ramping = false;
        pi->caps_tcp_ramping = false;

        if (pi->caps_power_containment) {
                pi->caps_cac = true;
                /* BAPM is not used on Hawaii */
                if (rdev->family == CHIP_HAWAII)
                        pi->enable_bapm_feature = false;
                else
                        pi->enable_bapm_feature = true;
                pi->enable_tdc_limit_feature = true;
                pi->enable_pkg_pwr_tracking_feature = true;
        }
}
263
264 static u8 ci_convert_to_vid(u16 vddc)
265 {
266         return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
267 }
268
/*
 * Fill the BapmVddC hi/lo(/hi2) VID arrays in the local powertune fuse
 * table from the CAC leakage table.  With the EVV platform cap each
 * leakage entry supplies three voltages (vddc1/vddc2/vddc3); otherwise
 * the entry's vddc and leakage values are used.
 *
 * Returns 0 on success; -EINVAL if the leakage table is absent, has more
 * than the 8 entries the fuse arrays can hold, or its entry count does
 * not match the vddc-vs-sclk dependency table.
 */
static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
        u32 i;

        if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
                return -EINVAL;
        if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
                return -EINVAL;
        if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
            rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
                return -EINVAL;

        for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
                if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                        lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
                        hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
                        hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
                } else {
                        /* non-EVV: hi2 is left untouched */
                        lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
                        hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
                }
        }
        return 0;
}
297
298 static int ci_populate_vddc_vid(struct radeon_device *rdev)
299 {
300         struct ci_power_info *pi = ci_get_pi(rdev);
301         u8 *vid = pi->smc_powertune_table.VddCVid;
302         u32 i;
303
304         if (pi->vddc_voltage_table.count > 8)
305                 return -EINVAL;
306
307         for (i = 0; i < pi->vddc_voltage_table.count; i++)
308                 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
309
310         return 0;
311 }
312
313 static int ci_populate_svi_load_line(struct radeon_device *rdev)
314 {
315         struct ci_power_info *pi = ci_get_pi(rdev);
316         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
317
318         pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
319         pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
320         pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
321         pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
322
323         return 0;
324 }
325
326 static int ci_populate_tdc_limit(struct radeon_device *rdev)
327 {
328         struct ci_power_info *pi = ci_get_pi(rdev);
329         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
330         u16 tdc_limit;
331
332         tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
333         pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
334         pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
335                 pt_defaults->tdc_vddc_throttle_release_limit_perc;
336         pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
337
338         return 0;
339 }
340
/*
 * Program the TdcWaterfallCtl fuse.
 *
 * NOTE(review): the SMC SRAM read stores into TdcWaterfallCtl but on
 * success that value is immediately overwritten with the per-ASIC
 * default, so the read appears to serve only as a check that the
 * PmFuses region is reachable — confirm this is intentional before
 * changing it.
 *
 * Returns 0 on success, -EINVAL if the SMC SRAM read fails.
 */
static int ci_populate_dw8(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        int ret;

        ret = ci_read_smc_sram_dword(rdev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, PmFuseTable) +
                                     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
                                     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
                                     pi->sram_end);
        if (ret)
                return -EINVAL;
        else
                pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

        return 0;
}
360
361 static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
362 {
363         struct ci_power_info *pi = ci_get_pi(rdev);
364
365         if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
366             (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
367                 rdev->pm.dpm.fan.fan_output_sensitivity =
368                         rdev->pm.dpm.fan.default_fan_output_sensitivity;
369
370         pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
371                 cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
372
373         return 0;
374 }
375
/*
 * Derive the min/max GNB low-power-mode VIDs from the BapmVddC hi/lo VID
 * arrays populated earlier, skipping zero entries.
 *
 * NOTE(review): min/max are seeded from hi_vid[0] even when it is zero;
 * since zero entries are skipped inside the loop, a zero first entry can
 * leave min at 0 and force -EINVAL even if other entries are non-zero —
 * confirm whether that is intended.
 *
 * Returns 0 on success, -EINVAL if either extreme ends up zero.
 */
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        int i, min, max;

        min = max = hi_vid[0];
        for (i = 0; i < 8; i++) {
                if (0 != hi_vid[i]) {
                        if (min > hi_vid[i])
                                min = hi_vid[i];
                        if (max < hi_vid[i])
                                max = hi_vid[i];
                }

                if (0 != lo_vid[i]) {
                        if (min > lo_vid[i])
                                min = lo_vid[i];
                        if (max < lo_vid[i])
                                max = lo_vid[i];
                }
        }

        if ((min == 0) || (max == 0))
                return -EINVAL;
        pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
        pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

        return 0;
}
407
408 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
409 {
410         struct ci_power_info *pi = ci_get_pi(rdev);
411         u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
412         u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
413         struct radeon_cac_tdp_table *cac_tdp_table =
414                 rdev->pm.dpm.dyn_state.cac_tdp_table;
415
416         hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
417         lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
418
419         pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
420         pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
421
422         return 0;
423 }
424
/*
 * Fill the BAPM-related fields of the SMC DPM table: TDP limits, thermal
 * trip points, the DTE ambient temperature base, optional PPM package
 * power/temperature limits, the BAPM temperature gradient and the
 * BAPMTI_R/RC coefficient cube copied from the per-ASIC defaults.
 *
 * Returns 0 (cannot fail).
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
        struct radeon_cac_tdp_table *cac_tdp_table =
                rdev->pm.dpm.dyn_state.cac_tdp_table;
        struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
        int i, j, k;
        const u16 *def1;
        const u16 *def2;

        /* TDP values are scaled by 256 (8.8 fixed point) */
        dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
        dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

        dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
        dpm_table->GpuTjMax =
                (u8)(pi->thermal_temp_setting.temperature_high / 1000);
        dpm_table->GpuTjHyst = 8;

        dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

        if (ppm) {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
        } else {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
        }

        dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
        /* walk the flattened default tables in [iteration][source][sink] order */
        def1 = pt_defaults->bapmti_r;
        def2 = pt_defaults->bapmti_rc;

        for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
                for (j = 0; j < SMU7_DTE_SOURCES; j++) {
                        for (k = 0; k < SMU7_DTE_SINKS; k++) {
                                dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
                                dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
                                def1++;
                                def2++;
                        }
                }
        }

        return 0;
}
472
473 static int ci_populate_pm_base(struct radeon_device *rdev)
474 {
475         struct ci_power_info *pi = ci_get_pi(rdev);
476         u32 pm_fuse_table_offset;
477         int ret;
478
479         if (pi->caps_power_containment) {
480                 ret = ci_read_smc_sram_dword(rdev,
481                                              SMU7_FIRMWARE_HEADER_LOCATION +
482                                              offsetof(SMU7_Firmware_Header, PmFuseTable),
483                                              &pm_fuse_table_offset, pi->sram_end);
484                 if (ret)
485                         return ret;
486                 ret = ci_populate_bapm_vddc_vid_sidd(rdev);
487                 if (ret)
488                         return ret;
489                 ret = ci_populate_vddc_vid(rdev);
490                 if (ret)
491                         return ret;
492                 ret = ci_populate_svi_load_line(rdev);
493                 if (ret)
494                         return ret;
495                 ret = ci_populate_tdc_limit(rdev);
496                 if (ret)
497                         return ret;
498                 ret = ci_populate_dw8(rdev);
499                 if (ret)
500                         return ret;
501                 ret = ci_populate_fuzzy_fan(rdev);
502                 if (ret)
503                         return ret;
504                 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
505                 if (ret)
506                         return ret;
507                 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
508                 if (ret)
509                         return ret;
510                 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
511                                            (u8 *)&pi->smc_powertune_table,
512                                            sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
513                 if (ret)
514                         return ret;
515         }
516
517         return 0;
518 }
519
520 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
521 {
522         struct ci_power_info *pi = ci_get_pi(rdev);
523         u32 data;
524
525         if (pi->caps_sq_ramping) {
526                 data = RREG32_DIDT(DIDT_SQ_CTRL0);
527                 if (enable)
528                         data |= DIDT_CTRL_EN;
529                 else
530                         data &= ~DIDT_CTRL_EN;
531                 WREG32_DIDT(DIDT_SQ_CTRL0, data);
532         }
533
534         if (pi->caps_db_ramping) {
535                 data = RREG32_DIDT(DIDT_DB_CTRL0);
536                 if (enable)
537                         data |= DIDT_CTRL_EN;
538                 else
539                         data &= ~DIDT_CTRL_EN;
540                 WREG32_DIDT(DIDT_DB_CTRL0, data);
541         }
542
543         if (pi->caps_td_ramping) {
544                 data = RREG32_DIDT(DIDT_TD_CTRL0);
545                 if (enable)
546                         data |= DIDT_CTRL_EN;
547                 else
548                         data &= ~DIDT_CTRL_EN;
549                 WREG32_DIDT(DIDT_TD_CTRL0, data);
550         }
551
552         if (pi->caps_tcp_ramping) {
553                 data = RREG32_DIDT(DIDT_TCP_CTRL0);
554                 if (enable)
555                         data |= DIDT_CTRL_EN;
556                 else
557                         data &= ~DIDT_CTRL_EN;
558                 WREG32_DIDT(DIDT_TCP_CTRL0, data);
559         }
560 }
561
/*
 * Apply a list of register updates terminated by an entry with
 * offset == 0xFFFFFFFF.
 *
 * CISLANDS_CONFIGREG_CACHE entries do not touch hardware; they accumulate
 * shifted/masked bits into 'cache', which is OR'd into the next real
 * register write and then cleared.  All other entries perform a
 * read-modify-write through the address space selected by entry type:
 * SMC indirect, DIDT indirect, or plain MMIO (where the stored offset is
 * a dword index, hence the << 2).
 *
 * Returns 0 on success, -EINVAL if the list pointer is NULL.
 */
static int ci_program_pt_config_registers(struct radeon_device *rdev,
                                          const struct ci_pt_config_reg *cac_config_regs)
{
        const struct ci_pt_config_reg *config_regs = cac_config_regs;
        u32 data;
        u32 cache = 0;

        if (config_regs == NULL)
                return -EINVAL;

        while (config_regs->offset != 0xFFFFFFFF) {
                if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
                        cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                } else {
                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                data = RREG32_SMC(config_regs->offset);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                data = RREG32_DIDT(config_regs->offset);
                                break;
                        default:
                                data = RREG32(config_regs->offset << 2);
                                break;
                        }

                        data &= ~config_regs->mask;
                        data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                        /* merge in any bits accumulated from preceding CACHE entries */
                        data |= cache;

                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                WREG32_SMC(config_regs->offset, data);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                WREG32_DIDT(config_regs->offset, data);
                                break;
                        default:
                                WREG32(config_regs->offset << 2, data);
                                break;
                        }
                        cache = 0;
                }
                config_regs++;
        }
        return 0;
}
609
610 static int ci_enable_didt(struct radeon_device *rdev, bool enable)
611 {
612         struct ci_power_info *pi = ci_get_pi(rdev);
613         int ret;
614
615         if (pi->caps_sq_ramping || pi->caps_db_ramping ||
616             pi->caps_td_ramping || pi->caps_tcp_ramping) {
617                 cik_enter_rlc_safe_mode(rdev);
618
619                 if (enable) {
620                         ret = ci_program_pt_config_registers(rdev, didt_config_ci);
621                         if (ret) {
622                                 cik_exit_rlc_safe_mode(rdev);
623                                 return ret;
624                         }
625                 }
626
627                 ci_do_enable_didt(rdev, enable);
628
629                 cik_exit_rlc_safe_mode(rdev);
630         }
631
632         return 0;
633 }
634
/*
 * Enable or disable the power containment features (BAPM/DTE, TDC limit,
 * package power limit) via SMC messages, tracking which ones actually
 * engaged in pi->power_containment_features.
 *
 * On enable, each feature is attempted independently; a failed SMC
 * message marks the overall return as -EINVAL but does not stop the
 * remaining features.  On disable, only features recorded as enabled are
 * torn down, and SMC failures there are ignored.
 */
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (enable) {
                pi->power_containment_features = 0;
                if (pi->caps_power_containment) {
                        if (pi->enable_bapm_feature) {
                                smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
                        }

                        if (pi->enable_tdc_limit_feature) {
                                smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
                        }

                        if (pi->enable_pkg_pwr_tracking_feature) {
                                smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
                                if (smc_result != PPSMC_Result_OK) {
                                        ret = -EINVAL;
                                } else {
                                        struct radeon_cac_tdp_table *cac_tdp_table =
                                                rdev->pm.dpm.dyn_state.cac_tdp_table;
                                        /* limit scaled by 256 (8.8 fixed point) */
                                        u32 default_pwr_limit =
                                                (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

                                        ci_set_power_limit(rdev, default_pwr_limit);
                                }
                        }
                }
        } else {
                if (pi->caps_power_containment && pi->power_containment_features) {
                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
                                ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
                                ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                                ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
                        pi->power_containment_features = 0;
                }
        }

        return ret;
}
692
693 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
694 {
695         struct ci_power_info *pi = ci_get_pi(rdev);
696         PPSMC_Result smc_result;
697         int ret = 0;
698
699         if (pi->caps_cac) {
700                 if (enable) {
701                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
702                         if (smc_result != PPSMC_Result_OK) {
703                                 ret = -EINVAL;
704                                 pi->cac_enabled = false;
705                         } else {
706                                 pi->cac_enabled = true;
707                         }
708                 } else if (pi->cac_enabled) {
709                         ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
710                         pi->cac_enabled = false;
711                 }
712         }
713
714         return ret;
715 }
716
717 static int ci_power_control_set_level(struct radeon_device *rdev)
718 {
719         struct ci_power_info *pi = ci_get_pi(rdev);
720         struct radeon_cac_tdp_table *cac_tdp_table =
721                 rdev->pm.dpm.dyn_state.cac_tdp_table;
722         s32 adjust_percent;
723         s32 target_tdp;
724         int ret = 0;
725         bool adjust_polarity = false; /* ??? */
726
727         if (pi->caps_power_containment) {
728                 adjust_percent = adjust_polarity ?
729                         rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
730                 target_tdp = ((100 + adjust_percent) *
731                               (s32)cac_tdp_table->configurable_tdp) / 100;
732
733                 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
734         }
735
736         return ret;
737 }
738
739 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
740 {
741         struct ci_power_info *pi = ci_get_pi(rdev);
742
743         if (pi->uvd_power_gated == gate)
744                 return;
745
746         pi->uvd_power_gated = gate;
747
748         ci_update_uvd_dpm(rdev, gate);
749 }
750
751 bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
752 {
753         struct ci_power_info *pi = ci_get_pi(rdev);
754         u32 vblank_time = r600_dpm_get_vblank_time(rdev);
755         u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
756
757         if (vblank_time < switch_limit)
758                 return true;
759         else
760                 return false;
761
762 }
763
/* Clamp and adjust the requested power state before it is programmed:
 * fills in VCE clocks, clamps levels to the AC/DC limits, and pins the
 * low levels when mclk switching must be avoided.  Mutates @rps in place.
 * NOTE(review): levels [0] and [1] are accessed unconditionally below —
 * assumes performance_level_count >= 2 for CI states; confirm with the
 * state-parsing code elsewhere in this file.
 */
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	/* pick up the VCE clock requirements for the active VCE level */
	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* mclk switching is unsafe with multiple active crtcs or a short vblank */
	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on DC, clamp every level to the DC clock limits */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	/* when mclk switching is disabled, run the highest mclk at all levels */
	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	/* raise the base clocks to at least what VCE needs */
	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	/* keep the levels monotonically non-decreasing in sclk */
	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		/* pin level 0 up to the high mclk so no switch is needed */
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
838
/* Program the thermal interrupt high/low trip points.
 * @min_temp/@max_temp are in millidegrees C; the hardware fields take
 * whole degrees, hence the /1000 below.  The requested range is clamped
 * to the 0..255 C window supported by the DIG_THERM fields.
 * Returns 0 on success, -EINVAL for an inverted range.
 */
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	/* NOTE(review): if this is ever enabled, the mask should almost
	 * certainly be inverted (&= ~DIG_THERM_DPM_MASK) to clear the
	 * field before OR-ing in the new value. */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	/* remember the applied range (still in millidegrees) */
	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}
874
875 static int ci_thermal_enable_alert(struct radeon_device *rdev,
876                                    bool enable)
877 {
878         u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
879         PPSMC_Result result;
880
881         if (enable) {
882                 thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
883                 WREG32_SMC(CG_THERMAL_INT, thermal_int);
884                 rdev->irq.dpm_thermal = false;
885                 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
886                 if (result != PPSMC_Result_OK) {
887                         DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
888                         return -EINVAL;
889                 }
890         } else {
891                 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
892                 WREG32_SMC(CG_THERMAL_INT, thermal_int);
893                 rdev->irq.dpm_thermal = true;
894                 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
895                 if (result != PPSMC_Result_OK) {
896                         DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
897                         return -EINVAL;
898                 }
899         }
900
901         return 0;
902 }
903
904 static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
905 {
906         struct ci_power_info *pi = ci_get_pi(rdev);
907         u32 tmp;
908
909         if (pi->fan_ctrl_is_in_default_mode) {
910                 tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
911                 pi->fan_ctrl_default_mode = tmp;
912                 tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
913                 pi->t_min = tmp;
914                 pi->fan_ctrl_is_in_default_mode = false;
915         }
916
917         tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
918         tmp |= TMIN(0);
919         WREG32_SMC(CG_FDO_CTRL2, tmp);
920
921         tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
922         tmp |= FDO_PWM_MODE(mode);
923         WREG32_SMC(CG_FDO_CTRL2, tmp);
924 }
925
/* Build the SMU7 fan table from the dpm fan profile and upload it to SMC
 * SRAM at pi->fan_table_start.  Failures are non-fatal: fan control simply
 * falls back to hardware mode (ucode_fan_control = false) and 0 is
 * returned, so the caller never aborts dpm setup over the fan table.
 */
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* full-speed duty cycle; 0 means the vbios didn't program the fan */
	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* scale pwm_min (in 0.01% units) to the duty100 range */
	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	/* fan-curve slopes in SMC fixed point; +50 rounds to nearest */
	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* temperatures are stored in 0.01 C; SMC wants whole degrees */
	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	/* polling period derived from the crystal clock */
	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	/* use whichever temperature sensor the hardware currently selects */
	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}
1002
1003 static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
1004 {
1005         struct ci_power_info *pi = ci_get_pi(rdev);
1006         PPSMC_Result ret;
1007
1008         if (pi->caps_od_fuzzy_fan_control_support) {
1009                 ret = ci_send_msg_to_smc_with_parameter(rdev,
1010                                                         PPSMC_StartFanControl,
1011                                                         FAN_CONTROL_FUZZY);
1012                 if (ret != PPSMC_Result_OK)
1013                         return -EINVAL;
1014                 ret = ci_send_msg_to_smc_with_parameter(rdev,
1015                                                         PPSMC_MSG_SetFanPwmMax,
1016                                                         rdev->pm.dpm.fan.default_max_fan_pwm);
1017                 if (ret != PPSMC_Result_OK)
1018                         return -EINVAL;
1019         } else {
1020                 ret = ci_send_msg_to_smc_with_parameter(rdev,
1021                                                         PPSMC_StartFanControl,
1022                                                         FAN_CONTROL_TABLE);
1023                 if (ret != PPSMC_Result_OK)
1024                         return -EINVAL;
1025         }
1026
1027         return 0;
1028 }
1029
1030 #if 0
/* Ask the SMC to relinquish fan control (compiled out; kept for reference).
 * Returns 0 on success, -EINVAL if the SMC rejects the message.
 */
static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
	PPSMC_Result ret;

	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}
1041
/* Read the current fan speed as a percentage of full duty (compiled out).
 * @speed: out parameter, 0-100.  Returns -ENOENT on fanless boards,
 * -EINVAL if the full-speed duty (duty100) is unprogrammed.
 */
static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
					     u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	/* 64-bit intermediate avoids overflow in duty * 100 */
	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	/* clamp: measured duty can momentarily exceed duty100 */
	if (*speed > 100)
		*speed = 100;

	return 0;
}
1066
/* Force a static fan duty cycle (compiled out).
 * @speed: requested duty in percent, 0-100.  Stops SMC fan control first
 * so the manual setting is not immediately overwritten.  Returns -ENOENT
 * on fanless boards, -EINVAL for out-of-range input or unprogrammed duty100.
 */
static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
					     u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (speed > 100)
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	/* convert percent to a raw duty value in the duty100 range */
	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32_SMC(CG_FDO_CTRL0, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);

	return 0;
}
1100
/* Read the measured fan speed in RPM from the tach (compiled out).
 * Returns -ENOENT on fanless boards, when the board doesn't report
 * pulses-per-revolution, or when the tach has measured no period yet.
 */
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	/* xclk is in 10 kHz units, hence the * 10000 */
	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}
1121
/* Program a target fan speed in RPM (compiled out).
 * Validates @speed against the board's fan_min_rpm/fan_max_rpm range and
 * stops SMC fan control before forcing the static tach target.
 * NOTE(review): the divisor hard-codes 8 pulses per revolution even
 * though fan_pulses_per_revolution is checked above — confirm intent
 * before enabling this code.
 */
static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);

	return 0;
}
1150 #endif
1151
1152 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
1153 {
1154         struct ci_power_info *pi = ci_get_pi(rdev);
1155         u32 tmp;
1156
1157         if (!pi->fan_ctrl_is_in_default_mode) {
1158                 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
1159                 tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
1160                 WREG32_SMC(CG_FDO_CTRL2, tmp);
1161
1162                 tmp = RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK;
1163                 tmp |= TMIN(pi->t_min);
1164                 WREG32_SMC(CG_FDO_CTRL2, tmp);
1165                 pi->fan_ctrl_is_in_default_mode = true;
1166         }
1167 }
1168
1169 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
1170 {
1171         if (rdev->pm.dpm.fan.ucode_fan_control) {
1172                 ci_fan_ctrl_start_smc_fan_control(rdev);
1173                 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
1174         }
1175 }
1176
1177 static void ci_thermal_initialize(struct radeon_device *rdev)
1178 {
1179         u32 tmp;
1180
1181         if (rdev->pm.fan_pulses_per_revolution) {
1182                 tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
1183                 tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
1184                 WREG32_SMC(CG_TACH_CTRL, tmp);
1185         }
1186
1187         tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
1188         tmp |= TACH_PWM_RESP_RATE(0x28);
1189         WREG32_SMC(CG_FDO_CTRL2, tmp);
1190 }
1191
1192 static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
1193 {
1194         int ret;
1195
1196         ci_thermal_initialize(rdev);
1197         ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1198         if (ret)
1199                 return ret;
1200         ret = ci_thermal_enable_alert(rdev, true);
1201         if (ret)
1202                 return ret;
1203         if (rdev->pm.dpm.fan.ucode_fan_control) {
1204                 ret = ci_thermal_setup_fan_table(rdev);
1205                 if (ret)
1206                         return ret;
1207                 ci_thermal_start_smc_fan_control(rdev);
1208         }
1209
1210         return 0;
1211 }
1212
1213 static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
1214 {
1215         if (!rdev->pm.no_fan)
1216                 ci_fan_ctrl_set_default_mode(rdev);
1217 }
1218
1219 #if 0
/* Read a dword from the SMC soft-register area (compiled out).
 * Soft registers live in SMC SRAM starting at pi->soft_regs_start.
 */
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
1229 #endif
1230
1231 static int ci_write_smc_soft_register(struct radeon_device *rdev,
1232                                       u16 reg_offset, u32 value)
1233 {
1234         struct ci_power_info *pi = ci_get_pi(rdev);
1235
1236         return ci_write_smc_sram_dword(rdev,
1237                                        pi->soft_regs_start + reg_offset,
1238                                        value, pi->sram_end);
1239 }
1240
1241 static void ci_init_fps_limits(struct radeon_device *rdev)
1242 {
1243         struct ci_power_info *pi = ci_get_pi(rdev);
1244         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1245
1246         if (pi->caps_fps) {
1247                 u16 tmp;
1248
1249                 tmp = 45;
1250                 table->FpsHighT = cpu_to_be16(tmp);
1251
1252                 tmp = 30;
1253                 table->FpsLowT = cpu_to_be16(tmp);
1254         }
1255 }
1256
1257 static int ci_update_sclk_t(struct radeon_device *rdev)
1258 {
1259         struct ci_power_info *pi = ci_get_pi(rdev);
1260         int ret = 0;
1261         u32 low_sclk_interrupt_t = 0;
1262
1263         if (pi->caps_sclk_throttle_low_notification) {
1264                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1265
1266                 ret = ci_copy_bytes_to_smc(rdev,
1267                                            pi->dpm_table_start +
1268                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1269                                            (u8 *)&low_sclk_interrupt_t,
1270                                            sizeof(u32), pi->sram_end);
1271
1272         }
1273
1274         return ret;
1275 }
1276
/* Populate the vddc/vddci leakage tables in @pi by probing each virtual
 * voltage id (ATOM_VIRTUAL_VOLTAGE_ID0 + i) against the vbios.  Two
 * lookup paths exist: EVV (platform cap) fills only vddc; the
 * leakage-id path fills both vddc and vddci.  Entries where the returned
 * voltage is 0 or merely echoes the virtual id are skipped as invalid.
 */
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
1318
/* Configure thermal protection based on the active auto-throttle source
 * bitmask.  @sources is a bitmask of RADEON_DPM_AUTO_THROTTLE_SRC_* bits;
 * only the three combinations below enable protection.
 * NOTE(review): dpm_event_src is computed but only consumed by the
 * #if 0 block, so it is currently unused at runtime.
 */
static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		/* honor pi->thermal_protection: clear the disable bit only
		 * when protection is actually wanted by the driver */
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}
1367
1368 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
1369                                            enum radeon_dpm_auto_throttle_src source,
1370                                            bool enable)
1371 {
1372         struct ci_power_info *pi = ci_get_pi(rdev);
1373
1374         if (enable) {
1375                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1376                         pi->active_auto_throttle_sources |= 1 << source;
1377                         ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1378                 }
1379         } else {
1380                 if (pi->active_auto_throttle_sources & (1 << source)) {
1381                         pi->active_auto_throttle_sources &= ~(1 << source);
1382                         ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1383                 }
1384         }
1385 }
1386
1387 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
1388 {
1389         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1390                 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1391 }
1392
1393 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
1394 {
1395         struct ci_power_info *pi = ci_get_pi(rdev);
1396         PPSMC_Result smc_result;
1397
1398         if (!pi->need_update_smu7_dpm_table)
1399                 return 0;
1400
1401         if ((!pi->sclk_dpm_key_disabled) &&
1402             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1403                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1404                 if (smc_result != PPSMC_Result_OK)
1405                         return -EINVAL;
1406         }
1407
1408         if ((!pi->mclk_dpm_key_disabled) &&
1409             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1410                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1411                 if (smc_result != PPSMC_Result_OK)
1412                         return -EINVAL;
1413         }
1414
1415         pi->need_update_smu7_dpm_table = 0;
1416         return 0;
1417 }
1418
/* Enable or disable sclk and mclk DPM via SMC messages.  The per-domain
 * key_disabled flags suppress the corresponding messages.  On mclk
 * enable, the LCAC blocks are additionally kicked through a
 * reset/release sequence (the 0x...05 values are magic from the
 * hardware bring-up — do not reorder or drop the udelay).
 * Returns 0 on success, -EINVAL on the first rejected SMC message.
 */
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			/* LCAC init sequence; values are magic from bring-up */
			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
1464
/* Enable dynamic power management: sets the global/dynamic PM enable
 * bits, programs the voltage-change timeout soft register, releases the
 * BIF lane-count reset, then enables voltage control, sclk/mclk dpm and
 * (unless key-disabled) PCIe dpm.  The order matters — voltage control
 * must be up before the clock dpm enables.
 * Returns 0 on success, -EINVAL/-errno on the first failure.
 */
static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	/* give the SMC a voltage-change timeout before enabling dpm */
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1500
/*
 * ci_freeze_sclk_mclk_dpm - freeze DPM levels before a table re-upload
 * @rdev: radeon device
 *
 * When the SMU7 dpm table is pending an update (sclk or mclk levels
 * changed), freeze the corresponding DPM state machines so the SMC does
 * not switch levels while the table is being rewritten.
 *
 * Returns 0 on success (or when no update is pending), -EINVAL if an
 * SMC message is rejected.
 */
static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1525
/*
 * ci_stop_dpm - disable dynamic power management on the SMC
 * @rdev: radeon device
 *
 * Clears the global and dynamic power-management enable bits, then asks
 * the SMC to disable PCIe DPM (unless key-disabled), sclk/mclk DPM and
 * voltage control — the reverse order of ci_start_dpm().
 *
 * Returns 0 on success, -EINVAL on SMC message failure, or the error
 * from ci_enable_sclk_mclk_dpm().
 */
static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
1557
1558 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1559 {
1560         u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1561
1562         if (enable)
1563                 tmp &= ~SCLK_PWRMGT_OFF;
1564         else
1565                 tmp |= SCLK_PWRMGT_OFF;
1566         WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1567 }
1568
#if 0
/*
 * ci_notify_hw_of_power_source - tell the SMC about an AC/DC switch
 * @rdev: radeon device
 * @ac_power: true when running on AC power
 *
 * Currently unused (compiled out).  Picks the AC or battery power limit
 * from the cac tdp table (scaled by 256, presumably the SMC's
 * fixed-point format — TODO confirm), programs it, and when automatic
 * DC transitions are supported notifies the SMC of the new source.
 */
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
1595
/*
 * ci_send_msg_to_smc_with_parameter - send an SMC message with one argument
 *
 * The argument is written to the SMC_MSG_ARG_0 mailbox register before
 * the message itself is issued.
 */
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}
1602
1603 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1604                                                         PPSMC_Msg msg, u32 *parameter)
1605 {
1606         PPSMC_Result smc_result;
1607
1608         smc_result = ci_send_msg_to_smc(rdev, msg);
1609
1610         if ((smc_result == PPSMC_Result_OK) && parameter)
1611                 *parameter = RREG32(SMC_MSG_ARG_0);
1612
1613         return smc_result;
1614 }
1615
1616 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1617 {
1618         struct ci_power_info *pi = ci_get_pi(rdev);
1619
1620         if (!pi->sclk_dpm_key_disabled) {
1621                 PPSMC_Result smc_result =
1622                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1623                 if (smc_result != PPSMC_Result_OK)
1624                         return -EINVAL;
1625         }
1626
1627         return 0;
1628 }
1629
1630 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1631 {
1632         struct ci_power_info *pi = ci_get_pi(rdev);
1633
1634         if (!pi->mclk_dpm_key_disabled) {
1635                 PPSMC_Result smc_result =
1636                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1637                 if (smc_result != PPSMC_Result_OK)
1638                         return -EINVAL;
1639         }
1640
1641         return 0;
1642 }
1643
1644 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1645 {
1646         struct ci_power_info *pi = ci_get_pi(rdev);
1647
1648         if (!pi->pcie_dpm_key_disabled) {
1649                 PPSMC_Result smc_result =
1650                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1651                 if (smc_result != PPSMC_Result_OK)
1652                         return -EINVAL;
1653         }
1654
1655         return 0;
1656 }
1657
/*
 * ci_set_power_limit - set the SMC package power limit
 * @rdev: radeon device
 * @n: power limit value passed to the SMC
 *
 * Only takes effect when the package power limit feature was enabled in
 * power containment; otherwise this is a successful no-op.
 *
 * Returns 0 on success, -EINVAL on SMC failure.
 */
static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1671
1672 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1673                                        u32 target_tdp)
1674 {
1675         PPSMC_Result smc_result =
1676                 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1677         if (smc_result != PPSMC_Result_OK)
1678                 return -EINVAL;
1679         return 0;
1680 }
1681
/*
 * ci_set_boot_state - drop back to the boot clock state
 *
 * Disabling sclk/mclk DPM leaves the hardware at its boot levels.
 */
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
1686
1687 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1688 {
1689         u32 sclk_freq;
1690         PPSMC_Result smc_result =
1691                 ci_send_msg_to_smc_return_parameter(rdev,
1692                                                     PPSMC_MSG_API_GetSclkFrequency,
1693                                                     &sclk_freq);
1694         if (smc_result != PPSMC_Result_OK)
1695                 sclk_freq = 0;
1696
1697         return sclk_freq;
1698 }
1699
1700 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1701 {
1702         u32 mclk_freq;
1703         PPSMC_Result smc_result =
1704                 ci_send_msg_to_smc_return_parameter(rdev,
1705                                                     PPSMC_MSG_API_GetMclkFrequency,
1706                                                     &mclk_freq);
1707         if (smc_result != PPSMC_Result_OK)
1708                 mclk_freq = 0;
1709
1710         return mclk_freq;
1711 }
1712
/*
 * ci_dpm_start_smc - release and start the SMC microcontroller
 *
 * Programs the jump to the firmware entry point, starts the SMC clock,
 * takes the SMC out of reset, then busy-waits (bounded by
 * rdev->usec_timeout iterations) for the firmware to signal that its
 * interrupts are enabled.
 */
static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}
1725
/*
 * ci_dpm_stop_smc - halt the SMC microcontroller
 *
 * Puts the SMC into reset and stops its clock.
 */
static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}
1731
1732 static int ci_process_firmware_header(struct radeon_device *rdev)
1733 {
1734         struct ci_power_info *pi = ci_get_pi(rdev);
1735         u32 tmp;
1736         int ret;
1737
1738         ret = ci_read_smc_sram_dword(rdev,
1739                                      SMU7_FIRMWARE_HEADER_LOCATION +
1740                                      offsetof(SMU7_Firmware_Header, DpmTable),
1741                                      &tmp, pi->sram_end);
1742         if (ret)
1743                 return ret;
1744
1745         pi->dpm_table_start = tmp;
1746
1747         ret = ci_read_smc_sram_dword(rdev,
1748                                      SMU7_FIRMWARE_HEADER_LOCATION +
1749                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1750                                      &tmp, pi->sram_end);
1751         if (ret)
1752                 return ret;
1753
1754         pi->soft_regs_start = tmp;
1755
1756         ret = ci_read_smc_sram_dword(rdev,
1757                                      SMU7_FIRMWARE_HEADER_LOCATION +
1758                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1759                                      &tmp, pi->sram_end);
1760         if (ret)
1761                 return ret;
1762
1763         pi->mc_reg_table_start = tmp;
1764
1765         ret = ci_read_smc_sram_dword(rdev,
1766                                      SMU7_FIRMWARE_HEADER_LOCATION +
1767                                      offsetof(SMU7_Firmware_Header, FanTable),
1768                                      &tmp, pi->sram_end);
1769         if (ret)
1770                 return ret;
1771
1772         pi->fan_table_start = tmp;
1773
1774         ret = ci_read_smc_sram_dword(rdev,
1775                                      SMU7_FIRMWARE_HEADER_LOCATION +
1776                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1777                                      &tmp, pi->sram_end);
1778         if (ret)
1779                 return ret;
1780
1781         pi->arb_table_start = tmp;
1782
1783         return 0;
1784 }
1785
/*
 * ci_read_clock_registers - snapshot the spll/mpll clock registers
 *
 * Caches the current engine and memory pll register values in the power
 * info so later sclk/mclk level programming can start from the boot-up
 * settings.
 */
static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
1812
/*
 * ci_init_sclk_t - reset the low sclk interrupt threshold
 */
static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}
1819
1820 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1821                                          bool enable)
1822 {
1823         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1824
1825         if (enable)
1826                 tmp &= ~THERMAL_PROTECTION_DIS;
1827         else
1828                 tmp |= THERMAL_PROTECTION_DIS;
1829         WREG32_SMC(GENERAL_PWRMGT, tmp);
1830 }
1831
1832 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1833 {
1834         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1835
1836         tmp |= STATIC_PM_EN;
1837
1838         WREG32_SMC(GENERAL_PWRMGT, tmp);
1839 }
1840
#if 0
/*
 * ci_enter_ulp_state - ask the SMC to switch to its minimum power state
 *
 * Currently unused (compiled out).
 */
static int ci_enter_ulp_state(struct radeon_device *rdev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	/* long delays should use mdelay, not udelay (udelay args above
	 * ~1-2ms can overflow on some architectures) */
	mdelay(25);

	return 0;
}

/*
 * ci_exit_ulp_state - bring the SMC back from its minimum power state
 *
 * Currently unused (compiled out).  Polls SMC_RESP_0 (bounded by
 * rdev->usec_timeout iterations) for the SMC's acknowledgement.
 */
static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	mdelay(7);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		mdelay(1);
	}

	return 0;
}
#endif
1869
1870 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1871                                         bool has_display)
1872 {
1873         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1874
1875         return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1876 }
1877
1878 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1879                                       bool enable)
1880 {
1881         struct ci_power_info *pi = ci_get_pi(rdev);
1882
1883         if (enable) {
1884                 if (pi->caps_sclk_ds) {
1885                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1886                                 return -EINVAL;
1887                 } else {
1888                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1889                                 return -EINVAL;
1890                 }
1891         } else {
1892                 if (pi->caps_sclk_ds) {
1893                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1894                                 return -EINVAL;
1895                 }
1896         }
1897
1898         return 0;
1899 }
1900
/*
 * ci_program_display_gap - program display gap and vblank timing for DPM
 *
 * Selects the display gap behavior based on the number of active crtcs,
 * computes the pre-vblank time from the refresh rate and vblank time,
 * programs it (in units of ref_clock/100 ticks) into
 * CG_DISPLAY_GAP_CNTL2 and the SMC soft registers, and finally tells
 * the SMC whether exactly one display is active.
 *
 * NOTE(review): pre_vbi_time_in_us = frame_time_in_us - 200 - vblank_time
 * can wrap the u32 if vblank_time exceeds frame_time_in_us - 200;
 * presumably the inputs never allow that — worth confirming.
 */
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	/* fall back to sane defaults when no display timing is available */
	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));

}
1934
/*
 * ci_enable_spread_spectrum - toggle dynamic engine clock spread spectrum
 * @rdev: radeon device
 * @enable: true to enable spread spectrum
 *
 * Enable only sets the dynamic spread spectrum bit (and only when the
 * board supports sclk spread spectrum); disable additionally clears
 * SSEN in the spll spread spectrum register.
 */
static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}
1956
/*
 * ci_program_sstp - program the default spread spectrum step/delay values
 */
static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}
1961
1962 static void ci_enable_display_gap(struct radeon_device *rdev)
1963 {
1964         u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1965
1966         tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1967         tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1968                 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1969
1970         WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1971 }
1972
/*
 * ci_program_vc - program the voltage controller throttle values
 *
 * Lets the sclk/busy counters run and loads the default throttle
 * thresholds into the CG_FTV registers.
 */
static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}
1990
/*
 * ci_clear_vc - clear the voltage controller throttle values
 *
 * Holds the sclk/busy counters in reset and zeroes the CG_FTV
 * registers — the inverse of ci_program_vc().
 */
static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}
2008
/*
 * ci_upload_firmware - load the SMC microcode into SMC RAM
 *
 * Waits (bounded by rdev->usec_timeout iterations) for the currently
 * running SMC's boot sequence to finish, sets the SYSCON misc control,
 * then stops the SMC clock, holds the SMC in reset and uploads the
 * ucode.
 *
 * Returns the result of ci_load_smc_ucode().
 */
static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;

}
2028
2029 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2030                                      struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2031                                      struct atom_voltage_table *voltage_table)
2032 {
2033         u32 i;
2034
2035         if (voltage_dependency_table == NULL)
2036                 return -EINVAL;
2037
2038         voltage_table->mask_low = 0;
2039         voltage_table->phase_delay = 0;
2040
2041         voltage_table->count = voltage_dependency_table->count;
2042         for (i = 0; i < voltage_table->count; i++) {
2043                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2044                 voltage_table->entries[i].smio_low = 0;
2045         }
2046
2047         return 0;
2048 }
2049
/*
 * ci_construct_voltage_tables - build the vddc/vddci/mvdd voltage tables
 * @rdev: radeon device
 *
 * For each rail, the table is built from the atom GPIO LUT when the
 * rail is GPIO controlled, or from the corresponding mclk dependency
 * table when it is SVI2 controlled; rails with no control are skipped.
 * Each table is then trimmed to the maximum number of levels the SMC
 * state table supports.
 *
 * Returns 0 on success or the first error encountered.
 */
static int ci_construct_voltage_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}
2111
/*
 * ci_populate_smc_voltage_table - convert one voltage entry to SMC format
 * @rdev: radeon device
 * @voltage_table: source voltage entry
 * @smc_voltage_table: destination SMC voltage level
 *
 * Looks up the leakage-adjusted std hi/lo sidd voltages; on failure
 * falls back to the entry's own value.  All values are scaled by
 * VOLTAGE_SCALE and stored big-endian as the SMC expects.
 */
static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	int ret;

	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
					    &smc_voltage_table->StdVoltageHiSidd,
					    &smc_voltage_table->StdVoltageLoSidd);

	if (ret) {
		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
	}

	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}
2133
2134 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2135                                       SMU7_Discrete_DpmTable *table)
2136 {
2137         struct ci_power_info *pi = ci_get_pi(rdev);
2138         unsigned int count;
2139
2140         table->VddcLevelCount = pi->vddc_voltage_table.count;
2141         for (count = 0; count < table->VddcLevelCount; count++) {
2142                 ci_populate_smc_voltage_table(rdev,
2143                                               &pi->vddc_voltage_table.entries[count],
2144                                               &table->VddcLevel[count]);
2145
2146                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2147                         table->VddcLevel[count].Smio |=
2148                                 pi->vddc_voltage_table.entries[count].smio_low;
2149                 else
2150                         table->VddcLevel[count].Smio = 0;
2151         }
2152         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2153
2154         return 0;
2155 }
2156
2157 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2158                                        SMU7_Discrete_DpmTable *table)
2159 {
2160         unsigned int count;
2161         struct ci_power_info *pi = ci_get_pi(rdev);
2162
2163         table->VddciLevelCount = pi->vddci_voltage_table.count;
2164         for (count = 0; count < table->VddciLevelCount; count++) {
2165                 ci_populate_smc_voltage_table(rdev,
2166                                               &pi->vddci_voltage_table.entries[count],
2167                                               &table->VddciLevel[count]);
2168
2169                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2170                         table->VddciLevel[count].Smio |=
2171                                 pi->vddci_voltage_table.entries[count].smio_low;
2172                 else
2173                         table->VddciLevel[count].Smio = 0;
2174         }
2175         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2176
2177         return 0;
2178 }
2179
2180 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2181                                       SMU7_Discrete_DpmTable *table)
2182 {
2183         struct ci_power_info *pi = ci_get_pi(rdev);
2184         unsigned int count;
2185
2186         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2187         for (count = 0; count < table->MvddLevelCount; count++) {
2188                 ci_populate_smc_voltage_table(rdev,
2189                                               &pi->mvdd_voltage_table.entries[count],
2190                                               &table->MvddLevel[count]);
2191
2192                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2193                         table->MvddLevel[count].Smio |=
2194                                 pi->mvdd_voltage_table.entries[count].smio_low;
2195                 else
2196                         table->MvddLevel[count].Smio = 0;
2197         }
2198         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2199
2200         return 0;
2201 }
2202
2203 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2204                                           SMU7_Discrete_DpmTable *table)
2205 {
2206         int ret;
2207
2208         ret = ci_populate_smc_vddc_table(rdev, table);
2209         if (ret)
2210                 return ret;
2211
2212         ret = ci_populate_smc_vddci_table(rdev, table);
2213         if (ret)
2214                 return ret;
2215
2216         ret = ci_populate_smc_mvdd_table(rdev, table);
2217         if (ret)
2218                 return ret;
2219
2220         return 0;
2221 }
2222
2223 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
2224                                   SMU7_Discrete_VoltageLevel *voltage)
2225 {
2226         struct ci_power_info *pi = ci_get_pi(rdev);
2227         u32 i = 0;
2228
2229         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2230                 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2231                         if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2232                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2233                                 break;
2234                         }
2235                 }
2236
2237                 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2238                         return -EINVAL;
2239         }
2240
2241         return -EINVAL;
2242 }
2243
/*
 * ci_get_std_voltage_value_sidd - look up leakage-adjusted voltages
 * @rdev: radeon device
 * @voltage_table: entry whose voltage to translate
 * @std_voltage_hi_sidd: returned high sidd voltage (VOLTAGE_SCALE units)
 * @std_voltage_lo_sidd: returned low sidd voltage (VOLTAGE_SCALE units)
 *
 * Searches the vddc-vs-sclk dependency table for an exact voltage
 * match and, failing that, for the first entry at or above the
 * requested voltage, then returns the cac leakage table values at that
 * index (clamped to the leakage table size).  When no leakage data
 * exists the raw voltage (scaled) is returned.
 *
 * Returns 0 on success, -EINVAL if the dependency table is missing.
 */
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;
	/* default to the unadjusted voltage */
	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		/* first pass: exact voltage match */
		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		/* second pass: first entry at or above the voltage */
		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}
2294
2295 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2296                                                   const struct radeon_phase_shedding_limits_table *limits,
2297                                                   u32 sclk,
2298                                                   u32 *phase_shedding)
2299 {
2300         unsigned int i;
2301
2302         *phase_shedding = 1;
2303
2304         for (i = 0; i < limits->count; i++) {
2305                 if (sclk < limits->entries[i].sclk) {
2306                         *phase_shedding = i;
2307                         break;
2308                 }
2309         }
2310 }
2311
2312 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2313                                                   const struct radeon_phase_shedding_limits_table *limits,
2314                                                   u32 mclk,
2315                                                   u32 *phase_shedding)
2316 {
2317         unsigned int i;
2318
2319         *phase_shedding = 1;
2320
2321         for (i = 0; i < limits->count; i++) {
2322                 if (mclk < limits->entries[i].mclk) {
2323                         *phase_shedding = i;
2324                         break;
2325                 }
2326         }
2327 }
2328
2329 static int ci_init_arb_table_index(struct radeon_device *rdev)
2330 {
2331         struct ci_power_info *pi = ci_get_pi(rdev);
2332         u32 tmp;
2333         int ret;
2334
2335         ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2336                                      &tmp, pi->sram_end);
2337         if (ret)
2338                 return ret;
2339
2340         tmp &= 0x00FFFFFF;
2341         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2342
2343         return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2344                                        tmp, pi->sram_end);
2345 }
2346
2347 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2348                                          struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2349                                          u32 clock, u32 *voltage)
2350 {
2351         u32 i = 0;
2352
2353         if (allowed_clock_voltage_table->count == 0)
2354                 return -EINVAL;
2355
2356         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2357                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2358                         *voltage = allowed_clock_voltage_table->entries[i].v;
2359                         return 0;
2360                 }
2361         }
2362
2363         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2364
2365         return 0;
2366 }
2367
2368 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2369                                              u32 sclk, u32 min_sclk_in_sr)
2370 {
2371         u32 i;
2372         u32 tmp;
2373         u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2374                 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2375
2376         if (sclk < min)
2377                 return 0;
2378
2379         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2380                 tmp = sclk / (1 << i);
2381                 if (tmp >= min || i == 0)
2382                         break;
2383         }
2384
2385         return (u8)i;
2386 }
2387
/* Copy the F0 MC arb register set into F1 and switch the MC to F1. */
static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
2392
2393 static int ci_reset_to_default(struct radeon_device *rdev)
2394 {
2395         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2396                 0 : -EINVAL;
2397 }
2398
2399 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2400 {
2401         u32 tmp;
2402
2403         tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2404
2405         if (tmp == MC_CG_ARB_FREQ_F0)
2406                 return 0;
2407
2408         return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2409 }
2410
2411 static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2412                                         const u32 engine_clock,
2413                                         const u32 memory_clock,
2414                                         u32 *dram_timimg2)
2415 {
2416         bool patch;
2417         u32 tmp, tmp2;
2418
2419         tmp = RREG32(MC_SEQ_MISC0);
2420         patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2421
2422         if (patch &&
2423             ((rdev->pdev->device == 0x67B0) ||
2424              (rdev->pdev->device == 0x67B1))) {
2425                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2426                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2427                         *dram_timimg2 &= ~0x00ff0000;
2428                         *dram_timimg2 |= tmp2 << 16;
2429                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2430                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2431                         *dram_timimg2 &= ~0x00ff0000;
2432                         *dram_timimg2 |= tmp2 << 16;
2433                 }
2434         }
2435 }
2436
2437
2438 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2439                                                 u32 sclk,
2440                                                 u32 mclk,
2441                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2442 {
2443         u32 dram_timing;
2444         u32 dram_timing2;
2445         u32 burst_time;
2446
2447         radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2448
2449         dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2450         dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2451         burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2452
2453         ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2454
2455         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2456         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2457         arb_regs->McArbBurstTime = (u8)burst_time;
2458
2459         return 0;
2460 }
2461
2462 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2463 {
2464         struct ci_power_info *pi = ci_get_pi(rdev);
2465         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2466         u32 i, j;
2467         int ret =  0;
2468
2469         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2470
2471         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2472                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2473                         ret = ci_populate_memory_timing_parameters(rdev,
2474                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2475                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2476                                                                    &arb_regs.entries[i][j]);
2477                         if (ret)
2478                                 break;
2479                 }
2480         }
2481
2482         if (ret == 0)
2483                 ret = ci_copy_bytes_to_smc(rdev,
2484                                            pi->arb_table_start,
2485                                            (u8 *)&arb_regs,
2486                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2487                                            pi->sram_end);
2488
2489         return ret;
2490 }
2491
2492 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2493 {
2494         struct ci_power_info *pi = ci_get_pi(rdev);
2495
2496         if (pi->need_update_smu7_dpm_table == 0)
2497                 return 0;
2498
2499         return ci_do_program_memory_timing_parameters(rdev);
2500 }
2501
2502 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2503                                           struct radeon_ps *radeon_boot_state)
2504 {
2505         struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2506         struct ci_power_info *pi = ci_get_pi(rdev);
2507         u32 level = 0;
2508
2509         for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2510                 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2511                     boot_state->performance_levels[0].sclk) {
2512                         pi->smc_state_table.GraphicsBootLevel = level;
2513                         break;
2514                 }
2515         }
2516
2517         for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2518                 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2519                     boot_state->performance_levels[0].mclk) {
2520                         pi->smc_state_table.MemoryBootLevel = level;
2521                         break;
2522                 }
2523         }
2524 }
2525
2526 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2527 {
2528         u32 i;
2529         u32 mask_value = 0;
2530
2531         for (i = dpm_table->count; i > 0; i--) {
2532                 mask_value = mask_value << 1;
2533                 if (dpm_table->dpm_levels[i-1].enabled)
2534                         mask_value |= 0x1;
2535                 else
2536                         mask_value &= 0xFFFFFFFE;
2537         }
2538
2539         return mask_value;
2540 }
2541
2542 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2543                                        SMU7_Discrete_DpmTable *table)
2544 {
2545         struct ci_power_info *pi = ci_get_pi(rdev);
2546         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2547         u32 i;
2548
2549         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2550                 table->LinkLevel[i].PcieGenSpeed =
2551                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2552                 table->LinkLevel[i].PcieLaneCount =
2553                         r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2554                 table->LinkLevel[i].EnabledForActivity = 1;
2555                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2556                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2557         }
2558
2559         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2560         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2561                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2562 }
2563
2564 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2565                                      SMU7_Discrete_DpmTable *table)
2566 {
2567         u32 count;
2568         struct atom_clock_dividers dividers;
2569         int ret = -EINVAL;
2570
2571         table->UvdLevelCount =
2572                 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2573
2574         for (count = 0; count < table->UvdLevelCount; count++) {
2575                 table->UvdLevel[count].VclkFrequency =
2576                         rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2577                 table->UvdLevel[count].DclkFrequency =
2578                         rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2579                 table->UvdLevel[count].MinVddc =
2580                         rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2581                 table->UvdLevel[count].MinVddcPhases = 1;
2582
2583                 ret = radeon_atom_get_clock_dividers(rdev,
2584                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2585                                                      table->UvdLevel[count].VclkFrequency, false, &dividers);
2586                 if (ret)
2587                         return ret;
2588
2589                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2590
2591                 ret = radeon_atom_get_clock_dividers(rdev,
2592                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2593                                                      table->UvdLevel[count].DclkFrequency, false, &dividers);
2594                 if (ret)
2595                         return ret;
2596
2597                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2598
2599                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2600                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2601                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2602         }
2603
2604         return ret;
2605 }
2606
2607 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2608                                      SMU7_Discrete_DpmTable *table)
2609 {
2610         u32 count;
2611         struct atom_clock_dividers dividers;
2612         int ret = -EINVAL;
2613
2614         table->VceLevelCount =
2615                 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2616
2617         for (count = 0; count < table->VceLevelCount; count++) {
2618                 table->VceLevel[count].Frequency =
2619                         rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2620                 table->VceLevel[count].MinVoltage =
2621                         (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2622                 table->VceLevel[count].MinPhases = 1;
2623
2624                 ret = radeon_atom_get_clock_dividers(rdev,
2625                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2626                                                      table->VceLevel[count].Frequency, false, &dividers);
2627                 if (ret)
2628                         return ret;
2629
2630                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2631
2632                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2633                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2634         }
2635
2636         return ret;
2637
2638 }
2639
2640 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2641                                      SMU7_Discrete_DpmTable *table)
2642 {
2643         u32 count;
2644         struct atom_clock_dividers dividers;
2645         int ret = -EINVAL;
2646
2647         table->AcpLevelCount = (u8)
2648                 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2649
2650         for (count = 0; count < table->AcpLevelCount; count++) {
2651                 table->AcpLevel[count].Frequency =
2652                         rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2653                 table->AcpLevel[count].MinVoltage =
2654                         rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2655                 table->AcpLevel[count].MinPhases = 1;
2656
2657                 ret = radeon_atom_get_clock_dividers(rdev,
2658                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2659                                                      table->AcpLevel[count].Frequency, false, &dividers);
2660                 if (ret)
2661                         return ret;
2662
2663                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2664
2665                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2666                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2667         }
2668
2669         return ret;
2670 }
2671
2672 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2673                                       SMU7_Discrete_DpmTable *table)
2674 {
2675         u32 count;
2676         struct atom_clock_dividers dividers;
2677         int ret = -EINVAL;
2678
2679         table->SamuLevelCount =
2680                 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2681
2682         for (count = 0; count < table->SamuLevelCount; count++) {
2683                 table->SamuLevel[count].Frequency =
2684                         rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2685                 table->SamuLevel[count].MinVoltage =
2686                         rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2687                 table->SamuLevel[count].MinPhases = 1;
2688
2689                 ret = radeon_atom_get_clock_dividers(rdev,
2690                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2691                                                      table->SamuLevel[count].Frequency, false, &dividers);
2692                 if (ret)
2693                         return ret;
2694
2695                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2696
2697                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2698                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2699         }
2700
2701         return ret;
2702 }
2703
/* Compute the MPLL register values for a memory clock and store them
 * (in CPU byte order) into the SMC memory level. Starts from the
 * boot-time register snapshot in pi->clock_registers, asks the vbios
 * for the MPLL dividers, then patches in the divider fields, optional
 * spread spectrum values, and the DLL power-down bits.
 * Returns 0 on success or the vbios divider-lookup error.
 */
static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	/* Boot-time register values; only the fields below are modified. */
	u32  dll_cntl = pi->clock_registers.dll_cntl;
	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	/* Have the vbios compute the MPLL dividers for this clock. */
	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	/* The DQ post divider/yclk select is only programmed for GDDR5. */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->caps_mclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		/* Nominal VCO frequency: memory clock times the data rate
		 * (4x for QDR, 2x otherwise) and the post divider.
		 */
		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		/* tmp = (freq_nom / ref)^2, used in the CLKV computation. */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	/* PDNB bits keep the memory DLLs powered when set. */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	/* Hand the computed values back; the caller performs any
	 * endian conversion required by the SMC.
	 */
	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
2790
/* Fill one SMC memory dpm level for the given memory clock: look up
 * the minimum voltages from the dependency tables, decide the stutter,
 * strobe, EDC and DLL settings, compute the MPLL registers, and
 * finally convert all multi-byte fields to the SMC's big-endian
 * layout. Returns 0 on success or a negative error code.
 */
static int ci_populate_single_memory_level(struct radeon_device *rdev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;
	bool dll_state_on;

	/* Minimum voltages for this mclk from the dependency tables
	 * (each table is optional).
	 */
	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* Memory stutter only below the threshold clock, with UVD idle,
	 * display stutter enabled, and at most two active crtcs.
	 */
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (pi->uvd_enabled == false) &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (pi->mem_gddr5) {
		memory_level->StrobeRatio =
			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		/* DLL on/off comes from MC_SEQ fuse registers; which one is
		 * consulted depends on the strobe-mode frequency ratio.
		 */
		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	/* Convert everything to the SMC's big-endian layout; voltages are
	 * also scaled by VOLTAGE_SCALE here.
	 */
	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
2904
2905 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2906                                       SMU7_Discrete_DpmTable *table)
2907 {
2908         struct ci_power_info *pi = ci_get_pi(rdev);
2909         struct atom_clock_dividers dividers;
2910         SMU7_Discrete_VoltageLevel voltage_level;
2911         u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2912         u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2913         u32 dll_cntl = pi->clock_registers.dll_cntl;
2914         u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2915         int ret;
2916
2917         table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2918
2919         if (pi->acpi_vddc)
2920                 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2921         else
2922                 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2923
2924         table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2925
2926         table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2927
2928         ret = radeon_atom_get_clock_dividers(rdev,
2929                                              COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2930                                              table->ACPILevel.SclkFrequency, false, &dividers);
2931         if (ret)
2932                 return ret;
2933
2934         table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2935         table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2936         table->ACPILevel.DeepSleepDivId = 0;
2937
2938         spll_func_cntl &= ~SPLL_PWRON;
2939         spll_func_cntl |= SPLL_RESET;
2940
2941         spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2942         spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2943
2944         table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2945         table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2946         table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2947         table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2948         table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2949         table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2950         table->ACPILevel.CcPwrDynRm = 0;
2951         table->ACPILevel.CcPwrDynRm1 = 0;
2952
2953         table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2954         table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2955         table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2956         table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2957         table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2958         table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2959         table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2960         table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2961         table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2962         table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2963         table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2964
2965         table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2966         table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2967
2968         if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2969                 if (pi->acpi_vddci)
2970                         table->MemoryACPILevel.MinVddci =
2971                                 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2972                 else
2973                         table->MemoryACPILevel.MinVddci =
2974                                 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2975         }
2976
2977         if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2978                 table->MemoryACPILevel.MinMvdd = 0;
2979         else
2980                 table->MemoryACPILevel.MinMvdd =
2981                         cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2982
2983         mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2984         mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2985
2986         dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2987
2988         table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2989         table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2990         table->MemoryACPILevel.MpllAdFuncCntl =
2991                 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2992         table->MemoryACPILevel.MpllDqFuncCntl =
2993                 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2994         table->MemoryACPILevel.MpllFuncCntl =
2995                 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2996         table->MemoryACPILevel.MpllFuncCntl_1 =
2997                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2998         table->MemoryACPILevel.MpllFuncCntl_2 =
2999                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3000         table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3001         table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3002
3003         table->MemoryACPILevel.EnabledForThrottle = 0;
3004         table->MemoryACPILevel.EnabledForActivity = 0;
3005         table->MemoryACPILevel.UpH = 0;
3006         table->MemoryACPILevel.DownH = 100;
3007         table->MemoryACPILevel.VoltageDownH = 0;
3008         table->MemoryACPILevel.ActivityLevel =
3009                 cpu_to_be16((u16)pi->mclk_activity_target);
3010
3011         table->MemoryACPILevel.StutterEnable = false;
3012         table->MemoryACPILevel.StrobeEnable = false;
3013         table->MemoryACPILevel.EdcReadEnable = false;
3014         table->MemoryACPILevel.EdcWriteEnable = false;
3015         table->MemoryACPILevel.RttEnable = false;
3016
3017         return 0;
3018 }
3019
3020
3021 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3022 {
3023         struct ci_power_info *pi = ci_get_pi(rdev);
3024         struct ci_ulv_parm *ulv = &pi->ulv;
3025
3026         if (ulv->supported) {
3027                 if (enable)
3028                         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3029                                 0 : -EINVAL;
3030                 else
3031                         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3032                                 0 : -EINVAL;
3033         }
3034
3035         return 0;
3036 }
3037
3038 static int ci_populate_ulv_level(struct radeon_device *rdev,
3039                                  SMU7_Discrete_Ulv *state)
3040 {
3041         struct ci_power_info *pi = ci_get_pi(rdev);
3042         u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3043
3044         state->CcPwrDynRm = 0;
3045         state->CcPwrDynRm1 = 0;
3046
3047         if (ulv_voltage == 0) {
3048                 pi->ulv.supported = false;
3049                 return 0;
3050         }
3051
3052         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3053                 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3054                         state->VddcOffset = 0;
3055                 else
3056                         state->VddcOffset =
3057                                 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3058         } else {
3059                 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3060                         state->VddcOffsetVid = 0;
3061                 else
3062                         state->VddcOffsetVid = (u8)
3063                                 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3064                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3065         }
3066         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3067
3068         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3069         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3070         state->VddcOffset = cpu_to_be16(state->VddcOffset);
3071
3072         return 0;
3073 }
3074
/*
 * Compute the SPLL register values for running the engine clock at
 * @engine_clock and store them in @sclk.  The clock dividers come from
 * the VBIOS via radeon_atom_get_clock_dividers(); the starting register
 * values are the defaults cached in pi->clock_registers.  When engine
 * spread spectrum is supported and the VBIOS provides SS info for the
 * resulting VCO frequency, the spread spectrum registers are programmed
 * as well.  Returns 0 on success or the ATOM divider lookup error.
 */
static int ci_calculate_sclk_params(struct radeon_device *rdev,
                                    u32 engine_clock,
                                    SMU7_Discrete_GraphicsLevel *sclk)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct atom_clock_dividers dividers;
        u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
        u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
        u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
        u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
        u32 reference_clock = rdev->clock.spll.reference_freq;
        u32 reference_divider;
        u32 fbdiv;
        int ret;

        ret = radeon_atom_get_clock_dividers(rdev,
                                             COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
                                             engine_clock, false, &dividers);
        if (ret)
                return ret;

        reference_divider = 1 + dividers.ref_div;
        /* feedback divider is a 26-bit field */
        fbdiv = dividers.fb_div & 0x3FFFFFF;

        spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
        spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
        spll_func_cntl_3 |= SPLL_DITHEN;

        if (pi->caps_sclk_ss_support) {
                struct radeon_atom_ss ss;
                u32 vco_freq = engine_clock * dividers.post_div;

                if (radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
                        /* derive SS step/delta from the VBIOS rate/percentage */
                        u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
                        u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

                        cg_spll_spread_spectrum &= ~CLK_S_MASK;
                        cg_spll_spread_spectrum |= CLK_S(clk_s);
                        cg_spll_spread_spectrum |= SSEN;

                        cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
                        cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
                }
        }

        /* values are left in CPU byte order; callers byteswap for the SMC */
        sclk->SclkFrequency = engine_clock;
        sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
        sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
        sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
        sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
        sclk->SclkDid = (u8)dividers.post_divider;

        return 0;
}
3130
/*
 * Populate one SMU7 graphics DPM level for @engine_clock: SPLL
 * parameters, minimum VDDC (looked up from the sclk/vddc dependency
 * table), activity threshold, deep-sleep divider and watermark.
 * All multi-byte fields are converted to the SMC's big-endian layout
 * at the end, so the struct must not be re-read as CPU-endian after
 * this returns.  Returns 0 on success or a negative error code.
 */
static int ci_populate_single_graphic_level(struct radeon_device *rdev,
                                            u32 engine_clock,
                                            u16 sclk_activity_level_t,
                                            SMU7_Discrete_GraphicsLevel *graphic_level)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        int ret;

        ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
        if (ret)
                return ret;

        ret = ci_get_dependency_volt_by_clk(rdev,
                                            &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                            engine_clock, &graphic_level->MinVddc);
        if (ret)
                return ret;

        graphic_level->SclkFrequency = engine_clock;

        graphic_level->Flags =  0;
        graphic_level->MinVddcPhases = 1;

        if (pi->vddc_phase_shed_control)
                ci_populate_phase_value_based_on_sclk(rdev,
                                                      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
                                                      engine_clock,
                                                      &graphic_level->MinVddcPhases);

        graphic_level->ActivityLevel = sclk_activity_level_t;

        graphic_level->CcPwrDynRm = 0;
        graphic_level->CcPwrDynRm1 = 0;
        graphic_level->EnabledForThrottle = 1;
        graphic_level->UpH = 0;
        graphic_level->DownH = 0;
        graphic_level->VoltageDownH = 0;
        graphic_level->PowerThrottle = 0;

        /* only used when sclk deep sleep is enabled */
        if (pi->caps_sclk_ds)
                graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
                                                                                   engine_clock,
                                                                                   CISLAND_MINIMUM_ENGINE_CLOCK);

        /* caller overrides this to HIGH for the top level */
        graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

        /* byteswap everything for the big-endian SMC */
        graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
        graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
        graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
        graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
        graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
        graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
        graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
        graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
        graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
        graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
        graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

        return 0;
}
3191
3192 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3193 {
3194         struct ci_power_info *pi = ci_get_pi(rdev);
3195         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3196         u32 level_array_address = pi->dpm_table_start +
3197                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3198         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3199                 SMU7_MAX_LEVELS_GRAPHICS;
3200         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3201         u32 i, ret;
3202
3203         memset(levels, 0, level_array_size);
3204
3205         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3206                 ret = ci_populate_single_graphic_level(rdev,
3207                                                        dpm_table->sclk_table.dpm_levels[i].value,
3208                                                        (u16)pi->activity_target[i],
3209                                                        &pi->smc_state_table.GraphicsLevel[i]);
3210                 if (ret)
3211                         return ret;
3212                 if (i > 1)
3213                         pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3214                 if (i == (dpm_table->sclk_table.count - 1))
3215                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3216                                 PPSMC_DISPLAY_WATERMARK_HIGH;
3217         }
3218         pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3219
3220         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3221         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3222                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3223
3224         ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3225                                    (u8 *)levels, level_array_size,
3226                                    pi->sram_end);
3227         if (ret)
3228                 return ret;
3229
3230         return 0;
3231 }
3232
/*
 * Thin wrapper kept for naming symmetry with the other
 * ci_populate_*_state() helpers; all the work is done in
 * ci_populate_ulv_level().
 */
static int ci_populate_ulv_state(struct radeon_device *rdev,
                                 SMU7_Discrete_Ulv *ulv_level)
{
        return ci_populate_ulv_level(rdev, ulv_level);
}
3238
3239 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3240 {
3241         struct ci_power_info *pi = ci_get_pi(rdev);
3242         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3243         u32 level_array_address = pi->dpm_table_start +
3244                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3245         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3246                 SMU7_MAX_LEVELS_MEMORY;
3247         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3248         u32 i, ret;
3249
3250         memset(levels, 0, level_array_size);
3251
3252         for (i = 0; i < dpm_table->mclk_table.count; i++) {
3253                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3254                         return -EINVAL;
3255                 ret = ci_populate_single_memory_level(rdev,
3256                                                       dpm_table->mclk_table.dpm_levels[i].value,
3257                                                       &pi->smc_state_table.MemoryLevel[i]);
3258                 if (ret)
3259                         return ret;
3260         }
3261
3262         pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3263
3264         if ((dpm_table->mclk_table.count >= 2) &&
3265             ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3266                 pi->smc_state_table.MemoryLevel[1].MinVddc =
3267                         pi->smc_state_table.MemoryLevel[0].MinVddc;
3268                 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3269                         pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3270         }
3271
3272         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3273
3274         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3275         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3276                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3277
3278         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3279                 PPSMC_DISPLAY_WATERMARK_HIGH;
3280
3281         ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3282                                    (u8 *)levels, level_array_size,
3283                                    pi->sram_end);
3284         if (ret)
3285                 return ret;
3286
3287         return 0;
3288 }
3289
3290 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3291                                       struct ci_single_dpm_table* dpm_table,
3292                                       u32 count)
3293 {
3294         u32 i;
3295
3296         dpm_table->count = count;
3297         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3298                 dpm_table->dpm_levels[i].enabled = false;
3299 }
3300
3301 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3302                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3303 {
3304         dpm_table->dpm_levels[index].value = pcie_gen;
3305         dpm_table->dpm_levels[index].param1 = pcie_lanes;
3306         dpm_table->dpm_levels[index].enabled = true;
3307 }
3308
/*
 * Build the default 6-entry PCIe speed/lane DPM table from the
 * performance and powersaving limits.  If only one of the two limit
 * sets is valid, the missing one is cloned from the other.  Returns
 * -EINVAL when neither limit set is usable, 0 otherwise.
 *
 * Table layout (index: gen/lanes):
 *   0: powersaving.min / powersaving.min (Bonaire uses .max lanes)
 *   1: performance.min / performance.min
 *   2: powersaving.min / powersaving.max
 *   3: performance.min / performance.max
 *   4: powersaving.max / powersaving.max
 *   5: performance.max / performance.max
 */
static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);

        if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
                return -EINVAL;

        /* mirror whichever limit set is present into the missing one */
        if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
                pi->pcie_gen_powersaving = pi->pcie_gen_performance;
                pi->pcie_lane_powersaving = pi->pcie_lane_performance;
        } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
                pi->pcie_gen_performance = pi->pcie_gen_powersaving;
                pi->pcie_lane_performance = pi->pcie_lane_powersaving;
        }

        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.pcie_speed_table,
                                  SMU7_MAX_LEVELS_LINK);

        /* Bonaire wants max lanes on the lowest level */
        if (rdev->family == CHIP_BONAIRE)
                ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
                                          pi->pcie_gen_powersaving.min,
                                          pi->pcie_lane_powersaving.max);
        else
                ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
                                          pi->pcie_gen_powersaving.min,
                                          pi->pcie_lane_powersaving.min);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
                                  pi->pcie_gen_performance.min,
                                  pi->pcie_lane_performance.min);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
                                  pi->pcie_gen_powersaving.min,
                                  pi->pcie_lane_powersaving.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
                                  pi->pcie_gen_performance.min,
                                  pi->pcie_lane_performance.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
                                  pi->pcie_gen_powersaving.max,
                                  pi->pcie_lane_powersaving.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
                                  pi->pcie_gen_performance.max,
                                  pi->pcie_lane_performance.max);

        pi->dpm_table.pcie_speed_table.count = 6;

        return 0;
}
3356
3357 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3358 {
3359         struct ci_power_info *pi = ci_get_pi(rdev);
3360         struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3361                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3362         struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3363                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3364         struct radeon_cac_leakage_table *std_voltage_table =
3365                 &rdev->pm.dpm.dyn_state.cac_leakage_table;
3366         u32 i;
3367
3368         if (allowed_sclk_vddc_table == NULL)
3369                 return -EINVAL;
3370         if (allowed_sclk_vddc_table->count < 1)
3371                 return -EINVAL;
3372         if (allowed_mclk_table == NULL)
3373                 return -EINVAL;
3374         if (allowed_mclk_table->count < 1)
3375                 return -EINVAL;
3376
3377         memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3378
3379         ci_reset_single_dpm_table(rdev,
3380                                   &pi->dpm_table.sclk_table,
3381                                   SMU7_MAX_LEVELS_GRAPHICS);
3382         ci_reset_single_dpm_table(rdev,
3383                                   &pi->dpm_table.mclk_table,
3384                                   SMU7_MAX_LEVELS_MEMORY);
3385         ci_reset_single_dpm_table(rdev,
3386                                   &pi->dpm_table.vddc_table,
3387                                   SMU7_MAX_LEVELS_VDDC);
3388         ci_reset_single_dpm_table(rdev,
3389                                   &pi->dpm_table.vddci_table,
3390                                   SMU7_MAX_LEVELS_VDDCI);
3391         ci_reset_single_dpm_table(rdev,
3392                                   &pi->dpm_table.mvdd_table,
3393                                   SMU7_MAX_LEVELS_MVDD);
3394
3395         pi->dpm_table.sclk_table.count = 0;
3396         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3397                 if ((i == 0) ||
3398                     (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3399                      allowed_sclk_vddc_table->entries[i].clk)) {
3400                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3401                                 allowed_sclk_vddc_table->entries[i].clk;
3402                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3403                                 (i == 0) ? true : false;
3404                         pi->dpm_table.sclk_table.count++;
3405                 }
3406         }
3407
3408         pi->dpm_table.mclk_table.count = 0;
3409         for (i = 0; i < allowed_mclk_table->count; i++) {
3410                 if ((i == 0) ||
3411                     (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3412                      allowed_mclk_table->entries[i].clk)) {
3413                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3414                                 allowed_mclk_table->entries[i].clk;
3415                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3416                                 (i == 0) ? true : false;
3417                         pi->dpm_table.mclk_table.count++;
3418                 }
3419         }
3420
3421         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3422                 pi->dpm_table.vddc_table.dpm_levels[i].value =
3423                         allowed_sclk_vddc_table->entries[i].v;
3424                 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3425                         std_voltage_table->entries[i].leakage;
3426                 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3427         }
3428         pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3429
3430         allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3431         if (allowed_mclk_table) {
3432                 for (i = 0; i < allowed_mclk_table->count; i++) {
3433                         pi->dpm_table.vddci_table.dpm_levels[i].value =
3434                                 allowed_mclk_table->entries[i].v;
3435                         pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3436                 }
3437                 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3438         }
3439
3440         allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3441         if (allowed_mclk_table) {
3442                 for (i = 0; i < allowed_mclk_table->count; i++) {
3443                         pi->dpm_table.mvdd_table.dpm_levels[i].value =
3444                                 allowed_mclk_table->entries[i].v;
3445                         pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3446                 }
3447                 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3448         }
3449
3450         ci_setup_default_pcie_tables(rdev);
3451
3452         return 0;
3453 }
3454
3455 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3456                               u32 value, u32 *boot_level)
3457 {
3458         u32 i;
3459         int ret = -EINVAL;
3460
3461         for(i = 0; i < table->count; i++) {
3462                 if (value == table->dpm_levels[i].value) {
3463                         *boot_level = i;
3464                         ret = 0;
3465                 }
3466         }
3467
3468         return ret;
3469 }
3470
/*
 * Build the entire SMU7 discrete DPM table (levels, boot state, BAPM
 * and scheduling parameters) and upload it to SMC RAM.  The populate
 * steps must run in this order: the default dpm tables feed the level
 * builders, and all multi-byte fields are byteswapped to the SMC's
 * big-endian layout only after every CPU-endian read of them is done.
 * Returns 0 on success or a negative error code.
 */
static int ci_init_smc_table(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ulv_parm *ulv = &pi->ulv;
        struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
        SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
        int ret;

        ret = ci_setup_default_dpm_tables(rdev);
        if (ret)
                return ret;

        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
                ci_populate_smc_voltage_tables(rdev, table);

        ci_init_fps_limits(rdev);

        /* platform capability flags from the VBIOS powerplay table */
        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

        if (pi->mem_gddr5)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

        if (ulv->supported) {
                ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
                if (ret)
                        return ret;
                WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
        }

        ret = ci_populate_all_graphic_levels(rdev);
        if (ret)
                return ret;

        ret = ci_populate_all_memory_levels(rdev);
        if (ret)
                return ret;

        ci_populate_smc_link_level(rdev, table);

        ret = ci_populate_smc_acpi_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_vce_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_acp_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_samu_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_do_program_memory_timing_parameters(rdev);
        if (ret)
                return ret;

        ret = ci_populate_smc_uvd_level(rdev, table);
        if (ret)
                return ret;

        table->UvdBootLevel  = 0;
        table->VceBootLevel  = 0;
        table->AcpBootLevel  = 0;
        table->SamuBootLevel  = 0;
        table->GraphicsBootLevel  = 0;
        table->MemoryBootLevel  = 0;

        /* NOTE(review): the return values of these two lookups are
         * assigned to ret but never checked; on failure the boot level
         * silently stays at the 0 set just above.  Confirm whether that
         * fallback is intentional before changing it.
         */
        ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
                                 pi->vbios_boot_state.sclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

        ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
                                 pi->vbios_boot_state.mclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.MemoryBootLevel);

        table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
        table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
        table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

        ci_populate_smc_initial_state(rdev, radeon_boot_state);

        ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
        if (ret)
                return ret;

        /* scheduling / throttling parameters */
        table->UVDInterval = 1;
        table->VCEInterval = 1;
        table->ACPInterval = 1;
        table->SAMUInterval = 1;
        table->GraphicsVoltageChangeEnable = 1;
        table->GraphicsThermThrottleEnable = 1;
        table->GraphicsInterval = 1;
        table->VoltageInterval = 1;
        table->ThermalInterval = 1;
        /* thermal limits are passed to the SMC in Q8.8 degrees C */
        table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
                                            CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->MemoryVoltageChangeEnable = 1;
        table->MemoryInterval = 1;
        table->VoltageResponseTime = 0;
        table->VddcVddciDelta = 4000;
        table->PhaseResponseTime = 0;
        table->MemoryThermThrottleEnable = 1;
        table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
        table->PCIeGenInterval = 1;
        if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
                table->SVI2Enable  = 1;
        else
                table->SVI2Enable  = 0;

        table->ThermGpio = 17;
        table->SclkStepSize = 0x4000;

        /* byteswap all multi-byte fields for the big-endian SMC */
        table->SystemFlags = cpu_to_be32(table->SystemFlags);
        table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
        table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
        table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
        table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
        table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
        table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
        table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
        table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
        table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
        table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
        table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
        table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
        table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

        /* upload everything except the trailing PID controllers */
        ret = ci_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
                                   (u8 *)&table->SystemFlags,
                                   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
                                   pi->sram_end);
        if (ret)
                return ret;

        return 0;
}
3618
3619 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3620                                       struct ci_single_dpm_table *dpm_table,
3621                                       u32 low_limit, u32 high_limit)
3622 {
3623         u32 i;
3624
3625         for (i = 0; i < dpm_table->count; i++) {
3626                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3627                     (dpm_table->dpm_levels[i].value > high_limit))
3628                         dpm_table->dpm_levels[i].enabled = false;
3629                 else
3630                         dpm_table->dpm_levels[i].enabled = true;
3631         }
3632 }
3633
3634 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3635                                     u32 speed_low, u32 lanes_low,
3636                                     u32 speed_high, u32 lanes_high)
3637 {
3638         struct ci_power_info *pi = ci_get_pi(rdev);
3639         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3640         u32 i, j;
3641
3642         for (i = 0; i < pcie_table->count; i++) {
3643                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3644                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3645                     (pcie_table->dpm_levels[i].value > speed_high) ||
3646                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3647                         pcie_table->dpm_levels[i].enabled = false;
3648                 else
3649                         pcie_table->dpm_levels[i].enabled = true;
3650         }
3651
3652         for (i = 0; i < pcie_table->count; i++) {
3653                 if (pcie_table->dpm_levels[i].enabled) {
3654                         for (j = i + 1; j < pcie_table->count; j++) {
3655                                 if (pcie_table->dpm_levels[j].enabled) {
3656                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3657                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3658                                                 pcie_table->dpm_levels[j].enabled = false;
3659                                 }
3660                         }
3661                 }
3662         }
3663 }
3664
3665 static int ci_trim_dpm_states(struct radeon_device *rdev,
3666                               struct radeon_ps *radeon_state)
3667 {
3668         struct ci_ps *state = ci_get_ps(radeon_state);
3669         struct ci_power_info *pi = ci_get_pi(rdev);
3670         u32 high_limit_count;
3671
3672         if (state->performance_level_count < 1)
3673                 return -EINVAL;
3674
3675         if (state->performance_level_count == 1)
3676                 high_limit_count = 0;
3677         else
3678                 high_limit_count = 1;
3679
3680         ci_trim_single_dpm_states(rdev,
3681                                   &pi->dpm_table.sclk_table,
3682                                   state->performance_levels[0].sclk,
3683                                   state->performance_levels[high_limit_count].sclk);
3684
3685         ci_trim_single_dpm_states(rdev,
3686                                   &pi->dpm_table.mclk_table,
3687                                   state->performance_levels[0].mclk,
3688                                   state->performance_levels[high_limit_count].mclk);
3689
3690         ci_trim_pcie_dpm_states(rdev,
3691                                 state->performance_levels[0].pcie_gen,
3692                                 state->performance_levels[0].pcie_lane,
3693                                 state->performance_levels[high_limit_count].pcie_gen,
3694                                 state->performance_levels[high_limit_count].pcie_lane);
3695
3696         return 0;
3697 }
3698
/* Request the minimum VDDC needed for the current display clock from
 * the SMC.  The voltage tied to the active DISPCLK is looked up first,
 * then rounded up to the nearest level in the VDDC-on-SCLK table.
 * Returns 0 on success, -EINVAL on a missing/empty table, no matching
 * VDDC level, or an SMC failure. */
static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
{
        struct radeon_clock_voltage_dependency_table *disp_voltage_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
        struct radeon_clock_voltage_dependency_table *vddc_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        u32 requested_voltage = 0;
        u32 i;

        if (disp_voltage_table == NULL)
                return -EINVAL;
        if (!disp_voltage_table->count)
                return -EINVAL;

        /* Voltage required by the currently programmed display clock. */
        for (i = 0; i < disp_voltage_table->count; i++) {
                if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
                        requested_voltage = disp_voltage_table->entries[i].v;
        }

        /* Round up to the first VDDC level covering the request and send
         * it to the SMC (the value is scaled by VOLTAGE_SCALE). */
        for (i = 0; i < vddc_table->count; i++) {
                if (requested_voltage <= vddc_table->entries[i].v) {
                        requested_voltage = vddc_table->entries[i].v;
                        return (ci_send_msg_to_smc_with_parameter(rdev,
                                                                  PPSMC_MSG_VddC_Request,
                                                                  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
                                0 : -EINVAL;
                }
        }

        return -EINVAL;
}
3730
3731 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3732 {
3733         struct ci_power_info *pi = ci_get_pi(rdev);
3734         PPSMC_Result result;
3735
3736         if (!pi->sclk_dpm_key_disabled) {
3737                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3738                         result = ci_send_msg_to_smc_with_parameter(rdev,
3739                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3740                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3741                         if (result != PPSMC_Result_OK)
3742                                 return -EINVAL;
3743                 }
3744         }
3745
3746         if (!pi->mclk_dpm_key_disabled) {
3747                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3748                         result = ci_send_msg_to_smc_with_parameter(rdev,
3749                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3750                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3751                         if (result != PPSMC_Result_OK)
3752                                 return -EINVAL;
3753                 }
3754         }
3755
3756         if (!pi->pcie_dpm_key_disabled) {
3757                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3758                         result = ci_send_msg_to_smc_with_parameter(rdev,
3759                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
3760                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3761                         if (result != PPSMC_Result_OK)
3762                                 return -EINVAL;
3763                 }
3764         }
3765
3766         ci_apply_disp_minimum_voltage_request(rdev);
3767
3768         return 0;
3769 }
3770
/* Work out which parts of the SMU DPM table must be re-uploaded for the
 * requested state and record the result in pi->need_update_smu7_dpm_table.
 * The highest performance level's sclk/mclk are compared against the
 * current dpm tables; a clock not found in a table means the table's top
 * entry must be overdriven (OD update). */
static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
                                                   struct radeon_ps *radeon_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ps *state = ci_get_ps(radeon_state);
        struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
        u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
        struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
        u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
        u32 i;

        pi->need_update_smu7_dpm_table = 0;

        /* Is the target sclk already a level in the table? */
        for (i = 0; i < sclk_table->count; i++) {
                if (sclk == sclk_table->dpm_levels[i].value)
                        break;
        }

        if (i >= sclk_table->count) {
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
        } else {
                /* XXX check display min clock requirements */
                if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
                        pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
        }

        /* Same check for the target mclk. */
        for (i = 0; i < mclk_table->count; i++) {
                if (mclk == mclk_table->dpm_levels[i].value)
                        break;
        }

        if (i >= mclk_table->count)
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

        /* A change in active display count requires an mclk refresh. */
        if (rdev->pm.dpm.current_active_crtc_count !=
            rdev->pm.dpm.new_active_crtc_count)
                pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
3809
/* Refresh the SMC copies of the graphics/memory DPM levels flagged stale
 * by ci_find_dpm_states_clocks_in_dpm_table().  For overdriven clocks the
 * top table entry is rewritten to the requested value before the levels
 * are re-populated and uploaded.  Returns 0 on success or the error from
 * the populate helpers. */
static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
                                                       struct radeon_ps *radeon_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ps *state = ci_get_ps(radeon_state);
        u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
        u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
        struct ci_dpm_table *dpm_table = &pi->dpm_table;
        int ret;

        if (!pi->need_update_smu7_dpm_table)
                return 0;

        /* Overdrive: replace the top level with the requested clock. */
        if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
                dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

        if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
                dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

        if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
                ret = ci_populate_all_graphic_levels(rdev);
                if (ret)
                        return ret;
        }

        if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
                ret = ci_populate_all_memory_levels(rdev);
                if (ret)
                        return ret;
        }

        return 0;
}
3843
/* Enable or disable UVD DPM via the SMC.
 * On enable: build the UVD level enable mask from the dependency-table
 * entries whose voltage fits under the active (AC or DC) VDDC limit and
 * send it down; MCLK level 0 is additionally masked off while UVD runs.
 * On disable: MCLK level 0 is made available again.
 * Returns 0 on success, -EINVAL if the SMC rejects the enable/disable
 * message. */
static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

                /* Walk from the top; without UVD DPM support only the
                 * single highest usable level is enabled. */
                for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_uvd_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_UVDDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

                /* Keep the lowest MCLK level off while UVD is active. */
                if (pi->last_mclk_dpm_enable_mask & 0x1) {
                        pi->uvd_enabled = true;
                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
                        ci_send_msg_to_smc_with_parameter(rdev,
                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
                }
        } else {
                /* Restore MCLK level 0 once UVD stops. */
                if (pi->last_mclk_dpm_enable_mask & 0x1) {
                        pi->uvd_enabled = false;
                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
                        ci_send_msg_to_smc_with_parameter(rdev,
                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
                }
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3892
/* Enable or disable VCE DPM via the SMC.
 * On enable: build the VCE level enable mask from the dependency-table
 * entries whose voltage fits under the active (AC or DC) VDDC limit and
 * send it down before turning VCE DPM on.
 * Returns 0 on success, -EINVAL if the SMC rejects the enable/disable
 * message. */
static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
                /* Walk from the top; without VCE DPM support only the
                 * single highest usable level is enabled. */
                for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_vce_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_VCEDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3924
3925 #if 0
/* Enable or disable SAMU DPM via the SMC (currently compiled out by the
 * surrounding #if 0).  On enable, the level mask is built from the
 * dependency-table entries whose voltage fits under the active (AC/DC)
 * VDDC limit; without SAMU DPM support only the single highest usable
 * level is enabled. */
static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
                for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_samu_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_SAMUDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
        }
        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3956
/* Enable or disable ACP DPM via the SMC (currently compiled out by the
 * surrounding #if 0).  Mirrors ci_enable_samu_dpm(): the enable mask is
 * built from dependency-table entries whose voltage fits under the
 * active (AC/DC) VDDC limit. */
static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
                for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_acp_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_ACPDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3988 #endif
3989
/* Gate or ungate UVD DPM.  When ungating, choose the UVD boot level
 * (0 if UVD DPM is supported or the dependency table is empty, otherwise
 * the table's top entry), program it into the SMC dpm table, then enable
 * UVD DPM; when gating, simply disable UVD DPM. */
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u32 tmp;

        if (!gate) {
                if (pi->caps_uvd_dpm ||
                    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
                        pi->smc_state_table.UvdBootLevel = 0;
                else
                        pi->smc_state_table.UvdBootLevel =
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

                /* Update the UvdBootLevel field of the in-SMC dpm table. */
                tmp = RREG32_SMC(DPM_TABLE_475);
                tmp &= ~UvdBootLevel_MASK;
                tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
                WREG32_SMC(DPM_TABLE_475, tmp);
        }

        return ci_enable_uvd_dpm(rdev, !gate);
}
4011
/* Pick the VCE boot level: the first dependency-table entry that meets
 * the minimum EVCLK, falling back to the last entry when none does. */
static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
{
        u8 i;
        u32 min_evclk = 30000; /* ??? */
        struct radeon_vce_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

        for (i = 0; i < table->count; i++) {
                if (table->entries[i].evclk >= min_evclk)
                        return i;
        }

        /* NOTE(review): an empty table makes this return 0xff (u8 wrap
         * of count - 1) -- presumably callers guarantee a non-empty
         * table; verify. */
        return table->count - 1;
}
4026
/* Switch VCE DPM on when the new state starts encoding (evclk != 0) and
 * off when it stops.  VCE clockgating is released before enabling DPM
 * and re-applied after disabling.  No-op when evclk is unchanged. */
static int ci_update_vce_dpm(struct radeon_device *rdev,
                             struct radeon_ps *radeon_new_state,
                             struct radeon_ps *radeon_current_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        int ret = 0;
        u32 tmp;

        if (radeon_current_state->evclk != radeon_new_state->evclk) {
                if (radeon_new_state->evclk) {
                        /* turn the clocks on when encoding */
                        cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);

                        /* Program the VCE boot level into the SMC table. */
                        pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
                        tmp = RREG32_SMC(DPM_TABLE_475);
                        tmp &= ~VceBootLevel_MASK;
                        tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
                        WREG32_SMC(DPM_TABLE_475, tmp);

                        ret = ci_enable_vce_dpm(rdev, true);
                } else {
                        /* turn the clocks off when not encoding */
                        cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);

                        ret = ci_enable_vce_dpm(rdev, false);
                }
        }
        return ret;
}
4056
4057 #if 0
/* Gate/ungate SAMU DPM; thin wrapper over ci_enable_samu_dpm()
 * (currently compiled out by the surrounding #if 0). */
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
        return ci_enable_samu_dpm(rdev, gate);
}
4062
/* Gate or ungate ACP DPM (currently compiled out by the surrounding
 * #if 0).  When ungating, program ACP boot level 0 into the SMC dpm
 * table before enabling. */
static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u32 tmp;

        if (!gate) {
                pi->smc_state_table.AcpBootLevel = 0;

                /* Update the AcpBootLevel field of the in-SMC dpm table. */
                tmp = RREG32_SMC(DPM_TABLE_475);
                tmp &= ~AcpBootLevel_MASK;
                tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
                WREG32_SMC(DPM_TABLE_475, tmp);
        }

        return ci_enable_acp_dpm(rdev, !gate);
}
4079 #endif
4080
/* Trim the dpm tables to the requested state and derive the sclk/mclk/
 * pcie level enable masks that will be uploaded to the SMC.  MCLK level
 * 0 stays masked off while UVD is active.  Returns 0 on success or the
 * error from trimming. */
static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
                                             struct radeon_ps *radeon_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        int ret;

        ret = ci_trim_dpm_states(rdev, radeon_state);
        if (ret)
                return ret;

        pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
                ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
        pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
                ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
        /* Remember the full mclk mask so UVD enable/disable can restore
         * level 0 later. */
        pi->last_mclk_dpm_enable_mask =
                pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
        if (pi->uvd_enabled) {
                if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
        }
        pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
                ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

        return 0;
}
4106
4107 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4108                                        u32 level_mask)
4109 {
4110         u32 level = 0;
4111
4112         while ((level_mask & (1 << level)) == 0)
4113                 level++;
4114
4115         return level;
4116 }
4117
4118
4119 int ci_dpm_force_performance_level(struct radeon_device *rdev,
4120                                    enum radeon_dpm_forced_level level)
4121 {
4122         struct ci_power_info *pi = ci_get_pi(rdev);
4123         u32 tmp, levels, i;
4124         int ret;
4125
4126         if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4127                 if ((!pi->sclk_dpm_key_disabled) &&
4128                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4129                         levels = 0;
4130                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4131                         while (tmp >>= 1)
4132                                 levels++;
4133                         if (levels) {
4134                                 ret = ci_dpm_force_state_sclk(rdev, levels);
4135                                 if (ret)
4136                                         return ret;
4137                                 for (i = 0; i < rdev->usec_timeout; i++) {
4138                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4139                                                CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4140                                         if (tmp == levels)
4141                                                 break;
4142                                         udelay(1);
4143                                 }
4144                         }
4145                 }
4146                 if ((!pi->mclk_dpm_key_disabled) &&
4147                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4148                         levels = 0;
4149                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4150                         while (tmp >>= 1)
4151                                 levels++;
4152                         if (levels) {
4153                                 ret = ci_dpm_force_state_mclk(rdev, levels);
4154                                 if (ret)
4155                                         return ret;
4156                                 for (i = 0; i < rdev->usec_timeout; i++) {
4157                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4158                                                CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4159                                         if (tmp == levels)
4160                                                 break;
4161                                         udelay(1);
4162                                 }
4163                         }
4164                 }
4165                 if ((!pi->pcie_dpm_key_disabled) &&
4166                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4167                         levels = 0;
4168                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4169                         while (tmp >>= 1)
4170                                 levels++;
4171                         if (levels) {
4172                                 ret = ci_dpm_force_state_pcie(rdev, level);
4173                                 if (ret)
4174                                         return ret;
4175                                 for (i = 0; i < rdev->usec_timeout; i++) {
4176                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4177                                                CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4178                                         if (tmp == levels)
4179                                                 break;
4180                                         udelay(1);
4181                                 }
4182                         }
4183                 }
4184         } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4185                 if ((!pi->sclk_dpm_key_disabled) &&
4186                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4187                         levels = ci_get_lowest_enabled_level(rdev,
4188                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4189                         ret = ci_dpm_force_state_sclk(rdev, levels);
4190                         if (ret)
4191                                 return ret;
4192                         for (i = 0; i < rdev->usec_timeout; i++) {
4193                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4194                                        CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4195                                 if (tmp == levels)
4196                                         break;
4197                                 udelay(1);
4198                         }
4199                 }
4200                 if ((!pi->mclk_dpm_key_disabled) &&
4201                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4202                         levels = ci_get_lowest_enabled_level(rdev,
4203                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4204                         ret = ci_dpm_force_state_mclk(rdev, levels);
4205                         if (ret)
4206                                 return ret;
4207                         for (i = 0; i < rdev->usec_timeout; i++) {
4208                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4209                                        CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4210                                 if (tmp == levels)
4211                                         break;
4212                                 udelay(1);
4213                         }
4214                 }
4215                 if ((!pi->pcie_dpm_key_disabled) &&
4216                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4217                         levels = ci_get_lowest_enabled_level(rdev,
4218                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4219                         ret = ci_dpm_force_state_pcie(rdev, levels);
4220                         if (ret)
4221                                 return ret;
4222                         for (i = 0; i < rdev->usec_timeout; i++) {
4223                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4224                                        CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4225                                 if (tmp == levels)
4226                                         break;
4227                                 udelay(1);
4228                         }
4229                 }
4230         } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4231                 ret = ci_upload_dpm_level_enable_mask(rdev);
4232                 if (ret)
4233                         return ret;
4234         }
4235
4236         rdev->pm.dpm.forced_level = level;
4237
4238         return 0;
4239 }
4240
4241 static int ci_set_mc_special_registers(struct radeon_device *rdev,
4242                                        struct ci_mc_reg_table *table)
4243 {
4244         struct ci_power_info *pi = ci_get_pi(rdev);
4245         u8 i, j, k;
4246         u32 temp_reg;
4247
4248         for (i = 0, j = table->last; i < table->last; i++) {
4249                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4250                         return -EINVAL;
4251                 switch(table->mc_reg_address[i].s1 << 2) {
4252                 case MC_SEQ_MISC1:
4253                         temp_reg = RREG32(MC_PMG_CMD_EMRS);
4254                         table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
4255                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4256                         for (k = 0; k < table->num_entries; k++) {
4257                                 table->mc_reg_table_entry[k].mc_data[j] =
4258                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4259                         }
4260                         j++;
4261                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4262                                 return -EINVAL;
4263
4264                         temp_reg = RREG32(MC_PMG_CMD_MRS);
4265                         table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
4266                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4267                         for (k = 0; k < table->num_entries; k++) {
4268                                 table->mc_reg_table_entry[k].mc_data[j] =
4269                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4270                                 if (!pi->mem_gddr5)
4271                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4272                         }
4273                         j++;
4274                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4275                                 return -EINVAL;
4276
4277                         if (!pi->mem_gddr5) {
4278                                 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
4279                                 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
4280                                 for (k = 0; k < table->num_entries; k++) {
4281                                         table->mc_reg_table_entry[k].mc_data[j] =
4282                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4283                                 }
4284                                 j++;
4285                                 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4286                                         return -EINVAL;
4287                         }
4288                         break;
4289                 case MC_SEQ_RESERVE_M:
4290                         temp_reg = RREG32(MC_PMG_CMD_MRS1);
4291                         table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
4292                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4293                         for (k = 0; k < table->num_entries; k++) {
4294                                 table->mc_reg_table_entry[k].mc_data[j] =
4295                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4296                         }
4297                         j++;
4298                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4299                                 return -EINVAL;
4300                         break;
4301                 default:
4302                         break;
4303                 }
4304
4305         }
4306
4307         table->last = j;
4308
4309         return 0;
4310 }
4311
4312 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4313 {
4314         bool result = true;
4315
4316         switch(in_reg) {
4317         case MC_SEQ_RAS_TIMING >> 2:
4318                 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4319                 break;
4320         case MC_SEQ_DLL_STBY >> 2:
4321                 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4322                 break;
4323         case MC_SEQ_G5PDX_CMD0 >> 2:
4324                 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4325                 break;
4326         case MC_SEQ_G5PDX_CMD1 >> 2:
4327                 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4328                 break;
4329         case MC_SEQ_G5PDX_CTRL >> 2:
4330                 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4331                 break;
4332         case MC_SEQ_CAS_TIMING >> 2:
4333                 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4334             break;
4335         case MC_SEQ_MISC_TIMING >> 2:
4336                 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4337                 break;
4338         case MC_SEQ_MISC_TIMING2 >> 2:
4339                 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4340                 break;
4341         case MC_SEQ_PMG_DVS_CMD >> 2:
4342                 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4343                 break;
4344         case MC_SEQ_PMG_DVS_CTL >> 2:
4345                 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4346                 break;
4347         case MC_SEQ_RD_CTL_D0 >> 2:
4348                 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4349                 break;
4350         case MC_SEQ_RD_CTL_D1 >> 2:
4351                 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4352                 break;
4353         case MC_SEQ_WR_CTL_D0 >> 2:
4354                 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4355                 break;
4356         case MC_SEQ_WR_CTL_D1 >> 2:
4357                 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4358                 break;
4359         case MC_PMG_CMD_EMRS >> 2:
4360                 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4361                 break;
4362         case MC_PMG_CMD_MRS >> 2:
4363                 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4364                 break;
4365         case MC_PMG_CMD_MRS1 >> 2:
4366                 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4367                 break;
4368         case MC_SEQ_PMG_TIMING >> 2:
4369                 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4370                 break;
4371         case MC_PMG_CMD_MRS2 >> 2:
4372                 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4373                 break;
4374         case MC_SEQ_WR_CTL_2 >> 2:
4375                 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4376                 break;
4377         default:
4378                 result = false;
4379                 break;
4380         }
4381
4382         return result;
4383 }
4384
4385 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4386 {
4387         u8 i, j;
4388
4389         for (i = 0; i < table->last; i++) {
4390                 for (j = 1; j < table->num_entries; j++) {
4391                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4392                             table->mc_reg_table_entry[j].mc_data[i]) {
4393                                 table->valid_flag |= 1 << i;
4394                                 break;
4395                         }
4396                 }
4397         }
4398 }
4399
4400 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4401 {
4402         u32 i;
4403         u16 address;
4404
4405         for (i = 0; i < table->last; i++) {
4406                 table->mc_reg_address[i].s0 =
4407                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4408                         address : table->mc_reg_address[i].s1;
4409         }
4410 }
4411
4412 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4413                                       struct ci_mc_reg_table *ci_table)
4414 {
4415         u8 i, j;
4416
4417         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4418                 return -EINVAL;
4419         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4420                 return -EINVAL;
4421
4422         for (i = 0; i < table->last; i++)
4423                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4424
4425         ci_table->last = table->last;
4426
4427         for (i = 0; i < table->num_entries; i++) {
4428                 ci_table->mc_reg_table_entry[i].mclk_max =
4429                         table->mc_reg_table_entry[i].mclk_max;
4430                 for (j = 0; j < table->last; j++)
4431                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4432                                 table->mc_reg_table_entry[i].mc_data[j];
4433         }
4434         ci_table->num_entries = table->num_entries;
4435
4436         return 0;
4437 }
4438
/* Apply board-specific MC sequencer patches for certain Hawaii SKUs
 * (PCI device ids 0x67B0/0x67B1) whose memory at the 1250/1375 MHz
 * strap points needs adjusted timings.  The magic masks/values below
 * come from AMD and must not be altered.  Returns 0 on success or
 * -EINVAL if the table is over-sized.
 */
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	/* MC_SEQ_MISC0 bits [11:8] == 3 identifies the memory strap that
	 * needs patching. */
	tmp = RREG32(MC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((rdev->pdev->device == 0x67B0) ||
	     (rdev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			/* NOTE(review): s1 already holds a dword offset, so the
			 * extra ">> 2" here looks suspect but matches the
			 * shipping code — verify against the register layout
			 * before changing. */
			switch(table->mc_reg_address[i].s1 >> 2) {
			case MC_SEQ_MISC1:
				/* Force bits [2:0] = 7 at the 1250/1375 MHz entries. */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case MC_SEQ_WR_CTL_D0:
				/* Patch write-control fields for channel D0. */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_D1:
				/* Same patch for channel D1. */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_2:
				/* Clear WR_CTL_2 entirely at the affected strap points. */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case MC_SEQ_CAS_TIMING:
				/* CAS latency fields differ between the two strap points. */
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case MC_SEQ_MISC_TIMING:
				/* Misc timing low bits, again per strap point. */
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		/* Tweak MC IO debug register 3: set bit 16, clear bits [18:17]. */
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}
4528
4529 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4530 {
4531         struct ci_power_info *pi = ci_get_pi(rdev);
4532         struct atom_mc_reg_table *table;
4533         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4534         u8 module_index = rv770_get_memory_module_index(rdev);
4535         int ret;
4536
4537         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4538         if (!table)
4539                 return -ENOMEM;
4540
4541         WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4542         WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4543         WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4544         WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4545         WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4546         WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4547         WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4548         WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4549         WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4550         WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4551         WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4552         WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4553         WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4554         WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4555         WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4556         WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4557         WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4558         WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4559         WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4560         WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4561
4562         ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4563         if (ret)
4564                 goto init_mc_done;
4565
4566         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4567         if (ret)
4568                 goto init_mc_done;
4569
4570         ci_set_s0_mc_reg_index(ci_table);
4571
4572         ret = ci_register_patching_mc_seq(rdev, ci_table);
4573         if (ret)
4574                 goto init_mc_done;
4575
4576         ret = ci_set_mc_special_registers(rdev, ci_table);
4577         if (ret)
4578                 goto init_mc_done;
4579
4580         ci_set_valid_flag(ci_table);
4581
4582 init_mc_done:
4583         kfree(table);
4584
4585         return ret;
4586 }
4587
4588 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4589                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4590 {
4591         struct ci_power_info *pi = ci_get_pi(rdev);
4592         u32 i, j;
4593
4594         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4595                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4596                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4597                                 return -EINVAL;
4598                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4599                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4600                         i++;
4601                 }
4602         }
4603
4604         mc_reg_table->last = (u8)i;
4605
4606         return 0;
4607 }
4608
4609 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4610                                     SMU7_Discrete_MCRegisterSet *data,
4611                                     u32 num_entries, u32 valid_flag)
4612 {
4613         u32 i, j;
4614
4615         for (i = 0, j = 0; j < num_entries; j++) {
4616                 if (valid_flag & (1 << j)) {
4617                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4618                         i++;
4619                 }
4620         }
4621 }
4622
4623 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4624                                                  const u32 memory_clock,
4625                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4626 {
4627         struct ci_power_info *pi = ci_get_pi(rdev);
4628         u32 i = 0;
4629
4630         for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4631                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4632                         break;
4633         }
4634
4635         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4636                 --i;
4637
4638         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4639                                 mc_reg_table_data, pi->mc_reg_table.last,
4640                                 pi->mc_reg_table.valid_flag);
4641 }
4642
4643 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4644                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4645 {
4646         struct ci_power_info *pi = ci_get_pi(rdev);
4647         u32 i;
4648
4649         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4650                 ci_convert_mc_reg_table_entry_to_smc(rdev,
4651                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4652                                                      &mc_reg_table->data[i]);
4653 }
4654
4655 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4656 {
4657         struct ci_power_info *pi = ci_get_pi(rdev);
4658         int ret;
4659
4660         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4661
4662         ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4663         if (ret)
4664                 return ret;
4665         ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4666
4667         return ci_copy_bytes_to_smc(rdev,
4668                                     pi->mc_reg_table_start,
4669                                     (u8 *)&pi->smc_mc_reg_table,
4670                                     sizeof(SMU7_Discrete_MCRegisters),
4671                                     pi->sram_end);
4672 }
4673
4674 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4675 {
4676         struct ci_power_info *pi = ci_get_pi(rdev);
4677
4678         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4679                 return 0;
4680
4681         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4682
4683         ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4684
4685         return ci_copy_bytes_to_smc(rdev,
4686                                     pi->mc_reg_table_start +
4687                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4688                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4689                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4690                                     pi->dpm_table.mclk_table.count,
4691                                     pi->sram_end);
4692 }
4693
4694 static void ci_enable_voltage_control(struct radeon_device *rdev)
4695 {
4696         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4697
4698         tmp |= VOLT_PWRMGT_EN;
4699         WREG32_SMC(GENERAL_PWRMGT, tmp);
4700 }
4701
4702 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4703                                                       struct radeon_ps *radeon_state)
4704 {
4705         struct ci_ps *state = ci_get_ps(radeon_state);
4706         int i;
4707         u16 pcie_speed, max_speed = 0;
4708
4709         for (i = 0; i < state->performance_level_count; i++) {
4710                 pcie_speed = state->performance_levels[i].pcie_gen;
4711                 if (max_speed < pcie_speed)
4712                         max_speed = pcie_speed;
4713         }
4714
4715         return max_speed;
4716 }
4717
4718 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4719 {
4720         u32 speed_cntl = 0;
4721
4722         speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4723         speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4724
4725         return (u16)speed_cntl;
4726 }
4727
4728 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4729 {
4730         u32 link_width = 0;
4731
4732         link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4733         link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4734
4735         switch (link_width) {
4736         case RADEON_PCIE_LC_LINK_WIDTH_X1:
4737                 return 1;
4738         case RADEON_PCIE_LC_LINK_WIDTH_X2:
4739                 return 2;
4740         case RADEON_PCIE_LC_LINK_WIDTH_X4:
4741                 return 4;
4742         case RADEON_PCIE_LC_LINK_WIDTH_X8:
4743                 return 8;
4744         case RADEON_PCIE_LC_LINK_WIDTH_X12:
4745                 /* not actually supported */
4746                 return 12;
4747         case RADEON_PCIE_LC_LINK_WIDTH_X0:
4748         case RADEON_PCIE_LC_LINK_WIDTH_X16:
4749         default:
4750                 return 16;
4751         }
4752 }
4753
/* If the new state wants a faster PCIe link, ask the platform (via
 * ACPI ATCS) to raise it before the state change; record a forced
 * fallback gen when the request fails.  If the new state wants a
 * slower link, just note that a post-change notification is needed.
 */
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			/* gen3 request failed: fall back to gen2 */
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			/* fall through */
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			/* fall through */
#endif
		default:
			/* no ACPI path (or request failed): stick with the
			 * currently trained speed */
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}
4792
4793 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4794                                                            struct radeon_ps *radeon_new_state,
4795                                                            struct radeon_ps *radeon_current_state)
4796 {
4797         struct ci_power_info *pi = ci_get_pi(rdev);
4798         enum radeon_pcie_gen target_link_speed =
4799                 ci_get_maximum_link_speed(rdev, radeon_new_state);
4800         u8 request;
4801
4802         if (pi->pspp_notify_required) {
4803                 if (target_link_speed == RADEON_PCIE_GEN3)
4804                         request = PCIE_PERF_REQ_PECI_GEN3;
4805                 else if (target_link_speed == RADEON_PCIE_GEN2)
4806                         request = PCIE_PERF_REQ_PECI_GEN2;
4807                 else
4808                         request = PCIE_PERF_REQ_PECI_GEN1;
4809
4810                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4811                     (ci_get_current_pcie_speed(rdev) > 0))
4812                         return;
4813
4814 #ifdef CONFIG_ACPI
4815                 radeon_acpi_pcie_performance_request(rdev, request, false);
4816 #endif
4817         }
4818 }
4819
4820 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4821 {
4822         struct ci_power_info *pi = ci_get_pi(rdev);
4823         struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4824                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4825         struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4826                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4827         struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4828                 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4829
4830         if (allowed_sclk_vddc_table == NULL)
4831                 return -EINVAL;
4832         if (allowed_sclk_vddc_table->count < 1)
4833                 return -EINVAL;
4834         if (allowed_mclk_vddc_table == NULL)
4835                 return -EINVAL;
4836         if (allowed_mclk_vddc_table->count < 1)
4837                 return -EINVAL;
4838         if (allowed_mclk_vddci_table == NULL)
4839                 return -EINVAL;
4840         if (allowed_mclk_vddci_table->count < 1)
4841                 return -EINVAL;
4842
4843         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4844         pi->max_vddc_in_pp_table =
4845                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4846
4847         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4848         pi->max_vddci_in_pp_table =
4849                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4850
4851         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4852                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4853         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4854                 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4855         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4856                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4857         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4858                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4859
4860         return 0;
4861 }
4862
4863 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4864 {
4865         struct ci_power_info *pi = ci_get_pi(rdev);
4866         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4867         u32 leakage_index;
4868
4869         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4870                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4871                         *vddc = leakage_table->actual_voltage[leakage_index];
4872                         break;
4873                 }
4874         }
4875 }
4876
4877 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4878 {
4879         struct ci_power_info *pi = ci_get_pi(rdev);
4880         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4881         u32 leakage_index;
4882
4883         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4884                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4885                         *vddci = leakage_table->actual_voltage[leakage_index];
4886                         break;
4887                 }
4888         }
4889 }
4890
4891 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4892                                                                       struct radeon_clock_voltage_dependency_table *table)
4893 {
4894         u32 i;
4895
4896         if (table) {
4897                 for (i = 0; i < table->count; i++)
4898                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4899         }
4900 }
4901
4902 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4903                                                                        struct radeon_clock_voltage_dependency_table *table)
4904 {
4905         u32 i;
4906
4907         if (table) {
4908                 for (i = 0; i < table->count; i++)
4909                         ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4910         }
4911 }
4912
4913 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4914                                                                           struct radeon_vce_clock_voltage_dependency_table *table)
4915 {
4916         u32 i;
4917
4918         if (table) {
4919                 for (i = 0; i < table->count; i++)
4920                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4921         }
4922 }
4923
4924 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4925                                                                           struct radeon_uvd_clock_voltage_dependency_table *table)
4926 {
4927         u32 i;
4928
4929         if (table) {
4930                 for (i = 0; i < table->count; i++)
4931                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4932         }
4933 }
4934
4935 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4936                                                                    struct radeon_phase_shedding_limits_table *table)
4937 {
4938         u32 i;
4939
4940         if (table) {
4941                 for (i = 0; i < table->count; i++)
4942                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4943         }
4944 }
4945
4946 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4947                                                             struct radeon_clock_and_voltage_limits *table)
4948 {
4949         if (table) {
4950                 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4951                 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4952         }
4953 }
4954
4955 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4956                                                          struct radeon_cac_leakage_table *table)
4957 {
4958         u32 i;
4959
4960         if (table) {
4961                 for (i = 0; i < table->count; i++)
4962                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4963         }
4964 }
4965
/* Run the leakage-voltage patch over every dependency/limit table in
 * dyn_state, so virtual leakage voltage ids are replaced by real
 * voltages before the tables are consumed.
 */
static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{

	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
						     &rdev->pm.dpm.dyn_state.cac_leakage_table);

}
4995
4996 static void ci_get_memory_type(struct radeon_device *rdev)
4997 {
4998         struct ci_power_info *pi = ci_get_pi(rdev);
4999         u32 tmp;
5000
5001         tmp = RREG32(MC_SEQ_MISC0);
5002
5003         if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
5004             MC_SEQ_MISC0_GDDR5_VALUE)
5005                 pi->mem_gddr5 = true;
5006         else
5007                 pi->mem_gddr5 = false;
5008
5009 }
5010
/* Copy @rps (and its private ci_ps) into the driver-owned "current"
 * state.  ps_priv is re-pointed at the local copy afterwards so the
 * cached state does not reference the caller's storage.
 */
static void ci_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	/* must be fixed up after the struct copy above */
	pi->current_rps.ps_priv = &pi->current_ps;
}
5021
/* Copy @rps (and its private ci_ps) into the driver-owned "requested"
 * state.  ps_priv is re-pointed at the local copy afterwards so the
 * cached state does not reference the caller's storage.
 */
static void ci_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	/* must be fixed up after the struct copy above */
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
5032
/* Snapshot the dpm core's requested power state into our private copy
 * and apply CI-specific adjustment rules (clock/voltage clamping etc.)
 * before the actual state switch.  Always returns 0.
 */
int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	/* local copy: the adjustment rules may modify the state */
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ci_update_requested_ps(rdev, new_ps);

	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);

	return 0;
}
5045
5046 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5047 {
5048         struct ci_power_info *pi = ci_get_pi(rdev);
5049         struct radeon_ps *new_ps = &pi->requested_rps;
5050
5051         ci_update_current_ps(rdev, new_ps);
5052 }
5053
5054
/* One-time ASIC-side dpm setup: load MC firmware, cache the clock
 * registers and memory type, and enable ACPI power management.
 * An MC firmware load failure is logged but not fatal here.
 */
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	int r;

	r = ci_mc_load_microcode(rdev);
	if (r)
		DRM_ERROR("Failed to load MC firmware!\n");
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
5067
/**
 * ci_dpm_enable - bring up dynamic power management on the SMC
 * @rdev: radeon device
 *
 * Builds the voltage tables, uploads and starts the SMC firmware, then
 * enables the individual DPM features (sclk control, ULV, deep sleep,
 * DIDT, CAC, power containment) and starts the thermal controller.
 * Returns 0 on success or a negative error code on the first failure.
 *
 * NOTE(review): the sequence below appears order-sensitive (firmware
 * upload before header parsing, SMC start before feature enables) —
 * do not reorder the calls.
 */
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	/* SMC already running means dpm is already enabled */
	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			/* non-fatal: fall back to static AC timing */
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	/* tables are in place: start the SMC and turn on dpm features */
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ci_thermal_start_thermal_controller(rdev);

	/* the boot state is what we are running right now */
	ci_update_current_ps(rdev, boot_ps);

	return 0;
}
5186
5187 static int ci_set_temperature_range(struct radeon_device *rdev)
5188 {
5189         int ret;
5190
5191         ret = ci_thermal_enable_alert(rdev, false);
5192         if (ret)
5193                 return ret;
5194         ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5195         if (ret)
5196                 return ret;
5197         ret = ci_thermal_enable_alert(rdev, true);
5198         if (ret)
5199                 return ret;
5200
5201         return ret;
5202 }
5203
5204 int ci_dpm_late_enable(struct radeon_device *rdev)
5205 {
5206         int ret;
5207
5208         ret = ci_set_temperature_range(rdev);
5209         if (ret)
5210                 return ret;
5211
5212         ci_dpm_powergate_uvd(rdev, true);
5213
5214         return 0;
5215 }
5216
/* Tear down dynamic power management: disable every dpm feature in
 * roughly the reverse order of ci_dpm_enable(), stop the SMC, and
 * restore the boot power state.  A no-op if the SMC is not running.
 */
void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	/* ungate UVD before dpm goes away */
	ci_dpm_powergate_uvd(rdev, false);

	if (!ci_is_smc_running(rdev))
		return;

	ci_thermal_stop_thermal_controller(rdev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, false);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);

	/* with dpm off, we are back at the boot state */
	ci_update_current_ps(rdev, boot_ps);
}
5246
/**
 * ci_dpm_set_power_state - switch the hardware to the requested state
 * @rdev: radeon device
 *
 * Reprograms the SMC dpm level tables for the requested power state:
 * sclk/mclk dpm is frozen, the new levels are uploaded, VCE dpm and
 * memory timings are updated, then dpm is unfrozen and the new level
 * enable mask is applied.  PCIe link speed changes are requested
 * before the switch and confirmed after it.  Returns 0 on success or
 * a negative error code on the first failure.
 */
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	/* freeze dpm while the level tables are rewritten */
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}
5311
/* Force the hardware back to the boot power state. */
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
5316
/* Reprogram the display gap after a display configuration change. */
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}
5321
/* Overlay of the possible ATOM PowerPlay table headers; the actual
 * layout is selected by the table revision in the VBIOS.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Overlay of per-ASIC-family clock info entries; CI parsing below
 * uses the .ci member.
 */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

/* Overlay of the v1/v2 PPLib power state entry layouts. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
5344
/* Fill in the generic (non-clock) fields of @rps from a PPLib
 * non-clock info entry: caps, classification flags, and — for table
 * revisions newer than VER1 — the UVD vclk/dclk requirements.  Also
 * records the boot and UVD states on the dpm core when the
 * classification flags mark this state as such.
 */
static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		/* older tables carry no UVD clock requirements */
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
5367
/* Decode one CI clock-info entry into performance level @index of the
 * ci_ps attached to @rps: engine/memory clocks (split across a 16-bit
 * low word and an 8-bit high byte in the VBIOS), PCIe gen/lane caps,
 * and special handling for ACPI, ULV and boot states.  Also tracks the
 * min/max PCIe caps seen across battery and performance UI states.
 */
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* clocks are stored as a 16-bit low part plus an 8-bit high part */
	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	/* clamp PCIe settings against system and VBIOS boot capabilities */
	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		/* remember this level as the ULV (ultra-low voltage) level */
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	/* widen the tracked PCIe gen/lane ranges for the state's UI class */
	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
5436
5437 static int ci_parse_power_table(struct radeon_device *rdev)
5438 {
5439         struct radeon_mode_info *mode_info = &rdev->mode_info;
5440         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5441         union pplib_power_state *power_state;
5442         int i, j, k, non_clock_array_index, clock_array_index;
5443         union pplib_clock_info *clock_info;
5444         struct _StateArray *state_array;
5445         struct _ClockInfoArray *clock_info_array;
5446         struct _NonClockInfoArray *non_clock_info_array;
5447         union power_info *power_info;
5448         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5449         u16 data_offset;
5450         u8 frev, crev;
5451         u8 *power_state_offset;
5452         struct ci_ps *ps;
5453
5454         if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5455                                    &frev, &crev, &data_offset))
5456                 return -EINVAL;
5457         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5458
5459         state_array = (struct _StateArray *)
5460                 (mode_info->atom_context->bios + data_offset +
5461                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5462         clock_info_array = (struct _ClockInfoArray *)
5463                 (mode_info->atom_context->bios + data_offset +
5464                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5465         non_clock_info_array = (struct _NonClockInfoArray *)
5466                 (mode_info->atom_context->bios + data_offset +
5467                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5468
5469         rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
5470                                   state_array->ucNumEntries, GFP_KERNEL);
5471         if (!rdev->pm.dpm.ps)
5472                 return -ENOMEM;
5473         power_state_offset = (u8 *)state_array->states;
5474         for (i = 0; i < state_array->ucNumEntries; i++) {
5475                 u8 *idx;
5476                 power_state = (union pplib_power_state *)power_state_offset;
5477                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5478                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5479                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5480                 if (!rdev->pm.power_state[i].clock_info)
5481                         return -EINVAL;
5482                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5483                 if (ps == NULL) {
5484                         kfree(rdev->pm.dpm.ps);
5485                         return -ENOMEM;
5486                 }
5487                 rdev->pm.dpm.ps[i].ps_priv = ps;
5488                 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5489                                               non_clock_info,
5490                                               non_clock_info_array->ucEntrySize);
5491                 k = 0;
5492                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5493                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5494                         clock_array_index = idx[j];
5495                         if (clock_array_index >= clock_info_array->ucNumEntries)
5496                                 continue;
5497                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5498                                 break;
5499                         clock_info = (union pplib_clock_info *)
5500                                 ((u8 *)&clock_info_array->clockInfo[0] +
5501                                  (clock_array_index * clock_info_array->ucEntrySize));
5502                         ci_parse_pplib_clock_info(rdev,
5503                                                   &rdev->pm.dpm.ps[i], k,
5504                                                   clock_info);
5505                         k++;
5506                 }
5507                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5508         }
5509         rdev->pm.dpm.num_ps = state_array->ucNumEntries;
5510
5511         /* fill in the vce power states */
5512         for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5513                 u32 sclk, mclk;
5514                 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5515                 clock_info = (union pplib_clock_info *)
5516                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5517                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5518                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5519                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5520                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5521                 rdev->pm.dpm.vce_states[i].sclk = sclk;
5522                 rdev->pm.dpm.vce_states[i].mclk = mclk;
5523         }
5524
5525         return 0;
5526 }
5527
5528 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5529                                     struct ci_vbios_boot_state *boot_state)
5530 {
5531         struct radeon_mode_info *mode_info = &rdev->mode_info;
5532         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5533         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5534         u8 frev, crev;
5535         u16 data_offset;
5536
5537         if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5538                                    &frev, &crev, &data_offset)) {
5539                 firmware_info =
5540                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5541                                                     data_offset);
5542                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5543                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5544                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5545                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5546                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5547                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5548                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5549
5550                 return 0;
5551         }
5552         return -EINVAL;
5553 }
5554
/* Release everything allocated by ci_dpm_init()/ci_parse_power_table():
 * the per-state ci_ps structs, the ps array, the ci_power_info, the
 * dispclk dependency entries, and the extended power tables.
 */
void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}
5567
5568 int ci_dpm_init(struct radeon_device *rdev)
5569 {
5570         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5571         SMU7_Discrete_DpmTable  *dpm_table;
5572         struct radeon_gpio_rec gpio;
5573         u16 data_offset, size;
5574         u8 frev, crev;
5575         struct ci_power_info *pi;
5576         int ret;
5577         u32 mask;
5578
5579         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5580         if (pi == NULL)
5581                 return -ENOMEM;
5582         rdev->pm.dpm.priv = pi;
5583
5584         ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5585         if (ret)
5586                 pi->sys_pcie_mask = 0;
5587         else
5588                 pi->sys_pcie_mask = mask;
5589         pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5590
5591         pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5592         pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5593         pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5594         pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5595
5596         pi->pcie_lane_performance.max = 0;
5597         pi->pcie_lane_performance.min = 16;
5598         pi->pcie_lane_powersaving.max = 0;
5599         pi->pcie_lane_powersaving.min = 16;
5600
5601         ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5602         if (ret) {
5603                 ci_dpm_fini(rdev);
5604                 return ret;
5605         }
5606
5607         ret = r600_get_platform_caps(rdev);
5608         if (ret) {
5609                 ci_dpm_fini(rdev);
5610                 return ret;
5611         }
5612
5613         ret = r600_parse_extended_power_table(rdev);
5614         if (ret) {
5615                 ci_dpm_fini(rdev);
5616                 return ret;
5617         }
5618
5619         ret = ci_parse_power_table(rdev);
5620         if (ret) {
5621                 ci_dpm_fini(rdev);
5622                 return ret;
5623         }
5624
5625         pi->dll_default_on = false;
5626         pi->sram_end = SMC_RAM_END;
5627
5628         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5629         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5630         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5631         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5632         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5633         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5634         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5635         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5636
5637         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5638
5639         pi->sclk_dpm_key_disabled = 0;
5640         pi->mclk_dpm_key_disabled = 0;
5641         pi->pcie_dpm_key_disabled = 0;
5642
5643         /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5644         if ((rdev->pdev->device == 0x6658) &&
5645             (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5646                 pi->mclk_dpm_key_disabled = 1;
5647         }
5648
5649         pi->caps_sclk_ds = true;
5650
5651         pi->mclk_strobe_mode_threshold = 40000;
5652         pi->mclk_stutter_mode_threshold = 40000;
5653         pi->mclk_edc_enable_threshold = 40000;
5654         pi->mclk_edc_wr_enable_threshold = 40000;
5655
5656         ci_initialize_powertune_defaults(rdev);
5657
5658         pi->caps_fps = false;
5659
5660         pi->caps_sclk_throttle_low_notification = false;
5661
5662         pi->caps_uvd_dpm = true;
5663         pi->caps_vce_dpm = true;
5664
5665         ci_get_leakage_voltages(rdev);
5666         ci_patch_dependency_tables_with_leakage(rdev);
5667         ci_set_private_data_variables_based_on_pptable(rdev);
5668
5669         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5670                 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5671         if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5672                 ci_dpm_fini(rdev);
5673                 return -ENOMEM;
5674         }
5675         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5676         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5677         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5678         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5679         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5680         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5681         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5682         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5683         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5684
5685         rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5686         rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5687         rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5688
5689         rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5690         rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5691         rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5692         rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5693
5694         if (rdev->family == CHIP_HAWAII) {
5695                 pi->thermal_temp_setting.temperature_low = 94500;
5696                 pi->thermal_temp_setting.temperature_high = 95000;
5697                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5698         } else {
5699                 pi->thermal_temp_setting.temperature_low = 99500;
5700                 pi->thermal_temp_setting.temperature_high = 100000;
5701                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5702         }
5703
5704         pi->uvd_enabled = false;
5705
5706         dpm_table = &pi->smc_state_table;
5707
5708         gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5709         if (gpio.valid) {
5710                 dpm_table->VRHotGpio = gpio.shift;
5711                 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5712         } else {
5713                 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5714                 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5715         }
5716
5717         gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5718         if (gpio.valid) {
5719                 dpm_table->AcDcGpio = gpio.shift;
5720                 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5721         } else {
5722                 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5723                 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5724         }
5725
5726         gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5727         if (gpio.valid) {
5728                 u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5729
5730                 switch (gpio.shift) {
5731                 case 0:
5732                         tmp &= ~GNB_SLOW_MODE_MASK;
5733                         tmp |= GNB_SLOW_MODE(1);
5734                         break;
5735                 case 1:
5736                         tmp &= ~GNB_SLOW_MODE_MASK;
5737                         tmp |= GNB_SLOW_MODE(2);
5738                         break;
5739                 case 2:
5740                         tmp |= GNB_SLOW;
5741                         break;
5742                 case 3:
5743                         tmp |= FORCE_NB_PS1;
5744                         break;
5745                 case 4:
5746                         tmp |= DPM_ENABLED;
5747                         break;
5748                 default:
5749                         DRM_ERROR("Invalid PCC GPIO!");
5750                         break;
5751                 }
5752                 WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5753         }
5754
5755         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5756         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5757         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5758         if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5759                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5760         else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5761                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5762
5763         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5764                 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5765                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5766                 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5767                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5768                 else
5769                         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5770         }
5771
5772         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5773                 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5774                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5775                 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5776                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5777                 else
5778                         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5779         }
5780
5781         pi->vddc_phase_shed_control = true;
5782
5783 #if defined(CONFIG_ACPI)
5784         pi->pcie_performance_request =
5785                 radeon_acpi_is_pcie_performance_request_supported(rdev);
5786 #else
5787         pi->pcie_performance_request = false;
5788 #endif
5789
5790         if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5791                                    &frev, &crev, &data_offset)) {
5792                 pi->caps_sclk_ss_support = true;
5793                 pi->caps_mclk_ss_support = true;
5794                 pi->dynamic_ss = true;
5795         } else {
5796                 pi->caps_sclk_ss_support = false;
5797                 pi->caps_mclk_ss_support = false;
5798                 pi->dynamic_ss = true;
5799         }
5800
5801         if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5802                 pi->thermal_protection = true;
5803         else
5804                 pi->thermal_protection = false;
5805
5806         pi->caps_dynamic_ac_timing = true;
5807
5808         pi->uvd_power_gated = false;
5809
5810         /* make sure dc limits are valid */
5811         if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5812             (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5813                 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5814                         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5815
5816         pi->fan_ctrl_is_in_default_mode = true;
5817         rdev->pm.dpm.fan.ucode_fan_control = false;
5818
5819         return 0;
5820 }
5821
5822 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5823                                                     struct seq_file *m)
5824 {
5825         struct ci_power_info *pi = ci_get_pi(rdev);
5826         struct radeon_ps *rps = &pi->current_rps;
5827         u32 sclk = ci_get_average_sclk_freq(rdev);
5828         u32 mclk = ci_get_average_mclk_freq(rdev);
5829
5830         seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5831         seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
5832         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5833                    sclk, mclk);
5834 }
5835
5836 void ci_dpm_print_power_state(struct radeon_device *rdev,
5837                               struct radeon_ps *rps)
5838 {
5839         struct ci_ps *ps = ci_get_ps(rps);
5840         struct ci_pl *pl;
5841         int i;
5842
5843         r600_dpm_print_class_info(rps->class, rps->class2);
5844         r600_dpm_print_cap_info(rps->caps);
5845         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5846         for (i = 0; i < ps->performance_level_count; i++) {
5847                 pl = &ps->performance_levels[i];
5848                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5849                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5850         }
5851         r600_dpm_print_ps_status(rdev, rps);
5852 }
5853
5854 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5855 {
5856         struct ci_power_info *pi = ci_get_pi(rdev);
5857         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5858
5859         if (low)
5860                 return requested_state->performance_levels[0].sclk;
5861         else
5862                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5863 }
5864
5865 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5866 {
5867         struct ci_power_info *pi = ci_get_pi(rdev);
5868         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5869
5870         if (low)
5871                 return requested_state->performance_levels[0].mclk;
5872         else
5873                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5874 }