OSDN Git Service

de295b7442b7b0d5ce2e82dd1d6171a98d315059
[android-x86/kernel.git] / drivers / gpu / drm / amd / powerplay / hwmgr / smu7_hwmgr.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <asm/div64.h>
29 #include <drm/amdgpu_drm.h>
30 #include "ppatomctrl.h"
31 #include "atombios.h"
32 #include "pptable_v1_0.h"
33 #include "pppcielanes.h"
34 #include "amd_pcie_helpers.h"
35 #include "hardwaremanager.h"
36 #include "process_pptables_v1_0.h"
37 #include "cgs_common.h"
38
39 #include "smu7_common.h"
40
41 #include "hwmgr.h"
42 #include "smu7_hwmgr.h"
43 #include "smu_ucode_xfer_vi.h"
44 #include "smu7_powertune.h"
45 #include "smu7_dyn_defaults.h"
46 #include "smu7_thermal.h"
47 #include "smu7_clockpowergating.h"
48 #include "processpptables.h"
49 #include "pp_thermal.h"
50
/* MC arbiter register set indices; used as arb_src/arb_dest in
 * smu7_copy_and_switch_arb_sets() below.
 */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

/* MC clock-gating sequencer commands (not referenced in this chunk;
 * NOTE(review): semantics inherited from existing code, confirm in MC docs).
 */
#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

/* SMC indirect register window for clock-gating registers. */
#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

/* Memory clock thresholds and latency values
 * (NOTE(review): units not evident from this chunk — confirm at call sites).
 */
#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

/* MC_SEQ_MISC0 field that identifies GDDR5 memory (value 5 in bits 31:28). */
#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

/* Reference PCIe bus clock and derived transaction clock
 * (NOTE(review): 10 kHz units presumed — confirm against users of TCLK).
 */
#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)
77
/* Built-in power-profile presets, indexed by profile mode.
 * NOTE(review): field order assumed to follow struct profile_mode_setting
 * ({sclk update flag, sclk up/down hysteresis, sclk activity, then the
 * same four for mclk}) — confirm against the struct definition.
 * The all-zero last entry is the CUSTOM placeholder.
 */
static const struct profile_mode_setting smu7_profiling[6] =
                                        {{1, 0, 100, 30, 1, 0, 100, 10},
                                         {1, 10, 0, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 1, 10, 16, 31},
                                         {1, 0, 11, 50, 1, 0, 100, 10},
                                         {1, 0, 5, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 0, 0, 0, 0},
                                        };
86
/* VEGAM-only SMC message ID (not in the shared PPSMC header). */
#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)

/* SVI2 plane-1 load register and its PSI field masks/shifts, used to
 * disable PSI0/PSI1 power-save states on VEGAM before enabling the
 * SMC voltage controller (see smu7_enable_smc_voltage_controller()).
 */
#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
        DPM_EVENT_SRC_ANALOG = 0,               /* analog sensor only */
        DPM_EVENT_SRC_EXTERNAL = 1,             /* external (GPIO) source only */
        DPM_EVENT_SRC_DIGITAL = 2,              /* digital sensor only */
        DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,   /* analog or external */
        DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4   /* digital or external */
};
103
/* Magic value stamped into pp_hw_power_state::magic to mark smu7 states. */
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
/* Forward declaration; defined later in this file. */
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask);
107
/**
 * Downcast a generic hardware power state to the smu7-specific type.
 *
 * @param    hw_ps  generic hardware power state
 * @return   the smu7 power state, or NULL when the magic number check fails
 */
static struct smu7_power_state *cast_phw_smu7_power_state(
                                  struct pp_hw_power_state *hw_ps)
{
        /* Magic check guards against casting a foreign power state. */
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (struct smu7_power_state *)hw_ps;
}
117
/**
 * Const variant of cast_phw_smu7_power_state().
 *
 * @param    hw_ps  generic hardware power state
 * @return   the smu7 power state, or NULL when the magic number check fails
 */
static const struct smu7_power_state *cast_const_phw_smu7_power_state(
                                 const struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (const struct smu7_power_state *)hw_ps;
}
127
/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
        /* Select MC debug index 0x9F, then read the version through the
         * debug data register (indirect access pair).
         */
        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

        hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

        return 0;
}
142
143 static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
144 {
145         uint32_t speedCntl = 0;
146
147         /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
148         speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
149                         ixPCIE_LC_SPEED_CNTL);
150         return((uint16_t)PHM_GET_FIELD(speedCntl,
151                         PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
152 }
153
/* Return the current PCIe lane count, decoded from the link width field;
 * 0 when the raw field is out of range.
 */
static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
        uint32_t link_width;

        /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
        link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
                        PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

        /* Raw encoding is 0..7; anything else means a readback problem. */
        PP_ASSERT_WITH_CODE((7 >= link_width),
                        "Invalid PCIe lane width!", return 0);

        return decode_pcie_lane_width(link_width);
}
167
/**
* Enable the SMC voltage controller
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
*/
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
        /* VEGAM: disable PSI0/PSI1 power-save states on SVI2 plane 1
         * before handing voltage control to the SMC.
         */
        if (hwmgr->chip_id == CHIP_VEGAM) {
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
        }

        /* Only message the SMC when the feature mask allows voltage control. */
        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);

        return 0;
}
188
189 /**
190 * Checks if we want to support voltage control
191 *
192 * @param    hwmgr  the address of the powerplay hardware manager.
193 */
194 static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
195 {
196         const struct smu7_hwmgr *data =
197                         (const struct smu7_hwmgr *)(hwmgr->backend);
198
199         return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
200 }
201
/**
* Enable voltage control
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
*/
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
        /* enable voltage control by setting GENERAL_PWRMGT.VOLT_PWRMGT_EN */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

        return 0;
}
216
217 static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
218                 struct phm_clock_voltage_dependency_table *voltage_dependency_table
219                 )
220 {
221         uint32_t i;
222
223         PP_ASSERT_WITH_CODE((NULL != voltage_table),
224                         "Voltage Dependency Table empty.", return -EINVAL;);
225
226         voltage_table->mask_low = 0;
227         voltage_table->phase_delay = 0;
228         voltage_table->count = voltage_dependency_table->count;
229
230         for (i = 0; i < voltage_dependency_table->count; i++) {
231                 voltage_table->entries[i].value =
232                         voltage_dependency_table->entries[i].v;
233                 voltage_table->entries[i].smio_low = 0;
234         }
235
236         return 0;
237 }
238
239
240 /**
241 * Create Voltage Tables.
242 *
243 * @param    hwmgr  the address of the powerplay hardware manager.
244 * @return   always 0
245 */
246 static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
247 {
248         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
249         struct phm_ppt_v1_information *table_info =
250                         (struct phm_ppt_v1_information *)hwmgr->pptable;
251         int result = 0;
252         uint32_t tmp;
253
254         if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
255                 result = atomctrl_get_voltage_table_v3(hwmgr,
256                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
257                                 &(data->mvdd_voltage_table));
258                 PP_ASSERT_WITH_CODE((0 == result),
259                                 "Failed to retrieve MVDD table.",
260                                 return result);
261         } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
262                 if (hwmgr->pp_table_version == PP_TABLE_V1)
263                         result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
264                                         table_info->vdd_dep_on_mclk);
265                 else if (hwmgr->pp_table_version == PP_TABLE_V0)
266                         result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
267                                         hwmgr->dyn_state.mvdd_dependency_on_mclk);
268
269                 PP_ASSERT_WITH_CODE((0 == result),
270                                 "Failed to retrieve SVI2 MVDD table from dependancy table.",
271                                 return result;);
272         }
273
274         if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
275                 result = atomctrl_get_voltage_table_v3(hwmgr,
276                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
277                                 &(data->vddci_voltage_table));
278                 PP_ASSERT_WITH_CODE((0 == result),
279                                 "Failed to retrieve VDDCI table.",
280                                 return result);
281         } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
282                 if (hwmgr->pp_table_version == PP_TABLE_V1)
283                         result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
284                                         table_info->vdd_dep_on_mclk);
285                 else if (hwmgr->pp_table_version == PP_TABLE_V0)
286                         result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
287                                         hwmgr->dyn_state.vddci_dependency_on_mclk);
288                 PP_ASSERT_WITH_CODE((0 == result),
289                                 "Failed to retrieve SVI2 VDDCI table from dependancy table.",
290                                 return result);
291         }
292
293         if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
294                 /* VDDGFX has only SVI2 voltage control */
295                 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
296                                         table_info->vddgfx_lookup_table);
297                 PP_ASSERT_WITH_CODE((0 == result),
298                         "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
299         }
300
301
302         if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
303                 result = atomctrl_get_voltage_table_v3(hwmgr,
304                                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
305                                         &data->vddc_voltage_table);
306                 PP_ASSERT_WITH_CODE((0 == result),
307                         "Failed to retrieve VDDC table.", return result;);
308         } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
309
310                 if (hwmgr->pp_table_version == PP_TABLE_V0)
311                         result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
312                                         hwmgr->dyn_state.vddc_dependency_on_mclk);
313                 else if (hwmgr->pp_table_version == PP_TABLE_V1)
314                         result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
315                                 table_info->vddc_lookup_table);
316
317                 PP_ASSERT_WITH_CODE((0 == result),
318                         "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
319         }
320
321         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
322         PP_ASSERT_WITH_CODE(
323                         (data->vddc_voltage_table.count <= tmp),
324                 "Too many voltage values for VDDC. Trimming to fit state table.",
325                         phm_trim_voltage_table_to_fit_state_table(tmp,
326                                                 &(data->vddc_voltage_table)));
327
328         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
329         PP_ASSERT_WITH_CODE(
330                         (data->vddgfx_voltage_table.count <= tmp),
331                 "Too many voltage values for VDDC. Trimming to fit state table.",
332                         phm_trim_voltage_table_to_fit_state_table(tmp,
333                                                 &(data->vddgfx_voltage_table)));
334
335         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
336         PP_ASSERT_WITH_CODE(
337                         (data->vddci_voltage_table.count <= tmp),
338                 "Too many voltage values for VDDCI. Trimming to fit state table.",
339                         phm_trim_voltage_table_to_fit_state_table(tmp,
340                                         &(data->vddci_voltage_table)));
341
342         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
343         PP_ASSERT_WITH_CODE(
344                         (data->mvdd_voltage_table.count <= tmp),
345                 "Too many voltage values for MVDD. Trimming to fit state table.",
346                         phm_trim_voltage_table_to_fit_state_table(tmp,
347                                                 &(data->mvdd_voltage_table)));
348
349         return 0;
350 }
351
/**
* Programs static screen detection parameters
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
*/
static int smu7_program_static_screen_threshold_parameters(
                                                        struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* Set static screen threshold unit */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
                        data->static_screen_threshold_unit);
        /* Set static screen threshold */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
                        data->static_screen_threshold);

        return 0;
}
374
375 /**
376 * Setup display gap for glitch free memory clock switching.
377 *
378 * @param    hwmgr  the address of the powerplay hardware manager.
379 * @return   always  0
380 */
381 static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
382 {
383         uint32_t display_gap =
384                         cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
385                                         ixCG_DISPLAY_GAP_CNTL);
386
387         display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
388                         DISP_GAP, DISPLAY_GAP_IGNORE);
389
390         display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
391                         DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
392
393         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
394                         ixCG_DISPLAY_GAP_CNTL, display_gap);
395
396         return 0;
397 }
398
399 /**
400 * Programs activity state transition voting clients
401 *
402 * @param    hwmgr  the address of the powerplay hardware manager.
403 * @return   always  0
404 */
405 static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
406 {
407         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
408         int i;
409
410         /* Clear reset for voting clients before enabling DPM */
411         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
412                         SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
413         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
414                         SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
415
416         for (i = 0; i < 8; i++)
417                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
418                                         ixCG_FREQ_TRAN_VOTING_0 + i * 4,
419                                         data->voting_rights_clients[i]);
420         return 0;
421 }
422
423 static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
424 {
425         int i;
426
427         /* Reset voting clients before disabling DPM */
428         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
429                         SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
430         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
431                         SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
432
433         for (i = 0; i < 8; i++)
434                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
435                                 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
436
437         return 0;
438 }
439
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 * Only F0 and F1 are supported; any other value yields -EINVAL.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
                uint32_t arb_src, uint32_t arb_dest)
{
        uint32_t mc_arb_dram_timing;
        uint32_t mc_arb_dram_timing2;
        uint32_t burst_time;
        uint32_t mc_cg_config;

        /* Read the three timing values from the source set. */
        switch (arb_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
                break;
        default:
                return -EINVAL;
        }

        /* Write them into the destination set. */
        switch (arb_dest) {
        case MC_CG_ARB_FREQ_F0:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
                break;
        case MC_CG_ARB_FREQ_F1:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
                break;
        default:
                return -EINVAL;
        }

        /* Enable MC clock gating (low nibble) and request the arbiter
         * switch to the destination set.
         */
        mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
        mc_cg_config |= 0x0000000F;
        cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
        PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

        return 0;
}
488
/* Ask the SMC firmware to restore its default settings. */
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}
493
/**
* Initial switch from ARB F0->F1
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   0 on success, -EINVAL on an invalid arb set (see
*           smu7_copy_and_switch_arb_sets)
* This function is to be called from the SetPowerState table.
*/
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
        return smu7_copy_and_switch_arb_sets(hwmgr,
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
506
507 static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
508 {
509         uint32_t tmp;
510
511         tmp = (cgs_read_ind_register(hwmgr->device,
512                         CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
513                         0x0000ff00) >> 8;
514
515         if (tmp == MC_CG_ARB_FREQ_F0)
516                 return 0;
517
518         return smu7_copy_and_switch_arb_sets(hwmgr,
519                         tmp, MC_CG_ARB_FREQ_F0);
520 }
521
522 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
523 {
524         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
525
526         struct phm_ppt_v1_information *table_info =
527                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
528         struct phm_ppt_v1_pcie_table *pcie_table = NULL;
529
530         uint32_t i, max_entry;
531         uint32_t tmp;
532
533         PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
534                         data->use_pcie_power_saving_levels), "No pcie performance levels!",
535                         return -EINVAL);
536
537         if (table_info != NULL)
538                 pcie_table = table_info->pcie_table;
539
540         if (data->use_pcie_performance_levels &&
541                         !data->use_pcie_power_saving_levels) {
542                 data->pcie_gen_power_saving = data->pcie_gen_performance;
543                 data->pcie_lane_power_saving = data->pcie_lane_performance;
544         } else if (!data->use_pcie_performance_levels &&
545                         data->use_pcie_power_saving_levels) {
546                 data->pcie_gen_performance = data->pcie_gen_power_saving;
547                 data->pcie_lane_performance = data->pcie_lane_power_saving;
548         }
549         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
550         phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
551                                         tmp,
552                                         MAX_REGULAR_DPM_NUMBER);
553
554         if (pcie_table != NULL) {
555                 /* max_entry is used to make sure we reserve one PCIE level
556                  * for boot level (fix for A+A PSPP issue).
557                  * If PCIE table from PPTable have ULV entry + 8 entries,
558                  * then ignore the last entry.*/
559                 max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
560                 for (i = 1; i < max_entry; i++) {
561                         phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
562                                         get_pcie_gen_support(data->pcie_gen_cap,
563                                                         pcie_table->entries[i].gen_speed),
564                                         get_pcie_lane_support(data->pcie_lane_cap,
565                                                         pcie_table->entries[i].lane_width));
566                 }
567                 data->dpm_table.pcie_speed_table.count = max_entry - 1;
568                 smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
569         } else {
570                 /* Hardcode Pcie Table */
571                 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
572                                 get_pcie_gen_support(data->pcie_gen_cap,
573                                                 PP_Min_PCIEGen),
574                                 get_pcie_lane_support(data->pcie_lane_cap,
575                                                 PP_Max_PCIELane));
576                 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
577                                 get_pcie_gen_support(data->pcie_gen_cap,
578                                                 PP_Min_PCIEGen),
579                                 get_pcie_lane_support(data->pcie_lane_cap,
580                                                 PP_Max_PCIELane));
581                 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
582                                 get_pcie_gen_support(data->pcie_gen_cap,
583                                                 PP_Max_PCIEGen),
584                                 get_pcie_lane_support(data->pcie_lane_cap,
585                                                 PP_Max_PCIELane));
586                 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
587                                 get_pcie_gen_support(data->pcie_gen_cap,
588                                                 PP_Max_PCIEGen),
589                                 get_pcie_lane_support(data->pcie_lane_cap,
590                                                 PP_Max_PCIELane));
591                 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
592                                 get_pcie_gen_support(data->pcie_gen_cap,
593                                                 PP_Max_PCIEGen),
594                                 get_pcie_lane_support(data->pcie_lane_cap,
595                                                 PP_Max_PCIELane));
596                 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
597                                 get_pcie_gen_support(data->pcie_gen_cap,
598                                                 PP_Max_PCIEGen),
599                                 get_pcie_lane_support(data->pcie_lane_cap,
600                                                 PP_Max_PCIELane));
601
602                 data->dpm_table.pcie_speed_table.count = 6;
603         }
604         /* Populate last level for boot PCIE level, but do not increment count. */
605         if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
606                 for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
607                         phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
608                                 get_pcie_gen_support(data->pcie_gen_cap,
609                                                 PP_Max_PCIEGen),
610                                 data->vbios_boot_state.pcie_lane_bootup_value);
611         } else {
612                 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
613                         data->dpm_table.pcie_speed_table.count,
614                         get_pcie_gen_support(data->pcie_gen_cap,
615                                         PP_Min_PCIEGen),
616                         get_pcie_lane_support(data->pcie_lane_cap,
617                                         PP_Max_PCIELane));
618         }
619         return 0;
620 }
621
622 static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
623 {
624         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
625
626         memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
627
628         phm_reset_single_dpm_table(
629                         &data->dpm_table.sclk_table,
630                                 smum_get_mac_definition(hwmgr,
631                                         SMU_MAX_LEVELS_GRAPHICS),
632                                         MAX_REGULAR_DPM_NUMBER);
633         phm_reset_single_dpm_table(
634                         &data->dpm_table.mclk_table,
635                         smum_get_mac_definition(hwmgr,
636                                 SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
637
638         phm_reset_single_dpm_table(
639                         &data->dpm_table.vddc_table,
640                                 smum_get_mac_definition(hwmgr,
641                                         SMU_MAX_LEVELS_VDDC),
642                                         MAX_REGULAR_DPM_NUMBER);
643         phm_reset_single_dpm_table(
644                         &data->dpm_table.vddci_table,
645                         smum_get_mac_definition(hwmgr,
646                                 SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
647
648         phm_reset_single_dpm_table(
649                         &data->dpm_table.mvdd_table,
650                                 smum_get_mac_definition(hwmgr,
651                                         SMU_MAX_LEVELS_MVDD),
652                                         MAX_REGULAR_DPM_NUMBER);
653         return 0;
654 }
655 /*
656  * This function is to initialize all DPM state tables
657  * for SMU7 based on the dependency table.
658  * Dynamic state patching function will then trim these
659  * state tables to the allowed range based
660  * on the power policy or external client requests,
661  * such as UVD request, etc.
662  */
663
664 static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
665 {
666         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
667         struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
668                 hwmgr->dyn_state.vddc_dependency_on_sclk;
669         struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
670                 hwmgr->dyn_state.vddc_dependency_on_mclk;
671         struct phm_cac_leakage_table *std_voltage_table =
672                 hwmgr->dyn_state.cac_leakage_table;
673         uint32_t i;
674
675         PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
676                 "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
677         PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
678                 "SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
679
680         PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
681                 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
682         PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
683                 "VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
684
685
686         /* Initialize Sclk DPM table based on allow Sclk values*/
687         data->dpm_table.sclk_table.count = 0;
688
689         for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
690                 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
691                                 allowed_vdd_sclk_table->entries[i].clk) {
692                         data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
693                                 allowed_vdd_sclk_table->entries[i].clk;
694                         data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
695                         data->dpm_table.sclk_table.count++;
696                 }
697         }
698
699         PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
700                 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
701         /* Initialize Mclk DPM table based on allow Mclk values */
702         data->dpm_table.mclk_table.count = 0;
703         for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
704                 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
705                         allowed_vdd_mclk_table->entries[i].clk) {
706                         data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
707                                 allowed_vdd_mclk_table->entries[i].clk;
708                         data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
709                         data->dpm_table.mclk_table.count++;
710                 }
711         }
712
713         /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
714         for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
715                 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
716                 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
717                 /* param1 is for corresponding std voltage */
718                 data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
719         }
720
721         data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
722         allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
723
724         if (NULL != allowed_vdd_mclk_table) {
725                 /* Initialize Vddci DPM table based on allow Mclk values */
726                 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
727                         data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
728                         data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
729                 }
730                 data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
731         }
732
733         allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
734
735         if (NULL != allowed_vdd_mclk_table) {
736                 /*
737                  * Initialize MVDD DPM table based on allow Mclk
738                  * values
739                  */
740                 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
741                         data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
742                         data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
743                 }
744                 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
745         }
746
747         return 0;
748 }
749
/*
 * Build the SCLK/MCLK DPM tables from the v1 (pptable_v1_0) voltage
 * dependency tables, dropping consecutive duplicate clock values, and
 * seed the overdrive limits from the top table entry when the platform
 * descriptor does not already provide them.
 * Returns 0 on success, -EINVAL if the pptable or a dependency table
 * is missing/empty.
 */
static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allow Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		/* skip entries whose clock matches the previously stored level */
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
						dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			/* only the lowest level starts enabled */
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}
	/* here i == dep_sclk_table->count, so entries[i-1] is the highest
	 * entry (count >= 1 asserted above) */
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
	/* Initialize Mclk DPM table based on allow Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
						dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
							dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
							(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* likewise, i == dep_mclk_table->count here */
	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
	return 0;
}
814
815 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
816 {
817         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
818         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
819         struct phm_ppt_v1_information *table_info =
820                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
821         uint32_t i;
822
823         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
824         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
825         struct phm_odn_performance_level *entries;
826
827         if (table_info == NULL)
828                 return -EINVAL;
829
830         dep_sclk_table = table_info->vdd_dep_on_sclk;
831         dep_mclk_table = table_info->vdd_dep_on_mclk;
832
833         odn_table->odn_core_clock_dpm_levels.num_of_pl =
834                                                 data->golden_dpm_table.sclk_table.count;
835         entries = odn_table->odn_core_clock_dpm_levels.entries;
836         for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
837                 entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
838                 entries[i].enabled = true;
839                 entries[i].vddc = dep_sclk_table->entries[i].vddc;
840         }
841
842         smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
843                 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
844
845         odn_table->odn_memory_clock_dpm_levels.num_of_pl =
846                                                 data->golden_dpm_table.mclk_table.count;
847         entries = odn_table->odn_memory_clock_dpm_levels.entries;
848         for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
849                 entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
850                 entries[i].enabled = true;
851                 entries[i].vddc = dep_mclk_table->entries[i].vddc;
852         }
853
854         smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
855                 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
856
857         return 0;
858 }
859
860 static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
861 {
862         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
863         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
864         struct phm_ppt_v1_information *table_info =
865                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
866         uint32_t min_vddc = 0;
867         uint32_t max_vddc = 0;
868
869         if (!table_info)
870                 return;
871
872         dep_sclk_table = table_info->vdd_dep_on_sclk;
873
874         atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
875
876         if (min_vddc == 0 || min_vddc > 2000
877                 || min_vddc > dep_sclk_table->entries[0].vddc)
878                 min_vddc = dep_sclk_table->entries[0].vddc;
879
880         if (max_vddc == 0 || max_vddc > 2000
881                 || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
882                 max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
883
884         data->odn_dpm_table.min_vddc = min_vddc;
885         data->odn_dpm_table.max_vddc = max_vddc;
886 }
887
/*
 * Compare the current DPM tables and voltage dependency tables with the
 * user's ODN (overdrive) copies and accumulate DPMTABLE_OD_UPDATE_*
 * flags in need_update_smu7_dpm_table for every divergence found, so
 * the relevant SMC tables get re-uploaded later.
 */
static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	/* one differing SCLK level marks the whole SCLK table dirty */
	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	/* likewise for MCLK */
	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	/* a VDDC edit in either dependency table forces the paired clock
	 * table to be re-uploaded as well; first hit returns early */
	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	/* a stale VDDC flag (no longer matched above) is converted into a
	 * full SCLK+MCLK re-upload instead of a standalone VDDC update */
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}
941
942 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
943 {
944         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
945
946         smu7_reset_dpm_tables(hwmgr);
947
948         if (hwmgr->pp_table_version == PP_TABLE_V1)
949                 smu7_setup_dpm_tables_v1(hwmgr);
950         else if (hwmgr->pp_table_version == PP_TABLE_V0)
951                 smu7_setup_dpm_tables_v0(hwmgr);
952
953         smu7_setup_default_pcie_table(hwmgr);
954
955         /* save a copy of the default DPM table */
956         memcpy(&(data->golden_dpm_table), &(data->dpm_table),
957                         sizeof(struct smu7_dpm_table));
958
959         /* initialize ODN table */
960         if (hwmgr->od_enabled) {
961                 if (data->odn_dpm_table.max_vddc) {
962                         smu7_check_dpm_table_updated(hwmgr);
963                 } else {
964                         smu7_setup_voltage_range_from_vbios(hwmgr);
965                         smu7_odn_initial_default_setting(hwmgr);
966                 }
967         }
968         return 0;
969 }
970
971 static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
972 {
973
974         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
975                         PHM_PlatformCaps_RegulatorHot))
976                 return smum_send_msg_to_smc(hwmgr,
977                                 PPSMC_MSG_EnableVRHotGPIOInterrupt);
978
979         return 0;
980 }
981
/* Clear the SCLK_PWRMGT_OFF bit in SCLK_PWRMGT_CNTL; always returns 0. */
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
988
989 static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
990 {
991         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
992
993         if (data->ulv_supported)
994                 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
995
996         return 0;
997 }
998
999 static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1000 {
1001         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1002
1003         if (data->ulv_supported)
1004                 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
1005
1006         return 0;
1007 }
1008
1009 static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1010 {
1011         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1012                         PHM_PlatformCaps_SclkDeepSleep)) {
1013                 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
1014                         PP_ASSERT_WITH_CODE(false,
1015                                         "Attempt to enable Master Deep Sleep switch failed!",
1016                                         return -EINVAL);
1017         } else {
1018                 if (smum_send_msg_to_smc(hwmgr,
1019                                 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1020                         PP_ASSERT_WITH_CODE(false,
1021                                         "Attempt to disable Master Deep Sleep switch failed!",
1022                                         return -EINVAL);
1023                 }
1024         }
1025
1026         return 0;
1027 }
1028
1029 static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1030 {
1031         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1032                         PHM_PlatformCaps_SclkDeepSleep)) {
1033                 if (smum_send_msg_to_smc(hwmgr,
1034                                 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1035                         PP_ASSERT_WITH_CODE(false,
1036                                         "Attempt to disable Master Deep Sleep switch failed!",
1037                                         return -EINVAL);
1038                 }
1039         }
1040
1041         return 0;
1042 }
1043
1044 static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1045 {
1046         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1047         uint32_t soft_register_value = 0;
1048         uint32_t handshake_disables_offset = data->soft_regs_start
1049                                 + smum_get_offsetof(hwmgr,
1050                                         SMU_SoftRegisters, HandshakeDisables);
1051
1052         soft_register_value = cgs_read_ind_register(hwmgr->device,
1053                                 CGS_IND_REG__SMC, handshake_disables_offset);
1054         soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1055         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1056                         handshake_disables_offset, soft_register_value);
1057         return 0;
1058 }
1059
1060 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1061 {
1062         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1063         uint32_t soft_register_value = 0;
1064         uint32_t handshake_disables_offset = data->soft_regs_start
1065                                 + smum_get_offsetof(hwmgr,
1066                                         SMU_SoftRegisters, HandshakeDisables);
1067
1068         soft_register_value = cgs_read_ind_register(hwmgr->device,
1069                                 CGS_IND_REG__SMC, handshake_disables_offset);
1070         soft_register_value |= smum_get_mac_definition(hwmgr,
1071                                         SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1072         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1073                         handshake_disables_offset, soft_register_value);
1074         return 0;
1075 }
1076
1077 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1078 {
1079         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1080
1081         /* enable SCLK dpm */
1082         if (!data->sclk_dpm_key_disabled) {
1083                 if (hwmgr->chip_id == CHIP_VEGAM)
1084                         smu7_disable_sclk_vce_handshake(hwmgr);
1085
1086                 PP_ASSERT_WITH_CODE(
1087                 (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
1088                 "Failed to enable SCLK DPM during DPM Start Function!",
1089                 return -EINVAL);
1090         }
1091
1092         /* enable MCLK dpm */
1093         if (0 == data->mclk_dpm_key_disabled) {
1094                 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1095                         smu7_disable_handshake_uvd(hwmgr);
1096
1097                 PP_ASSERT_WITH_CODE(
1098                                 (0 == smum_send_msg_to_smc(hwmgr,
1099                                                 PPSMC_MSG_MCLKDPM_Enable)),
1100                                 "Failed to enable MCLK DPM during DPM Start Function!",
1101                                 return -EINVAL);
1102
1103                 if (hwmgr->chip_family != CHIP_VEGAM)
1104                         PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1105
1106
1107                 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1108                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1109                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1110                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1111                         udelay(10);
1112                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1113                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1114                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1115                 } else {
1116                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1117                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1118                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1119                         udelay(10);
1120                         if (hwmgr->chip_id == CHIP_VEGAM) {
1121                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1122                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1123                         } else {
1124                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1125                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1126                         }
1127                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1128                 }
1129         }
1130
1131         return 0;
1132 }
1133
/*
 * Start DPM: turn on global power management and dynamic sclk power
 * management, prime the PCIE DPM prerequisites, then enable SCLK/MCLK
 * DPM and, when not key-disabled, PCIE DPM and the AC/DC GPIO
 * interrupt. Returns 0 on success, -EINVAL on an enable failure.
 */
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/*enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
						VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	/* NOTE(review): 0x1488 is a raw CI-only register offset with no
	 * symbolic name in this file; its meaning is not derivable here.
	 * Clears bit 0. */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		/* failure here is logged by the assert but deliberately
		 * non-fatal (empty recovery action) */
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
1185
1186 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1187 {
1188         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1189
1190         /* disable SCLK dpm */
1191         if (!data->sclk_dpm_key_disabled) {
1192                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1193                                 "Trying to disable SCLK DPM when DPM is disabled",
1194                                 return 0);
1195                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
1196         }
1197
1198         /* disable MCLK dpm */
1199         if (!data->mclk_dpm_key_disabled) {
1200                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1201                                 "Trying to disable MCLK DPM when DPM is disabled",
1202                                 return 0);
1203                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
1204         }
1205
1206         return 0;
1207 }
1208
/*
 * Stop DPM: turn off global power management and dynamic sclk power
 * management, disable PCIE DPM (unless key-disabled), then SCLK/MCLK
 * DPM and finally voltage control. Returns 0 even when DPM was already
 * stopped (the assert's recovery action is "return 0").
 */
static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	/* voltage DPM can only be disabled while DPM is still running */
	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);

	return 0;
}
1239
1240 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1241 {
1242         bool protection;
1243         enum DPM_EVENT_SRC src;
1244
1245         switch (sources) {
1246         default:
1247                 pr_err("Unknown throttling event sources.");
1248                 /* fall through */
1249         case 0:
1250                 protection = false;
1251                 /* src is unused */
1252                 break;
1253         case (1 << PHM_AutoThrottleSource_Thermal):
1254                 protection = true;
1255                 src = DPM_EVENT_SRC_DIGITAL;
1256                 break;
1257         case (1 << PHM_AutoThrottleSource_External):
1258                 protection = true;
1259                 src = DPM_EVENT_SRC_EXTERNAL;
1260                 break;
1261         case (1 << PHM_AutoThrottleSource_External) |
1262                         (1 << PHM_AutoThrottleSource_Thermal):
1263                 protection = true;
1264                 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1265                 break;
1266         }
1267         /* Order matters - don't enable thermal protection for the wrong source. */
1268         if (protection) {
1269                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1270                                 DPM_EVENT_SRC, src);
1271                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1272                                 THERMAL_PROTECTION_DIS,
1273                                 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1274                                                 PHM_PlatformCaps_ThermalController));
1275         } else
1276                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1277                                 THERMAL_PROTECTION_DIS, 1);
1278 }
1279
1280 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1281                 PHM_AutoThrottleSource source)
1282 {
1283         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1284
1285         if (!(data->active_auto_throttle_sources & (1 << source))) {
1286                 data->active_auto_throttle_sources |= 1 << source;
1287                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1288         }
1289         return 0;
1290 }
1291
/* Enable thermal throttling as an active DPM throttle source. */
static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1296
1297 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1298                 PHM_AutoThrottleSource source)
1299 {
1300         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1301
1302         if (data->active_auto_throttle_sources & (1 << source)) {
1303                 data->active_auto_throttle_sources &= ~(1 << source);
1304                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1305         }
1306         return 0;
1307 }
1308
/* Disable thermal throttling as an active DPM throttle source. */
static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1313
1314 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1315 {
1316         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1317         data->pcie_performance_request = true;
1318
1319         return 0;
1320 }
1321
1322 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1323 {
1324         int tmp_result = 0;
1325         int result = 0;
1326
1327         if (smu7_voltage_control(hwmgr)) {
1328                 tmp_result = smu7_enable_voltage_control(hwmgr);
1329                 PP_ASSERT_WITH_CODE(tmp_result == 0,
1330                                 "Failed to enable voltage control!",
1331                                 result = tmp_result);
1332
1333                 tmp_result = smu7_construct_voltage_tables(hwmgr);
1334                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1335                                 "Failed to construct voltage tables!",
1336                                 result = tmp_result);
1337         }
1338         smum_initialize_mc_reg_table(hwmgr);
1339
1340         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1341                         PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1342                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1343                                 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1344
1345         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1346                         PHM_PlatformCaps_ThermalController))
1347                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1348                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1349
1350         tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1351         PP_ASSERT_WITH_CODE((0 == tmp_result),
1352                         "Failed to program static screen threshold parameters!",
1353                         result = tmp_result);
1354
1355         tmp_result = smu7_enable_display_gap(hwmgr);
1356         PP_ASSERT_WITH_CODE((0 == tmp_result),
1357                         "Failed to enable display gap!", result = tmp_result);
1358
1359         tmp_result = smu7_program_voting_clients(hwmgr);
1360         PP_ASSERT_WITH_CODE((0 == tmp_result),
1361                         "Failed to program voting clients!", result = tmp_result);
1362
1363         tmp_result = smum_process_firmware_header(hwmgr);
1364         PP_ASSERT_WITH_CODE((0 == tmp_result),
1365                         "Failed to process firmware header!", result = tmp_result);
1366
1367         if (hwmgr->chip_id != CHIP_VEGAM) {
1368                 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1369                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1370                                 "Failed to initialize switch from ArbF0 to F1!",
1371                                 result = tmp_result);
1372         }
1373
1374         result = smu7_setup_default_dpm_tables(hwmgr);
1375         PP_ASSERT_WITH_CODE(0 == result,
1376                         "Failed to setup default DPM tables!", return result);
1377
1378         tmp_result = smum_init_smc_table(hwmgr);
1379         PP_ASSERT_WITH_CODE((0 == tmp_result),
1380                         "Failed to initialize SMC table!", result = tmp_result);
1381
1382         tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1383         PP_ASSERT_WITH_CODE((0 == tmp_result),
1384                         "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1385
1386         smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
1387
1388         tmp_result = smu7_enable_sclk_control(hwmgr);
1389         PP_ASSERT_WITH_CODE((0 == tmp_result),
1390                         "Failed to enable SCLK control!", result = tmp_result);
1391
1392         tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1393         PP_ASSERT_WITH_CODE((0 == tmp_result),
1394                         "Failed to enable voltage control!", result = tmp_result);
1395
1396         tmp_result = smu7_enable_ulv(hwmgr);
1397         PP_ASSERT_WITH_CODE((0 == tmp_result),
1398                         "Failed to enable ULV!", result = tmp_result);
1399
1400         tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1401         PP_ASSERT_WITH_CODE((0 == tmp_result),
1402                         "Failed to enable deep sleep master switch!", result = tmp_result);
1403
1404         tmp_result = smu7_enable_didt_config(hwmgr);
1405         PP_ASSERT_WITH_CODE((tmp_result == 0),
1406                         "Failed to enable deep sleep master switch!", result = tmp_result);
1407
1408         tmp_result = smu7_start_dpm(hwmgr);
1409         PP_ASSERT_WITH_CODE((0 == tmp_result),
1410                         "Failed to start DPM!", result = tmp_result);
1411
1412         tmp_result = smu7_enable_smc_cac(hwmgr);
1413         PP_ASSERT_WITH_CODE((0 == tmp_result),
1414                         "Failed to enable SMC CAC!", result = tmp_result);
1415
1416         tmp_result = smu7_enable_power_containment(hwmgr);
1417         PP_ASSERT_WITH_CODE((0 == tmp_result),
1418                         "Failed to enable power containment!", result = tmp_result);
1419
1420         tmp_result = smu7_power_control_set_level(hwmgr);
1421         PP_ASSERT_WITH_CODE((0 == tmp_result),
1422                         "Failed to power control set level!", result = tmp_result);
1423
1424         tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1425         PP_ASSERT_WITH_CODE((0 == tmp_result),
1426                         "Failed to enable thermal auto throttle!", result = tmp_result);
1427
1428         tmp_result = smu7_pcie_performance_request(hwmgr);
1429         PP_ASSERT_WITH_CODE((0 == tmp_result),
1430                         "pcie performance request failed!", result = tmp_result);
1431
1432         return 0;
1433 }
1434
1435 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1436 {
1437         if (!hwmgr->avfs_supported)
1438                 return 0;
1439
1440         if (enable) {
1441                 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1442                                 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1443                         PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1444                                         hwmgr, PPSMC_MSG_EnableAvfs),
1445                                         "Failed to enable AVFS!",
1446                                         return -EINVAL);
1447                 }
1448         } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1449                         CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1450                 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1451                                 hwmgr, PPSMC_MSG_DisableAvfs),
1452                                 "Failed to disable AVFS!",
1453                                 return -EINVAL);
1454         }
1455
1456         return 0;
1457 }
1458
1459 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1460 {
1461         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1462
1463         if (!hwmgr->avfs_supported)
1464                 return 0;
1465
1466         if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1467                 smu7_avfs_control(hwmgr, false);
1468         } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1469                 smu7_avfs_control(hwmgr, false);
1470                 smu7_avfs_control(hwmgr, true);
1471         } else {
1472                 smu7_avfs_control(hwmgr, true);
1473         }
1474
1475         return 0;
1476 }
1477
/**
 * Tear down the SMU7 DPM feature stack, mirroring smu7_enable_dpm_tasks()
 * in reverse order.  Each step's failure is logged and folded into
 * @result via PP_ASSERT_WITH_CODE, but the sequence always runs to
 * completion so later teardown steps are not skipped.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   0 on full success, otherwise the last failing step's code.
 */
int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Disable thermal protection (DIS=1) before unwinding DPM. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

	/* Turn off spread spectrum on both the SPLL and the dynamic path. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_avfs_control(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	/* Return the MC arbiter to its power-on register set. */
	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
1538
/* Intentional no-op: SMU7 has no ASIC-reset work to do here, but the
 * hwmgr interface expects this entry point to exist. */
int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{

	return 0;
}
1544
1545 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1546 {
1547         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1548         struct phm_ppt_v1_information *table_info =
1549                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
1550         struct amdgpu_device *adev = hwmgr->adev;
1551
1552         data->dll_default_on = false;
1553         data->mclk_dpm0_activity_target = 0xa;
1554         data->vddc_vddgfx_delta = 300;
1555         data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1556         data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1557         data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1558         data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1559         data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1560         data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1561         data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1562         data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1563         data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1564         data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1565
1566         data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1567         data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1568         data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1569         /* need to set voltage control types before EVV patching */
1570         data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1571         data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1572         data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1573         data->enable_tdc_limit_feature = true;
1574         data->enable_pkg_pwr_tracking_feature = true;
1575         data->force_pcie_gen = PP_PCIEGenInvalid;
1576         data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1577         data->current_profile_setting.bupdate_sclk = 1;
1578         data->current_profile_setting.sclk_up_hyst = 0;
1579         data->current_profile_setting.sclk_down_hyst = 100;
1580         data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1581         data->current_profile_setting.bupdate_sclk = 1;
1582         data->current_profile_setting.mclk_up_hyst = 0;
1583         data->current_profile_setting.mclk_down_hyst = 100;
1584         data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1585         hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1586         hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1587         hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1588
1589         if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
1590                 uint8_t tmp1, tmp2;
1591                 uint16_t tmp3 = 0;
1592                 atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1593                                                 &tmp3);
1594                 tmp3 = (tmp3 >> 5) & 0x3;
1595                 data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1596         } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1597                 data->vddc_phase_shed_control = 1;
1598         } else {
1599                 data->vddc_phase_shed_control = 0;
1600         }
1601
1602         if (hwmgr->chip_id  == CHIP_HAWAII) {
1603                 data->thermal_temp_setting.temperature_low = 94500;
1604                 data->thermal_temp_setting.temperature_high = 95000;
1605                 data->thermal_temp_setting.temperature_shutdown = 104000;
1606         } else {
1607                 data->thermal_temp_setting.temperature_low = 99500;
1608                 data->thermal_temp_setting.temperature_high = 100000;
1609                 data->thermal_temp_setting.temperature_shutdown = 104000;
1610         }
1611
1612         data->fast_watermark_threshold = 100;
1613         if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1614                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1615                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1616         else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1617                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1618                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1619
1620         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1621                         PHM_PlatformCaps_ControlVDDGFX)) {
1622                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1623                         VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1624                         data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1625                 }
1626         }
1627
1628         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1629                         PHM_PlatformCaps_EnableMVDDControl)) {
1630                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1631                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1632                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1633                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1634                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1635                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1636         }
1637
1638         if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1639                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1640                         PHM_PlatformCaps_ControlVDDGFX);
1641
1642         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1643                         PHM_PlatformCaps_ControlVDDCI)) {
1644                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1645                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1646                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1647                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1648                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1649                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1650         }
1651
1652         if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1653                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1654                                 PHM_PlatformCaps_EnableMVDDControl);
1655
1656         if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1657                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1658                                 PHM_PlatformCaps_ControlVDDCI);
1659
1660         if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1661                 && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1662                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1663                                         PHM_PlatformCaps_ClockStretcher);
1664
1665         data->pcie_gen_performance.max = PP_PCIEGen1;
1666         data->pcie_gen_performance.min = PP_PCIEGen3;
1667         data->pcie_gen_power_saving.max = PP_PCIEGen1;
1668         data->pcie_gen_power_saving.min = PP_PCIEGen3;
1669         data->pcie_lane_performance.max = 0;
1670         data->pcie_lane_performance.min = 16;
1671         data->pcie_lane_power_saving.max = 0;
1672         data->pcie_lane_power_saving.min = 16;
1673
1674
1675         if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1676                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1677                               PHM_PlatformCaps_UVDPowerGating);
1678         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1679                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1680                               PHM_PlatformCaps_VCEPowerGating);
1681 }
1682
1683 /**
1684 * Get Leakage VDDC based on leakage ID.
1685 *
1686 * @param    hwmgr  the address of the powerplay hardware manager.
1687 * @return   always 0
1688 */
1689 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1690 {
1691         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1692         uint16_t vv_id;
1693         uint16_t vddc = 0;
1694         uint16_t vddgfx = 0;
1695         uint16_t i, j;
1696         uint32_t sclk = 0;
1697         struct phm_ppt_v1_information *table_info =
1698                         (struct phm_ppt_v1_information *)hwmgr->pptable;
1699         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1700
1701
1702         for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1703                 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1704
1705                 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1706                         if ((hwmgr->pp_table_version == PP_TABLE_V1)
1707                             && !phm_get_sclk_for_voltage_evv(hwmgr,
1708                                                 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1709                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1710                                                         PHM_PlatformCaps_ClockStretcher)) {
1711                                         sclk_table = table_info->vdd_dep_on_sclk;
1712
1713                                         for (j = 1; j < sclk_table->count; j++) {
1714                                                 if (sclk_table->entries[j].clk == sclk &&
1715                                                                 sclk_table->entries[j].cks_enable == 0) {
1716                                                         sclk += 5000;
1717                                                         break;
1718                                                 }
1719                                         }
1720                                 }
1721                                 if (0 == atomctrl_get_voltage_evv_on_sclk
1722                                     (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1723                                      vv_id, &vddgfx)) {
1724                                         /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
1725                                         PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1726
1727                                         /* the voltage should not be zero nor equal to leakage ID */
1728                                         if (vddgfx != 0 && vddgfx != vv_id) {
1729                                                 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1730                                                 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1731                                                 data->vddcgfx_leakage.count++;
1732                                         }
1733                                 } else {
1734                                         pr_info("Error retrieving EVV voltage value!\n");
1735                                 }
1736                         }
1737                 } else {
1738                         if ((hwmgr->pp_table_version == PP_TABLE_V0)
1739                                 || !phm_get_sclk_for_voltage_evv(hwmgr,
1740                                         table_info->vddc_lookup_table, vv_id, &sclk)) {
1741                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1742                                                 PHM_PlatformCaps_ClockStretcher)) {
1743                                         if (table_info == NULL)
1744                                                 return -EINVAL;
1745                                         sclk_table = table_info->vdd_dep_on_sclk;
1746
1747                                         for (j = 1; j < sclk_table->count; j++) {
1748                                                 if (sclk_table->entries[j].clk == sclk &&
1749                                                                 sclk_table->entries[j].cks_enable == 0) {
1750                                                         sclk += 5000;
1751                                                         break;
1752                                                 }
1753                                         }
1754                                 }
1755
1756                                 if (phm_get_voltage_evv_on_sclk(hwmgr,
1757                                                         VOLTAGE_TYPE_VDDC,
1758                                                         sclk, vv_id, &vddc) == 0) {
1759                                         if (vddc >= 2000 || vddc == 0)
1760                                                 return -EINVAL;
1761                                 } else {
1762                                         pr_debug("failed to retrieving EVV voltage!\n");
1763                                         continue;
1764                                 }
1765
1766                                 /* the voltage should not be zero nor equal to leakage ID */
1767                                 if (vddc != 0 && vddc != vv_id) {
1768                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
1769                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
1770                                         data->vddc_leakage.count++;
1771                                 }
1772                         }
1773                 }
1774         }
1775
1776         return 0;
1777 }
1778
1779 /**
1780  * Change virtual leakage voltage to actual value.
1781  *
1782  * @param     hwmgr  the address of the powerplay hardware manager.
1783  * @param     pointer to changing voltage
1784  * @param     pointer to leakage table
1785  */
1786 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1787                 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1788 {
1789         uint32_t index;
1790
1791         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1792         for (index = 0; index < leakage_table->count; index++) {
1793                 /* if this voltage matches a leakage voltage ID */
1794                 /* patch with actual leakage voltage */
1795                 if (leakage_table->leakage_id[index] == *voltage) {
1796                         *voltage = leakage_table->actual_voltage[index];
1797                         break;
1798                 }
1799         }
1800
1801         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1802                 pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
1803 }
1804
1805 /**
1806 * Patch voltage lookup table by EVV leakages.
1807 *
1808 * @param     hwmgr  the address of the powerplay hardware manager.
1809 * @param     pointer to voltage lookup table
1810 * @param     pointer to leakage table
1811 * @return     always 0
1812 */
1813 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1814                 phm_ppt_v1_voltage_lookup_table *lookup_table,
1815                 struct smu7_leakage_voltage *leakage_table)
1816 {
1817         uint32_t i;
1818
1819         for (i = 0; i < lookup_table->count; i++)
1820                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1821                                 &lookup_table->entries[i].us_vdd, leakage_table);
1822
1823         return 0;
1824 }
1825
1826 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1827                 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1828                 uint16_t *vddc)
1829 {
1830         struct phm_ppt_v1_information *table_info =
1831                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
1832         smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1833         hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1834                         table_info->max_clock_voltage_on_dc.vddc;
1835         return 0;
1836 }
1837
1838 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1839                 struct pp_hwmgr *hwmgr)
1840 {
1841         uint8_t entry_id;
1842         uint8_t voltage_id;
1843         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1844         struct phm_ppt_v1_information *table_info =
1845                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
1846
1847         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1848                         table_info->vdd_dep_on_sclk;
1849         struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1850                         table_info->vdd_dep_on_mclk;
1851         struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1852                         table_info->mm_dep_table;
1853
1854         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1855                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1856                         voltage_id = sclk_table->entries[entry_id].vddInd;
1857                         sclk_table->entries[entry_id].vddgfx =
1858                                 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1859                 }
1860         } else {
1861                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1862                         voltage_id = sclk_table->entries[entry_id].vddInd;
1863                         sclk_table->entries[entry_id].vddc =
1864                                 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1865                 }
1866         }
1867
1868         for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1869                 voltage_id = mclk_table->entries[entry_id].vddInd;
1870                 mclk_table->entries[entry_id].vddc =
1871                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1872         }
1873
1874         for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1875                 voltage_id = mm_table->entries[entry_id].vddcInd;
1876                 mm_table->entries[entry_id].vddc =
1877                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1878         }
1879
1880         return 0;
1881
1882 }
1883
1884 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1885                         phm_ppt_v1_voltage_lookup_table *look_up_table,
1886                         phm_ppt_v1_voltage_lookup_record *record)
1887 {
1888         uint32_t i;
1889
1890         PP_ASSERT_WITH_CODE((NULL != look_up_table),
1891                 "Lookup Table empty.", return -EINVAL);
1892         PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1893                 "Lookup Table empty.", return -EINVAL);
1894
1895         i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
1896         PP_ASSERT_WITH_CODE((i >= look_up_table->count),
1897                 "Lookup Table is full.", return -EINVAL);
1898
1899         /* This is to avoid entering duplicate calculated records. */
1900         for (i = 0; i < look_up_table->count; i++) {
1901                 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1902                         if (look_up_table->entries[i].us_calculated == 1)
1903                                 return 0;
1904                         break;
1905                 }
1906         }
1907
1908         look_up_table->entries[i].us_calculated = 1;
1909         look_up_table->entries[i].us_vdd = record->us_vdd;
1910         look_up_table->entries[i].us_cac_low = record->us_cac_low;
1911         look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1912         look_up_table->entries[i].us_cac_high = record->us_cac_high;
1913         /* Only increment the count when we're appending, not replacing duplicate entry. */
1914         if (i == look_up_table->count)
1915                 look_up_table->count++;
1916
1917         return 0;
1918 }
1919
1920
/* On split-rail (SVID2 vddgfx) boards, derive the missing rail's voltage
 * for every sclk/mclk dependency entry by applying the table's signed
 * 16-bit vdd_offset, and register each derived voltage in the opposite
 * rail's lookup table via phm_add_voltage().  Always returns 0. */
static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			/* Bit 15 set means vdd_offset is a negative 16-bit
			 * offset; subtracting 0xFFFF reconstructs the signed
			 * value (off-by-one vs. two's complement is the
			 * pptable's own convention). */
			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset;

			/* Derived vddc doubles as all three CAC levels. */
			sclk_table->entries[entry_id].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}

		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset;

			/* mclk derives vddgfx from vddc, mirrored into CAC levels. */
			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1962
/* Same derivation as smu7_calc_voltage_dependency_tables(), but for the
 * multimedia (UVD/VCE) dependency table: on split-rail boards, compute
 * vddgfx from vddc plus the signed 16-bit vddgfx_offset and register it
 * in the vddgfx lookup table.  Always returns 0. */
static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
			/* Bit 15 marks a negative offset (pptable convention:
			 * reconstruct by subtracting 0xFFFF). */
			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
			else
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset;

			/* Add the calculated VDDGFX to the VDDGFX lookup table */
			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1988
1989 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1990                 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1991 {
1992         uint32_t table_size, i, j;
1993         struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1994         table_size = lookup_table->count;
1995
1996         PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1997                 "Lookup table is empty", return -EINVAL);
1998
1999         /* Sorting voltages */
2000         for (i = 0; i < table_size - 1; i++) {
2001                 for (j = i + 1; j > 0; j--) {
2002                         if (lookup_table->entries[j].us_vdd <
2003                                         lookup_table->entries[j - 1].us_vdd) {
2004                                 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
2005                                 lookup_table->entries[j - 1] = lookup_table->entries[j];
2006                                 lookup_table->entries[j] = tmp_voltage_lookup_record;
2007                         }
2008                 }
2009         }
2010
2011         return 0;
2012 }
2013
2014 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2015 {
2016         int result = 0;
2017         int tmp_result;
2018         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2019         struct phm_ppt_v1_information *table_info =
2020                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2021
2022         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2023                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2024                         table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2025                 if (tmp_result != 0)
2026                         result = tmp_result;
2027
2028                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2029                         &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2030         } else {
2031
2032                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2033                                 table_info->vddc_lookup_table, &(data->vddc_leakage));
2034                 if (tmp_result)
2035                         result = tmp_result;
2036
2037                 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2038                                 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2039                 if (tmp_result)
2040                         result = tmp_result;
2041         }
2042
2043         tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2044         if (tmp_result)
2045                 result = tmp_result;
2046
2047         tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2048         if (tmp_result)
2049                 result = tmp_result;
2050
2051         tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2052         if (tmp_result)
2053                 result = tmp_result;
2054
2055         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2056         if (tmp_result)
2057                 result = tmp_result;
2058
2059         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2060         if (tmp_result)
2061                 result = tmp_result;
2062
2063         return result;
2064 }
2065
2066 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2067 {
2068         struct phm_ppt_v1_information *table_info =
2069                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2070
2071         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2072                                                 table_info->vdd_dep_on_sclk;
2073         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2074                                                 table_info->vdd_dep_on_mclk;
2075
2076         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2077                 "VDD dependency on SCLK table is missing.",
2078                 return -EINVAL);
2079         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2080                 "VDD dependency on SCLK table has to have is missing.",
2081                 return -EINVAL);
2082
2083         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2084                 "VDD dependency on MCLK table is missing",
2085                 return -EINVAL);
2086         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2087                 "VDD dependency on MCLK table has to have is missing.",
2088                 return -EINVAL);
2089
2090         table_info->max_clock_voltage_on_ac.sclk =
2091                 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2092         table_info->max_clock_voltage_on_ac.mclk =
2093                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2094         table_info->max_clock_voltage_on_ac.vddc =
2095                 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2096         table_info->max_clock_voltage_on_ac.vddci =
2097                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2098
2099         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2100         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2101         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2102         hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2103
2104         return 0;
2105 }
2106
/*
 * NOTE(review): board-specific workaround.  On Polaris10 revision 0xC7
 * cards with the subsystem IDs matched below, the top MCLK level's VDDC
 * is re-pointed at a lookup entry of at least 1000 mV (presumably a
 * stability requirement on those boards -- confirm against the board
 * qualification data).
 */
static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct amdgpu_device *adev = hwmgr->adev;

	/* Only v1 pptables carry the tables this workaround patches. */
	if (table_info != NULL) {
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		lookup_table = table_info->vddc_lookup_table;
	} else
		return 0;

	hw_revision = adev->pdev->revision;
	sub_sys_id = adev->pdev->subsystem_device;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
		/* Top MCLK level already runs at >= 1000 mV: nothing to do. */
		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		/* Re-point the top MCLK level at the first lookup entry that
		 * is >= 1000 mV and is not a leakage ID (below 0xff01). */
		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}
2143
/*
 * Initialize thermal/fan parameters from the power-play table.
 *
 * Two independent steps:
 *  1. If the VDDC peak-current-control GPIO pin is assigned, program the
 *     matching GNB-slow / NB-PS1 / DPM bit in CNB_PWRMGT_CNTL for that
 *     pin's bit position.
 *  2. If the CAC/DTP table provides a target operating temperature and a
 *     fan-control mode is set, derive the fan PWM/RPM limits and copy the
 *     operating-temperature limits into hwmgr->dyn_state.
 */
static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t temp_reg;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);


	/* Step 1: route the PCC GPIO bit into CNB_PWRMGT_CNTL. */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			/* Other bit positions need no CNB_PWRMGT_CNTL change. */
			break;
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
	}

	/* No v1 pptable: fan/thermal limits cannot be derived here. */
	if (table_info == NULL)
		return 0;

	/* Step 2: derive fan-control limits from the CAC/DTP table. */
	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;

		/* Back the target temperature off by 50 (clamped at 0). */
		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;

		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
		table_info->cac_dtp_table->usOperatingTempStep = 1;
		table_info->cac_dtp_table->usOperatingTempHyst = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;

		/* Mirror the adjusted temperature limits into dyn_state. */
		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
			       table_info->cac_dtp_table->usOperatingTempMinLimit;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
			       table_info->cac_dtp_table->usOperatingTempMaxLimit;

		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
			       table_info->cac_dtp_table->usOperatingTempStep;

		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
			       table_info->cac_dtp_table->usTargetOperatingTemp;
		if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODFuzzyFanControlSupport);
	}

	return 0;
}
2230
2231 /**
2232  * Change virtual leakage voltage to actual value.
2233  *
2234  * @param     hwmgr  the address of the powerplay hardware manager.
2235  * @param     pointer to changing voltage
2236  * @param     pointer to leakage table
2237  */
2238 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2239                 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2240 {
2241         uint32_t index;
2242
2243         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2244         for (index = 0; index < leakage_table->count; index++) {
2245                 /* if this voltage matches a leakage voltage ID */
2246                 /* patch with actual leakage voltage */
2247                 if (leakage_table->leakage_id[index] == *voltage) {
2248                         *voltage = leakage_table->actual_voltage[index];
2249                         break;
2250                 }
2251         }
2252
2253         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2254                 pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
2255 }
2256
2257
2258 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2259                               struct phm_clock_voltage_dependency_table *tab)
2260 {
2261         uint16_t i;
2262         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2263
2264         if (tab)
2265                 for (i = 0; i < tab->count; i++)
2266                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2267                                                 &data->vddc_leakage);
2268
2269         return 0;
2270 }
2271
2272 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2273                                struct phm_clock_voltage_dependency_table *tab)
2274 {
2275         uint16_t i;
2276         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2277
2278         if (tab)
2279                 for (i = 0; i < tab->count; i++)
2280                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2281                                                         &data->vddci_leakage);
2282
2283         return 0;
2284 }
2285
2286 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2287                                   struct phm_vce_clock_voltage_dependency_table *tab)
2288 {
2289         uint16_t i;
2290         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2291
2292         if (tab)
2293                 for (i = 0; i < tab->count; i++)
2294                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2295                                                         &data->vddc_leakage);
2296
2297         return 0;
2298 }
2299
2300
2301 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2302                                   struct phm_uvd_clock_voltage_dependency_table *tab)
2303 {
2304         uint16_t i;
2305         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2306
2307         if (tab)
2308                 for (i = 0; i < tab->count; i++)
2309                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2310                                                         &data->vddc_leakage);
2311
2312         return 0;
2313 }
2314
2315 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2316                                          struct phm_phase_shedding_limits_table *tab)
2317 {
2318         uint16_t i;
2319         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2320
2321         if (tab)
2322                 for (i = 0; i < tab->count; i++)
2323                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2324                                                         &data->vddc_leakage);
2325
2326         return 0;
2327 }
2328
2329 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2330                                    struct phm_samu_clock_voltage_dependency_table *tab)
2331 {
2332         uint16_t i;
2333         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2334
2335         if (tab)
2336                 for (i = 0; i < tab->count; i++)
2337                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2338                                                         &data->vddc_leakage);
2339
2340         return 0;
2341 }
2342
2343 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2344                                   struct phm_acp_clock_voltage_dependency_table *tab)
2345 {
2346         uint16_t i;
2347         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2348
2349         if (tab)
2350                 for (i = 0; i < tab->count; i++)
2351                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2352                                         &data->vddc_leakage);
2353
2354         return 0;
2355 }
2356
2357 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2358                                   struct phm_clock_and_voltage_limits *tab)
2359 {
2360         uint32_t vddc, vddci;
2361         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2362
2363         if (tab) {
2364                 vddc = tab->vddc;
2365                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2366                                                    &data->vddc_leakage);
2367                 tab->vddc = vddc;
2368                 vddci = tab->vddci;
2369                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2370                                                    &data->vddci_leakage);
2371                 tab->vddci = vddci;
2372         }
2373
2374         return 0;
2375 }
2376
2377 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2378 {
2379         uint32_t i;
2380         uint32_t vddc;
2381         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2382
2383         if (tab) {
2384                 for (i = 0; i < tab->count; i++) {
2385                         vddc = (uint32_t)(tab->entries[i].Vddc);
2386                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2387                         tab->entries[i].Vddc = (uint16_t)vddc;
2388                 }
2389         }
2390
2391         return 0;
2392 }
2393
2394 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2395 {
2396         int tmp;
2397
2398         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2399         if (tmp)
2400                 return -EINVAL;
2401
2402         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2403         if (tmp)
2404                 return -EINVAL;
2405
2406         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2407         if (tmp)
2408                 return -EINVAL;
2409
2410         tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2411         if (tmp)
2412                 return -EINVAL;
2413
2414         tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2415         if (tmp)
2416                 return -EINVAL;
2417
2418         tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2419         if (tmp)
2420                 return -EINVAL;
2421
2422         tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2423         if (tmp)
2424                 return -EINVAL;
2425
2426         tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2427         if (tmp)
2428                 return -EINVAL;
2429
2430         tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2431         if (tmp)
2432                 return -EINVAL;
2433
2434         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2435         if (tmp)
2436                 return -EINVAL;
2437
2438         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2439         if (tmp)
2440                 return -EINVAL;
2441
2442         tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2443         if (tmp)
2444                 return -EINVAL;
2445
2446         return 0;
2447 }
2448
2449
2450 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2451 {
2452         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2453
2454         struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2455         struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2456         struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2457
2458         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2459                 "VDDC dependency on SCLK table is missing. This table is mandatory",
2460                 return -EINVAL);
2461         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2462                 "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2463                 return -EINVAL);
2464
2465         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2466                 "VDDC dependency on MCLK table is missing. This table is mandatory",
2467                 return -EINVAL);
2468         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2469                 "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2470                 return -EINVAL);
2471
2472         data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2473         data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2474
2475         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2476                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2477         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2478                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2479         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2480                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2481
2482         if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2483                 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2484                 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2485         }
2486
2487         if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2488                 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2489
2490         return 0;
2491 }
2492
2493 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2494 {
2495         kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2496         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2497         kfree(hwmgr->backend);
2498         hwmgr->backend = NULL;
2499
2500         return 0;
2501 }
2502
2503 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2504 {
2505         uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2506         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2507         int i;
2508
2509         if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2510                 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2511                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2512                         if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2513                                                                 virtual_voltage_id,
2514                                                                 efuse_voltage_id) == 0) {
2515                                 if (vddc != 0 && vddc != virtual_voltage_id) {
2516                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2517                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2518                                         data->vddc_leakage.count++;
2519                                 }
2520                                 if (vddci != 0 && vddci != virtual_voltage_id) {
2521                                         data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2522                                         data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2523                                         data->vddci_leakage.count++;
2524                                 }
2525                         }
2526                 }
2527         }
2528         return 0;
2529 }
2530
2531 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2532 {
2533         struct smu7_hwmgr *data;
2534         int result = 0;
2535
2536         data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2537         if (data == NULL)
2538                 return -ENOMEM;
2539
2540         hwmgr->backend = data;
2541         smu7_patch_voltage_workaround(hwmgr);
2542         smu7_init_dpm_defaults(hwmgr);
2543
2544         /* Get leakage voltage based on leakage ID. */
2545         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2546                         PHM_PlatformCaps_EVV)) {
2547                 result = smu7_get_evv_voltages(hwmgr);
2548                 if (result) {
2549                         pr_info("Get EVV Voltage Failed.  Abort Driver loading!\n");
2550                         return -EINVAL;
2551                 }
2552         } else {
2553                 smu7_get_elb_voltages(hwmgr);
2554         }
2555
2556         if (hwmgr->pp_table_version == PP_TABLE_V1) {
2557                 smu7_complete_dependency_tables(hwmgr);
2558                 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2559         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2560                 smu7_patch_dependency_tables_with_leakage(hwmgr);
2561                 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2562         }
2563
2564         /* Initalize Dynamic State Adjustment Rule Settings */
2565         result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2566
2567         if (0 == result) {
2568                 struct amdgpu_device *adev = hwmgr->adev;
2569
2570                 data->is_tlu_enabled = false;
2571
2572                 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2573                                                         SMU7_MAX_HARDWARE_POWERLEVELS;
2574                 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2575                 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2576
2577                 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2578                 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2579                         data->pcie_spc_cap = 20;
2580                 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2581
2582                 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2583 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2584                 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2585                 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2586                 smu7_thermal_parameter_init(hwmgr);
2587         } else {
2588                 /* Ignore return value in here, we are cleaning up a mess. */
2589                 smu7_hwmgr_backend_fini(hwmgr);
2590         }
2591
2592         return 0;
2593 }
2594
2595 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2596 {
2597         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2598         uint32_t level, tmp;
2599
2600         if (!data->pcie_dpm_key_disabled) {
2601                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2602                         level = 0;
2603                         tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2604                         while (tmp >>= 1)
2605                                 level++;
2606
2607                         if (level)
2608                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2609                                                 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2610                 }
2611         }
2612
2613         if (!data->sclk_dpm_key_disabled) {
2614                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2615                         level = 0;
2616                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2617                         while (tmp >>= 1)
2618                                 level++;
2619
2620                         if (level)
2621                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2622                                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2623                                                 (1 << level));
2624                 }
2625         }
2626
2627         if (!data->mclk_dpm_key_disabled) {
2628                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2629                         level = 0;
2630                         tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2631                         while (tmp >>= 1)
2632                                 level++;
2633
2634                         if (level)
2635                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2636                                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2637                                                 (1 << level));
2638                 }
2639         }
2640
2641         return 0;
2642 }
2643
2644 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2645 {
2646         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2647
2648         if (hwmgr->pp_table_version == PP_TABLE_V1)
2649                 phm_apply_dal_min_voltage_request(hwmgr);
2650 /* TO DO  for v0 iceland and Ci*/
2651
2652         if (!data->sclk_dpm_key_disabled) {
2653                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2654                         smum_send_msg_to_smc_with_parameter(hwmgr,
2655                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
2656                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2657         }
2658
2659         if (!data->mclk_dpm_key_disabled) {
2660                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2661                         smum_send_msg_to_smc_with_parameter(hwmgr,
2662                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
2663                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2664         }
2665
2666         return 0;
2667 }
2668
2669 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2670 {
2671         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2672
2673         if (!smum_is_dpm_running(hwmgr))
2674                 return -EINVAL;
2675
2676         if (!data->pcie_dpm_key_disabled) {
2677                 smum_send_msg_to_smc(hwmgr,
2678                                 PPSMC_MSG_PCIeDPM_UnForceLevel);
2679         }
2680
2681         return smu7_upload_dpm_level_enable_mask(hwmgr);
2682 }
2683
2684 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2685 {
2686         struct smu7_hwmgr *data =
2687                         (struct smu7_hwmgr *)(hwmgr->backend);
2688         uint32_t level;
2689
2690         if (!data->sclk_dpm_key_disabled)
2691                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2692                         level = phm_get_lowest_enabled_level(hwmgr,
2693                                                               data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2694                         smum_send_msg_to_smc_with_parameter(hwmgr,
2695                                                             PPSMC_MSG_SCLKDPM_SetEnabledMask,
2696                                                             (1 << level));
2697
2698         }
2699
2700         if (!data->mclk_dpm_key_disabled) {
2701                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2702                         level = phm_get_lowest_enabled_level(hwmgr,
2703                                                               data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2704                         smum_send_msg_to_smc_with_parameter(hwmgr,
2705                                                             PPSMC_MSG_MCLKDPM_SetEnabledMask,
2706                                                             (1 << level));
2707                 }
2708         }
2709
2710         if (!data->pcie_dpm_key_disabled) {
2711                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2712                         level = phm_get_lowest_enabled_level(hwmgr,
2713                                                               data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2714                         smum_send_msg_to_smc_with_parameter(hwmgr,
2715                                                             PPSMC_MSG_PCIeDPM_ForceLevel,
2716                                                             (level));
2717                 }
2718         }
2719
2720         return 0;
2721 }
2722
/* Choose the SCLK/MCLK/PCIe DPM level masks used by the profiling
 * (PROFILE_*) forced levels, and cache the resulting clocks in
 * hwmgr->pstate_sclk / hwmgr->pstate_mclk.
 *
 * The standard profile targets the second-highest golden MCLK level (or
 * the only level, if just one exists) and derives a matching SCLK from
 * the golden-table top sclk/mclk ratio.  MIN_SCLK / MIN_MCLK / PEAK
 * then override the relevant mask.
 *
 * Returns 0 on success, -EINVAL if the golden MCLK table is empty.
 */
static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
{
	uint32_t percentage;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	int32_t tmp_mclk;
	int32_t tmp_sclk;
	int32_t count;

	if (golden_dpm_table->mclk_table.count < 1)
		return -EINVAL;

	/* Ratio (percent) of the highest golden SCLK to the highest golden
	 * MCLK; overwritten below for single-MCLK-level tables.
	 */
	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

	if (golden_dpm_table->mclk_table.count == 1) {
		/* Only one MCLK level: use it and a fixed 70% ratio. */
		percentage = 70;
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
	} else {
		/* Otherwise target the second-highest MCLK level. */
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
	}

	tmp_sclk = tmp_mclk * percentage / 100;

	if (hwmgr->pp_table_version == PP_TABLE_V0) {
		/* Snap tmp_sclk down to the nearest entry of the v0
		 * vddc/sclk dependency table (scanned high to low).
		 */
		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
			count >= 0; count--) {
			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		/* Nothing low enough, or the caller asked for minimum SCLK. */
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
		/* Same snapping, against the v1 vdd/sclk dependency table. */
		struct phm_ppt_v1_information *table_info =
				(struct phm_ppt_v1_information *)(hwmgr->pptable);

		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
		*mclk_mask = 0;
	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;

	/* PCIe is always pinned to its fastest level while profiling. */
	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
	hwmgr->pstate_sclk = tmp_sclk;
	hwmgr->pstate_mclk = tmp_mclk;

	return 0;
}
2797
2798 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2799                                 enum amd_dpm_forced_level level)
2800 {
2801         int ret = 0;
2802         uint32_t sclk_mask = 0;
2803         uint32_t mclk_mask = 0;
2804         uint32_t pcie_mask = 0;
2805
2806         if (hwmgr->pstate_sclk == 0)
2807                 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2808
2809         switch (level) {
2810         case AMD_DPM_FORCED_LEVEL_HIGH:
2811                 ret = smu7_force_dpm_highest(hwmgr);
2812                 break;
2813         case AMD_DPM_FORCED_LEVEL_LOW:
2814                 ret = smu7_force_dpm_lowest(hwmgr);
2815                 break;
2816         case AMD_DPM_FORCED_LEVEL_AUTO:
2817                 ret = smu7_unforce_dpm_levels(hwmgr);
2818                 break;
2819         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2820         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2821         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2822         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2823                 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2824                 if (ret)
2825                         return ret;
2826                 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
2827                 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
2828                 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
2829                 break;
2830         case AMD_DPM_FORCED_LEVEL_MANUAL:
2831         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2832         default:
2833                 break;
2834         }
2835
2836         if (!ret) {
2837                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2838                         smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2839                 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2840                         smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
2841         }
2842         return ret;
2843 }
2844
/* Size, in bytes, of this driver's private hardware power-state struct. */
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu7_power_state);
}
2849
2850 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2851                                  uint32_t vblank_time_us)
2852 {
2853         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2854         uint32_t switch_limit_us;
2855
2856         switch (hwmgr->chip_id) {
2857         case CHIP_POLARIS10:
2858         case CHIP_POLARIS11:
2859         case CHIP_POLARIS12:
2860                 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2861                 break;
2862         case CHIP_VEGAM:
2863                 switch_limit_us = 30;
2864                 break;
2865         default:
2866                 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2867                 break;
2868         }
2869
2870         if (vblank_time_us < switch_limit_us)
2871                 return true;
2872         else
2873                 return false;
2874 }
2875
2876 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2877                                 struct pp_power_state *request_ps,
2878                         const struct pp_power_state *current_ps)
2879 {
2880         struct amdgpu_device *adev = hwmgr->adev;
2881         struct smu7_power_state *smu7_ps =
2882                                 cast_phw_smu7_power_state(&request_ps->hardware);
2883         uint32_t sclk;
2884         uint32_t mclk;
2885         struct PP_Clocks minimum_clocks = {0};
2886         bool disable_mclk_switching;
2887         bool disable_mclk_switching_for_frame_lock;
2888         const struct phm_clock_and_voltage_limits *max_limits;
2889         uint32_t i;
2890         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2891         struct phm_ppt_v1_information *table_info =
2892                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2893         int32_t count;
2894         int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2895
2896         data->battery_state = (PP_StateUILabel_Battery ==
2897                         request_ps->classification.ui_label);
2898
2899         PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2900                                  "VI should always have 2 performance levels",
2901                                 );
2902
2903         max_limits = adev->pm.ac_power ?
2904                         &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2905                         &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2906
2907         /* Cap clock DPM tables at DC MAX if it is in DC. */
2908         if (!adev->pm.ac_power) {
2909                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2910                         if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2911                                 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2912                         if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2913                                 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2914                 }
2915         }
2916
2917         minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
2918         minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2919
2920         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2921                         PHM_PlatformCaps_StablePState)) {
2922                 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2923                 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2924
2925                 for (count = table_info->vdd_dep_on_sclk->count - 1;
2926                                 count >= 0; count--) {
2927                         if (stable_pstate_sclk >=
2928                                         table_info->vdd_dep_on_sclk->entries[count].clk) {
2929                                 stable_pstate_sclk =
2930                                                 table_info->vdd_dep_on_sclk->entries[count].clk;
2931                                 break;
2932                         }
2933                 }
2934
2935                 if (count < 0)
2936                         stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2937
2938                 stable_pstate_mclk = max_limits->mclk;
2939
2940                 minimum_clocks.engineClock = stable_pstate_sclk;
2941                 minimum_clocks.memoryClock = stable_pstate_mclk;
2942         }
2943
2944         disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2945                                     hwmgr->platform_descriptor.platformCaps,
2946                                     PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2947
2948
2949         if (hwmgr->display_config->num_display == 0)
2950                 disable_mclk_switching = false;
2951         else
2952                 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
2953                                           disable_mclk_switching_for_frame_lock ||
2954                                           smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
2955
2956         sclk = smu7_ps->performance_levels[0].engine_clock;
2957         mclk = smu7_ps->performance_levels[0].memory_clock;
2958
2959         if (disable_mclk_switching)
2960                 mclk = smu7_ps->performance_levels
2961                 [smu7_ps->performance_level_count - 1].memory_clock;
2962
2963         if (sclk < minimum_clocks.engineClock)
2964                 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2965                                 max_limits->sclk : minimum_clocks.engineClock;
2966
2967         if (mclk < minimum_clocks.memoryClock)
2968                 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2969                                 max_limits->mclk : minimum_clocks.memoryClock;
2970
2971         smu7_ps->performance_levels[0].engine_clock = sclk;
2972         smu7_ps->performance_levels[0].memory_clock = mclk;
2973
2974         smu7_ps->performance_levels[1].engine_clock =
2975                 (smu7_ps->performance_levels[1].engine_clock >=
2976                                 smu7_ps->performance_levels[0].engine_clock) ?
2977                                                 smu7_ps->performance_levels[1].engine_clock :
2978                                                 smu7_ps->performance_levels[0].engine_clock;
2979
2980         if (disable_mclk_switching) {
2981                 if (mclk < smu7_ps->performance_levels[1].memory_clock)
2982                         mclk = smu7_ps->performance_levels[1].memory_clock;
2983
2984                 smu7_ps->performance_levels[0].memory_clock = mclk;
2985                 smu7_ps->performance_levels[1].memory_clock = mclk;
2986         } else {
2987                 if (smu7_ps->performance_levels[1].memory_clock <
2988                                 smu7_ps->performance_levels[0].memory_clock)
2989                         smu7_ps->performance_levels[1].memory_clock =
2990                                         smu7_ps->performance_levels[0].memory_clock;
2991         }
2992
2993         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2994                         PHM_PlatformCaps_StablePState)) {
2995                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2996                         smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
2997                         smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
2998                         smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
2999                         smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
3000                 }
3001         }
3002         return 0;
3003 }
3004
3005
3006 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3007 {
3008         struct pp_power_state  *ps;
3009         struct smu7_power_state  *smu7_ps;
3010
3011         if (hwmgr == NULL)
3012                 return -EINVAL;
3013
3014         ps = hwmgr->request_ps;
3015
3016         if (ps == NULL)
3017                 return -EINVAL;
3018
3019         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3020
3021         if (low)
3022                 return smu7_ps->performance_levels[0].memory_clock;
3023         else
3024                 return smu7_ps->performance_levels
3025                                 [smu7_ps->performance_level_count-1].memory_clock;
3026 }
3027
3028 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3029 {
3030         struct pp_power_state  *ps;
3031         struct smu7_power_state  *smu7_ps;
3032
3033         if (hwmgr == NULL)
3034                 return -EINVAL;
3035
3036         ps = hwmgr->request_ps;
3037
3038         if (ps == NULL)
3039                 return -EINVAL;
3040
3041         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3042
3043         if (low)
3044                 return smu7_ps->performance_levels[0].engine_clock;
3045         else
3046                 return smu7_ps->performance_levels
3047                                 [smu7_ps->performance_level_count-1].engine_clock;
3048 }
3049
/* Patch the boot power state with the VBIOS boot-up values.
 *
 * Reads the default engine/memory clocks and boot-up voltages from the
 * ATOM FirmwareInfo data table, caches them in data->vbios_boot_state,
 * and seeds performance level 0 of @hw_ps with those clocks plus the
 * currently trained PCIe gen/lane configuration.
 *
 * Returns 0 (also when the firmware table is absent, e.g. under test).
 */
static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe gen/lane come from the current link state, not the table. */
	data->vbios_boot_state.pcie_gen_bootup_value =
			smu7_get_current_pcie_speed(hwmgr);

	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
3094
3095 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3096 {
3097         int result;
3098         unsigned long ret = 0;
3099
3100         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3101                 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3102                 return result ? 0 : ret;
3103         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3104                 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3105                 return result;
3106         }
3107         return 0;
3108 }
3109
3110 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3111                 void *state, struct pp_power_state *power_state,
3112                 void *pp_table, uint32_t classification_flag)
3113 {
3114         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3115         struct smu7_power_state  *smu7_power_state =
3116                         (struct smu7_power_state *)(&(power_state->hardware));
3117         struct smu7_performance_level *performance_level;
3118         ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3119         ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3120                         (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3121         PPTable_Generic_SubTable_Header *sclk_dep_table =
3122                         (PPTable_Generic_SubTable_Header *)
3123                         (((unsigned long)powerplay_table) +
3124                                 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3125
3126         ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3127                         (ATOM_Tonga_MCLK_Dependency_Table *)
3128                         (((unsigned long)powerplay_table) +
3129                                 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3130
3131         /* The following fields are not initialized here: id orderedList allStatesList */
3132         power_state->classification.ui_label =
3133                         (le16_to_cpu(state_entry->usClassification) &
3134                         ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3135                         ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3136         power_state->classification.flags = classification_flag;
3137         /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3138
3139         power_state->classification.temporary_state = false;
3140         power_state->classification.to_be_deleted = false;
3141
3142         power_state->validation.disallowOnDC =
3143                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3144                                         ATOM_Tonga_DISALLOW_ON_DC));
3145
3146         power_state->pcie.lanes = 0;
3147
3148         power_state->display.disableFrameModulation = false;
3149         power_state->display.limitRefreshrate = false;
3150         power_state->display.enableVariBright =
3151                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3152                                         ATOM_Tonga_ENABLE_VARIBRIGHT));
3153
3154         power_state->validation.supportedPowerLevels = 0;
3155         power_state->uvd_clocks.VCLK = 0;
3156         power_state->uvd_clocks.DCLK = 0;
3157         power_state->temperatures.min = 0;
3158         power_state->temperatures.max = 0;
3159
3160         performance_level = &(smu7_power_state->performance_levels
3161                         [smu7_power_state->performance_level_count++]);
3162
3163         PP_ASSERT_WITH_CODE(
3164                         (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3165                         "Performance levels exceeds SMC limit!",
3166                         return -EINVAL);
3167
3168         PP_ASSERT_WITH_CODE(
3169                         (smu7_power_state->performance_level_count <=
3170                                         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3171                         "Performance levels exceeds Driver limit!",
3172                         return -EINVAL);
3173
3174         /* Performance levels are arranged from low to high. */
3175         performance_level->memory_clock = mclk_dep_table->entries
3176                         [state_entry->ucMemoryClockIndexLow].ulMclk;
3177         if (sclk_dep_table->ucRevId == 0)
3178                 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3179                         [state_entry->ucEngineClockIndexLow].ulSclk;
3180         else if (sclk_dep_table->ucRevId == 1)
3181                 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3182                         [state_entry->ucEngineClockIndexLow].ulSclk;
3183         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3184                         state_entry->ucPCIEGenLow);
3185         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3186                         state_entry->ucPCIELaneHigh);
3187
3188         performance_level = &(smu7_power_state->performance_levels
3189                         [smu7_power_state->performance_level_count++]);
3190         performance_level->memory_clock = mclk_dep_table->entries
3191                         [state_entry->ucMemoryClockIndexHigh].ulMclk;
3192
3193         if (sclk_dep_table->ucRevId == 0)
3194                 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3195                         [state_entry->ucEngineClockIndexHigh].ulSclk;
3196         else if (sclk_dep_table->ucRevId == 1)
3197                 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3198                         [state_entry->ucEngineClockIndexHigh].ulSclk;
3199
3200         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3201                         state_entry->ucPCIEGenHigh);
3202         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3203                         state_entry->ucPCIELaneHigh);
3204
3205         return 0;
3206 }
3207
/* Load powerplay-table entry @entry_index into @state (v1 tables).
 *
 * Delegates the raw parsing to
 * smu7_get_pp_table_entry_callback_func_v1(), then post-processes the
 * result: sanity-checks single-entry VDDCI/MCLK dependency tables
 * against the VBIOS boot values, marks DC compatibility, records the
 * ACPI state's PCIe gen, and accumulates the PCIe gen/lane min-max
 * ranges for performance and battery UI labels.
 *
 * NOTE(review): always returns 0; a failure from
 * get_powerplay_table_entry_v1_0() only skips the PCIe range
 * bookkeeping - verify callers do not rely on error propagation.
 */
static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v1);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the PCIe gen the ACPI state uses. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		/* Widen the tracked PCIe gen/lane min-max ranges with this
		 * state's levels, bucketed by UI label.
		 */
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;
				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
3309
/*
 * Per-entry callback for the v0 (legacy ATOM_PPLIB) power-play table parser:
 * decodes one VI/CI clock-info record into the next performance level of the
 * hardware power state being built.
 *
 * Returns 0 on success or when the entry is deliberately skipped; -EINVAL if
 * the SMC firmware's level limit would be exceeded.
 */
static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *power_state,
					unsigned int index, const void *clock_info)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
	struct smu7_performance_level *performance_level;
	uint32_t engine_clock, memory_clock;
	uint16_t pcie_gen_from_bios;

	/* Clocks are stored in the table split as a high byte and a low word. */
	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;

	/* Track the highest MCLK seen, unless MC microcode loading is disabled. */
	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
		data->highest_mclk = memory_clock;

	/* Hard error: more levels than the SMC firmware can represent. */
	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceeds SMC limit!",
			return -EINVAL);

	/* Soft limit: entries beyond the driver's cap are dropped (return 0). */
	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count <
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit, Skip!",
			return 0);

	performance_level = &(ps->performance_levels
			[ps->performance_level_count++]);

	/* Performance levels are arranged from low to high. */
	performance_level->memory_clock = memory_clock;
	performance_level->engine_clock = engine_clock;

	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;

	/* Clamp the VBIOS-requested PCIe gen/lane count to the platform caps. */
	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);

	return 0;
}
3352
/*
 * Build power state @entry_index from the v0 (legacy ATOM_PPLIB) power-play
 * table.  Parsing of individual clock entries is delegated to
 * smu7_get_pp_table_entry_callback_func_v0(); afterwards the PCIe gen/lane
 * min/max ranges tracked in @hwmgr->backend are widened to cover every
 * performance level of the new state, keyed by its UI classification.
 *
 * Always returns 0; parse failures only suppress the PCIe range update.
 */
static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *dep_mclk_table =
			hwmgr->dyn_state.vddci_dependency_on_mclk;

	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = pp_tables_get_entry(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v0);

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state as
	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
	 * state if there is only one VDDCI/MCLK level, check if it's
	 * the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].v !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the PCIe gen the ACPI (lowest-power) state asks for. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	/* Only widen the tracked PCIe ranges when parsing succeeded. */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			/* Widen the performance PCIe gen/lane range to span all levels. */
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			/* Same widening, but for the power-saving range. */
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
3459
3460 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3461                 unsigned long entry_index, struct pp_power_state *state)
3462 {
3463         if (hwmgr->pp_table_version == PP_TABLE_V0)
3464                 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3465         else if (hwmgr->pp_table_version == PP_TABLE_V1)
3466                 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3467
3468         return 0;
3469 }
3470
3471 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3472 {
3473         int i;
3474         u32 tmp = 0;
3475
3476         if (!query)
3477                 return -EINVAL;
3478
3479         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
3480         tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3481         *query = tmp;
3482
3483         if (tmp != 0)
3484                 return 0;
3485
3486         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
3487         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3488                                                         ixSMU_PM_STATUS_94, 0);
3489
3490         for (i = 0; i < 10; i++) {
3491                 mdelay(1);
3492                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
3493                 tmp = cgs_read_ind_register(hwmgr->device,
3494                                                 CGS_IND_REG__SMC,
3495                                                 ixSMU_PM_STATUS_94);
3496                 if (tmp != 0)
3497                         break;
3498         }
3499         *query = tmp;
3500
3501         return 0;
3502 }
3503
/*
 * Read one hardware sensor value for the generic AMDGPU_PP_SENSOR_* query
 * interface.  @value receives a 4-byte result for every supported sensor;
 * *@size is updated to the number of bytes written.
 *
 * Returns 0 on success, -EINVAL for an undersized buffer or unknown sensor.
 */
static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			    void *value, int *size)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset, val_vid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* Current engine clock, read back from the SMC argument register. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = sclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		/* Current memory clock, read back from the SMC argument register. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = mclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
								SMU_SoftRegisters,
								AverageGraphicsActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
		/* Convert from 8.8 fixed point: add 0.5 (0x80) then shift; clamp to 100%. */
		activity_percent += 0x80;
		activity_percent >>= 8;
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* 1 = UVD powered up, 0 = power gated. */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		/* 1 = VCE powered up, 0 = power gated. */
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
	case AMDGPU_PP_SENSOR_VDDGFX:
		/*
		 * Pick the SVI2 VID plane carrying VDDGFX.  NOTE(review):
		 * presumably vr_config low byte == 0x2 means GFX is on
		 * plane 2 — confirm against the VR configuration definition.
		 */
		if ((data->vr_config & 0xff) == 0x2)
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
		else
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);

		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
		return 0;
	default:
		return -EINVAL;
	}
}
3567
/*
 * Compare the new power state's top-level SCLK/MCLK against the current DPM
 * tables and raise the appropriate need_update_smu7_dpm_table flags:
 * OD_UPDATE_* when the requested clock does not exist in the table (i.e. an
 * overdrive change), UPDATE_* when only deep-sleep/display parameters moved.
 *
 * Always returns 0.
 */
static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	/*
	 * NOTE(review): min_clocks is never populated below, so the
	 * engineClockInSR comparison is effectively against 0 until the
	 * TODO about DAL minimum clocks is addressed.
	 */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;

	/* Requested SCLK present in the DPM table? */
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
	/* TODO: Check SCLK in DAL's minimum clocks
	 * in case DeepSleep divider update is required.
	 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* Requested MCLK present in the DPM table? */
	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;


	/* A display count change also forces an MCLK level refresh. */
	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
3615
3616 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3617                 const struct smu7_power_state *smu7_ps)
3618 {
3619         uint32_t i;
3620         uint32_t sclk, max_sclk = 0;
3621         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3622         struct smu7_dpm_table *dpm_table = &data->dpm_table;
3623
3624         for (i = 0; i < smu7_ps->performance_level_count; i++) {
3625                 sclk = smu7_ps->performance_levels[i].engine_clock;
3626                 if (max_sclk < sclk)
3627                         max_sclk = sclk;
3628         }
3629
3630         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3631                 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3632                         return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3633                                         dpm_table->pcie_speed_table.dpm_levels
3634                                         [dpm_table->pcie_speed_table.count - 1].value :
3635                                         dpm_table->pcie_speed_table.dpm_levels[i].value);
3636         }
3637
3638         return 0;
3639 }
3640
/*
 * Ask the platform (via ACPI PSPP) to raise the PCIe link speed before
 * switching to a higher-performance state.  On request failure the target
 * gen is stored in data->force_pcie_gen for a later retry; when the new
 * state needs a *lower* speed, only pspp_notify_required is set so the
 * downgrade happens after the state change.
 *
 * Always returns 0.
 */
static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	/* A pending forced gen overrides the current state's derived speed. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case PP_PCIEGen3:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 request failed: remember to retry, try Gen2 now. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through */
#endif
		default:
			/* No ACPI help: pin to whatever the link runs at now. */
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
3687
3688 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3689 {
3690         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3691
3692         if (0 == data->need_update_smu7_dpm_table)
3693                 return 0;
3694
3695         if ((0 == data->sclk_dpm_key_disabled) &&
3696                 (data->need_update_smu7_dpm_table &
3697                         (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3698                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3699                                 "Trying to freeze SCLK DPM when DPM is disabled",
3700                                 );
3701                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3702                                 PPSMC_MSG_SCLKDPM_FreezeLevel),
3703                                 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3704                                 return -EINVAL);
3705         }
3706
3707         if ((0 == data->mclk_dpm_key_disabled) &&
3708                 (data->need_update_smu7_dpm_table &
3709                  DPMTABLE_OD_UPDATE_MCLK)) {
3710                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3711                                 "Trying to freeze MCLK DPM when DPM is disabled",
3712                                 );
3713                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3714                                 PPSMC_MSG_MCLKDPM_FreezeLevel),
3715                                 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3716                                 return -EINVAL);
3717         }
3718
3719         return 0;
3720 }
3721
3722 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3723                 struct pp_hwmgr *hwmgr, const void *input)
3724 {
3725         int result = 0;
3726         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3727         struct smu7_dpm_table *dpm_table = &data->dpm_table;
3728         uint32_t count;
3729         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
3730         struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
3731         struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
3732
3733         if (0 == data->need_update_smu7_dpm_table)
3734                 return 0;
3735
3736         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3737                 for (count = 0; count < dpm_table->sclk_table.count; count++) {
3738                         dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
3739                         dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
3740                 }
3741         }
3742
3743         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3744                 for (count = 0; count < dpm_table->mclk_table.count; count++) {
3745                         dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
3746                         dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
3747                 }
3748         }
3749
3750         if (data->need_update_smu7_dpm_table &
3751                         (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3752                 result = smum_populate_all_graphic_levels(hwmgr);
3753                 PP_ASSERT_WITH_CODE((0 == result),
3754                                 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3755                                 return result);
3756         }
3757
3758         if (data->need_update_smu7_dpm_table &
3759                         (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3760                 /*populate MCLK dpm table to SMU7 */
3761                 result = smum_populate_all_memory_levels(hwmgr);
3762                 PP_ASSERT_WITH_CODE((0 == result),
3763                                 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3764                                 return result);
3765         }
3766
3767         return result;
3768 }
3769
3770 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3771                           struct smu7_single_dpm_table *dpm_table,
3772                         uint32_t low_limit, uint32_t high_limit)
3773 {
3774         uint32_t i;
3775
3776         for (i = 0; i < dpm_table->count; i++) {
3777         /*skip the trim if od is enabled*/
3778                 if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
3779                         || dpm_table->dpm_levels[i].value > high_limit))
3780                         dpm_table->dpm_levels[i].enabled = false;
3781                 else
3782                         dpm_table->dpm_levels[i].enabled = true;
3783         }
3784
3785         return 0;
3786 }
3787
3788 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3789                 const struct smu7_power_state *smu7_ps)
3790 {
3791         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3792         uint32_t high_limit_count;
3793
3794         PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3795                         "power state did not have any performance level",
3796                         return -EINVAL);
3797
3798         high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3799
3800         smu7_trim_single_dpm_states(hwmgr,
3801                         &(data->dpm_table.sclk_table),
3802                         smu7_ps->performance_levels[0].engine_clock,
3803                         smu7_ps->performance_levels[high_limit_count].engine_clock);
3804
3805         smu7_trim_single_dpm_states(hwmgr,
3806                         &(data->dpm_table.mclk_table),
3807                         smu7_ps->performance_levels[0].memory_clock,
3808                         smu7_ps->performance_levels[high_limit_count].memory_clock);
3809
3810         return 0;
3811 }
3812
3813 static int smu7_generate_dpm_level_enable_mask(
3814                 struct pp_hwmgr *hwmgr, const void *input)
3815 {
3816         int result = 0;
3817         const struct phm_set_power_state_input *states =
3818                         (const struct phm_set_power_state_input *)input;
3819         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3820         const struct smu7_power_state *smu7_ps =
3821                         cast_const_phw_smu7_power_state(states->pnew_state);
3822
3823
3824         result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3825         if (result)
3826                 return result;
3827
3828         data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3829                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3830         data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3831                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3832         data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3833                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3834
3835         return 0;
3836 }
3837
3838 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3839 {
3840         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3841
3842         if (0 == data->need_update_smu7_dpm_table)
3843                 return 0;
3844
3845         if ((0 == data->sclk_dpm_key_disabled) &&
3846                 (data->need_update_smu7_dpm_table &
3847                 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3848
3849                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3850                                 "Trying to Unfreeze SCLK DPM when DPM is disabled",
3851                                 );
3852                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3853                                 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3854                         "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3855                         return -EINVAL);
3856         }
3857
3858         if ((0 == data->mclk_dpm_key_disabled) &&
3859                 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3860
3861                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3862                                 "Trying to Unfreeze MCLK DPM when DPM is disabled",
3863                                 );
3864                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3865                                 PPSMC_MSG_MCLKDPM_UnfreezeLevel),
3866                     "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3867                     return -EINVAL);
3868         }
3869
3870         data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3871
3872         return 0;
3873 }
3874
3875 static int smu7_notify_link_speed_change_after_state_change(
3876                 struct pp_hwmgr *hwmgr, const void *input)
3877 {
3878         const struct phm_set_power_state_input *states =
3879                         (const struct phm_set_power_state_input *)input;
3880         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3881         const struct smu7_power_state *smu7_ps =
3882                         cast_const_phw_smu7_power_state(states->pnew_state);
3883         uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3884         uint8_t  request;
3885
3886         if (data->pspp_notify_required) {
3887                 if (target_link_speed == PP_PCIEGen3)
3888                         request = PCIE_PERF_REQ_GEN3;
3889                 else if (target_link_speed == PP_PCIEGen2)
3890                         request = PCIE_PERF_REQ_GEN2;
3891                 else
3892                         request = PCIE_PERF_REQ_GEN1;
3893
3894                 if (request == PCIE_PERF_REQ_GEN1 &&
3895                                 smu7_get_current_pcie_speed(hwmgr) > 0)
3896                         return 0;
3897
3898 #ifdef CONFIG_ACPI
3899                 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
3900                         if (PP_PCIEGen2 == target_link_speed)
3901                                 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
3902                         else
3903                                 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
3904                 }
3905 #endif
3906         }
3907
3908         return 0;
3909 }
3910
3911 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3912 {
3913         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3914
3915         if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
3916                 if (hwmgr->chip_id == CHIP_VEGAM)
3917                         smum_send_msg_to_smc_with_parameter(hwmgr,
3918                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
3919                 else
3920                         smum_send_msg_to_smc_with_parameter(hwmgr,
3921                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3922         }
3923         return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
3924 }
3925
/*
 * Apply a pending power state: locate the requested clocks, optionally lower
 * the PCIe link first, reprogram the SCLK/MCLK DPM tables while DPM is
 * frozen, then unfreeze and push the enable masks and display settings to
 * the SMC.  Each step logs on failure via PP_ASSERT_WITH_CODE but the
 * sequence deliberately continues; the most recent failing step's error
 * code is what gets returned.
 */
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* Request a (possibly lower) link speed before switching states. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	/* DPM stays frozen while the level tables are rewritten below. */
	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = smu7_update_avfs(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update avfs voltages!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* Mirror step for the pre-change PCIe request above. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
3995
3996 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
3997 {
3998         hwmgr->thermal_controller.
3999         advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4000
4001         return smum_send_msg_to_smc_with_parameter(hwmgr,
4002                         PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4003 }
4004
4005 static int
4006 smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4007 {
4008         PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4009
4010         return (smum_send_msg_to_smc(hwmgr, msg) == 0) ?  0 : -1;
4011 }
4012
4013 static int
4014 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4015 {
4016         if (hwmgr->display_config->num_display > 1 &&
4017                         !hwmgr->display_config->multi_monitor_in_sync)
4018                 smu7_notify_smc_display_change(hwmgr, false);
4019
4020         return 0;
4021 }
4022
/**
* Programs the display gap
*
* Derives the frame timing from the current refresh rate, writes the
* display-gap control registers and exports the pre-/post-VBlank timing
* to the SMC soft registers.
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always OK
*/
static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock, refresh_rate;

	/* Gap handling: wait for vblank/watermark when displays are active,
	 * ignore the gap entirely when none are.
	 */
	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
	refresh_rate = hwmgr->display_config->vrefresh;

	/* Fall back to a 60 Hz frame time if no refresh rate is reported. */
	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* NOTE(review): unsigned subtraction — if frame_time_in_us is ever
	 * smaller than 200 + min_vblank_time this wraps around; confirm the
	 * display config guarantees sane vblank values.
	 */
	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;

	/* Doubled frame time, scaled down by 100 — consumed by the SMC via
	 * smu7_notify_smc_display(); exact SMC unit not documented here.
	 */
	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	/* Gap in reference-clock cycles (ref_clock is in 10 kHz units). */
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	/* PreVBlankGap soft register gets a fixed 0x64 (100). */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							VBlankTimeout),
					(frame_time_in_us - pre_vbi_time_in_us));

	return 0;
}
4070
static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	/* A display-config change only requires reprogramming the gap. */
	int ret = smu7_program_display_gap(hwmgr);

	return ret;
}
4075
4076 /**
4077 *  Set maximum target operating fan output RPM
4078 *
4079 * @param    hwmgr:  the address of the powerplay hardware manager.
4080 * @param    usMaxFanRpm:  max operating fan RPM value.
4081 * @return   The response that came from the SMC.
4082 */
4083 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4084 {
4085         hwmgr->thermal_controller.
4086         advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4087
4088         return smum_send_msg_to_smc_with_parameter(hwmgr,
4089                         PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4090 }
4091
/* IRQ dispatch table: every source registered below is handled by the
 * common powerplay interrupt handler.
 */
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
	.process = phm_irq_process,
};
4095
/*
 * Allocate one IRQ source routed through smu7_irq_funcs and attach it to
 * the legacy IH client for three source ids.
 * NOTE(review): the amdgpu_irq_add_id() return values are ignored and
 * 'source' is never freed on this path — ownership presumably passes to
 * the IRQ core; confirm against amdgpu_irq_add_id()'s contract.
 */
static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu7_irq_funcs;

	/* src ids 230/231: presumably the thermal trip interrupts —
	 * TODO confirm against the IH source-id list.
	 */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			230,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			231,
			source);

	/* Register CTF(GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			83,
			source);

	return 0;
}
4123
4124 static bool
4125 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4126 {
4127         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4128         bool is_update_required = false;
4129
4130         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4131                 is_update_required = true;
4132
4133         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4134                 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4135                         (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4136                         hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4137                         is_update_required = true;
4138         }
4139         return is_update_required;
4140 }
4141
4142 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4143                                                            const struct smu7_performance_level *pl2)
4144 {
4145         return ((pl1->memory_clock == pl2->memory_clock) &&
4146                   (pl1->engine_clock == pl2->engine_clock) &&
4147                   (pl1->pcie_gen == pl2->pcie_gen) &&
4148                   (pl1->pcie_lane == pl2->pcie_lane));
4149 }
4150
4151 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4152                 const struct pp_hw_power_state *pstate1,
4153                 const struct pp_hw_power_state *pstate2, bool *equal)
4154 {
4155         const struct smu7_power_state *psa;
4156         const struct smu7_power_state *psb;
4157         int i;
4158         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4159
4160         if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4161                 return -EINVAL;
4162
4163         psa = cast_const_phw_smu7_power_state(pstate1);
4164         psb = cast_const_phw_smu7_power_state(pstate2);
4165         /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4166         if (psa->performance_level_count != psb->performance_level_count) {
4167                 *equal = false;
4168                 return 0;
4169         }
4170
4171         for (i = 0; i < psa->performance_level_count; i++) {
4172                 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4173                         /* If we have found even one performance level pair that is different the states are different. */
4174                         *equal = false;
4175                         return 0;
4176                 }
4177         }
4178
4179         /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4180         *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4181         *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4182         *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4183         /* For OD call, set value based on flag */
4184         *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4185                                                         DPMTABLE_OD_UPDATE_MCLK |
4186                                                         DPMTABLE_OD_UPDATE_VDDC));
4187
4188         return 0;
4189 }
4190
4191 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4192 {
4193         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4194
4195         uint32_t vbios_version;
4196         uint32_t tmp;
4197
4198         /* Read MC indirect register offset 0x9F bits [3:0] to see
4199          * if VBIOS has already loaded a full version of MC ucode
4200          * or not.
4201          */
4202
4203         smu7_get_mc_microcode_version(hwmgr);
4204         vbios_version = hwmgr->microcode_version_info.MC & 0xf;
4205
4206         data->need_long_memory_training = false;
4207
4208         cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4209                                                         ixMC_IO_DEBUG_UP_13);
4210         tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4211
4212         if (tmp & (1 << 23)) {
4213                 data->mem_latency_high = MEM_LATENCY_HIGH;
4214                 data->mem_latency_low = MEM_LATENCY_LOW;
4215         } else {
4216                 data->mem_latency_high = 330;
4217                 data->mem_latency_low = 330;
4218         }
4219
4220         return 0;
4221 }
4222
4223 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4224 {
4225         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4226
4227         data->clock_registers.vCG_SPLL_FUNC_CNTL         =
4228                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4229         data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
4230                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4231         data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
4232                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4233         data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
4234                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4235         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
4236                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4237         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4238                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4239         data->clock_registers.vDLL_CNTL                  =
4240                 cgs_read_register(hwmgr->device, mmDLL_CNTL);
4241         data->clock_registers.vMCLK_PWRMGT_CNTL          =
4242                 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4243         data->clock_registers.vMPLL_AD_FUNC_CNTL         =
4244                 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4245         data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
4246                 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4247         data->clock_registers.vMPLL_FUNC_CNTL            =
4248                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4249         data->clock_registers.vMPLL_FUNC_CNTL_1          =
4250                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4251         data->clock_registers.vMPLL_FUNC_CNTL_2          =
4252                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4253         data->clock_registers.vMPLL_SS1                  =
4254                 cgs_read_register(hwmgr->device, mmMPLL_SS1);
4255         data->clock_registers.vMPLL_SS2                  =
4256                 cgs_read_register(hwmgr->device, mmMPLL_SS2);
4257         return 0;
4258
4259 }
4260
4261 /**
4262  * Find out if memory is GDDR5.
4263  *
4264  * @param    hwmgr  the address of the powerplay hardware manager.
4265  * @return   always 0
4266  */
4267 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4268 {
4269         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4270         struct amdgpu_device *adev = hwmgr->adev;
4271
4272         data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4273
4274         return 0;
4275 }
4276
/**
 * Enables Dynamic Power Management by SMC
 *
 * Sets the STATIC_PM_EN bit in GENERAL_PWRMGT (SMC indirect space).
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}
4290
4291 /**
4292  * Initialize PowerGating States for different engines
4293  *
4294  * @param    hwmgr  the address of the powerplay hardware manager.
4295  * @return   always 0
4296  */
4297 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4298 {
4299         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4300
4301         data->uvd_power_gated = false;
4302         data->vce_power_gated = false;
4303
4304         return 0;
4305 }
4306
4307 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4308 {
4309         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4310
4311         data->low_sclk_interrupt_threshold = 0;
4312         return 0;
4313 }
4314
/*
 * One-time ASIC setup: check the MC firmware, snapshot clock registers,
 * detect the memory type, enable static power management and reset the
 * power-gate/sclk-threshold state.  Each step logs on failure via
 * PP_ASSERT_WITH_CODE but the sequence continues; the most recent failing
 * step's error code is returned.
 */
static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_check_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
4347
4348 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4349                 enum pp_clock_type type, uint32_t mask)
4350 {
4351         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4352
4353         if (mask == 0)
4354                 return -EINVAL;
4355
4356         switch (type) {
4357         case PP_SCLK:
4358                 if (!data->sclk_dpm_key_disabled)
4359                         smum_send_msg_to_smc_with_parameter(hwmgr,
4360                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
4361                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
4362                 break;
4363         case PP_MCLK:
4364                 if (!data->mclk_dpm_key_disabled)
4365                         smum_send_msg_to_smc_with_parameter(hwmgr,
4366                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
4367                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
4368                 break;
4369         case PP_PCIE:
4370         {
4371                 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4372
4373                 if (!data->pcie_dpm_key_disabled) {
4374                         if (fls(tmp) != ffs(tmp))
4375                                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
4376                         else
4377                                 smum_send_msg_to_smc_with_parameter(hwmgr,
4378                                         PPSMC_MSG_PCIeDPM_ForceLevel,
4379                                         fls(tmp) - 1);
4380                 }
4381                 break;
4382         }
4383         default:
4384                 break;
4385         }
4386
4387         return 0;
4388 }
4389
/*
 * Format the DPM levels of the requested clock domain into 'buf' for
 * sysfs, marking the currently active level with '*'.  Returns the number
 * of bytes written.
 * NOTE(review): uses unbounded sprintf — relies on the caller providing a
 * suitably large (sysfs PAGE_SIZE) buffer; confirm against the caller.
 */
static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Ask the SMC for the live sclk, then find the first level
		 * at or above it ('now' ends at table count if none match).
		 */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* Level values are divided by 100 for MHz display. */
		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		/* Same scheme as PP_SCLK, but for the memory clock. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* PCIe levels are matched exactly against the current speed. */
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	case OD_SCLK:
		/* Overdrive tables are only exposed when OD is enabled. */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_sclk_table->entries[i].clock/100,
					odn_sclk_table->entries[i].vddc);
		}
		break;
	case OD_MCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_mclk_table->entries[i].clock/100,
					odn_mclk_table->entries[i].vddc);
		}
		break;
	case OD_RANGE:
		/* Valid OD ranges: golden level 0 up to the overdrive limit. */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
				data->odn_dpm_table.min_vddc,
				data->odn_dpm_table.max_vddc);
		}
		break;
	default:
		break;
	}
	return size;
}
4489
4490 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4491 {
4492         switch (mode) {
4493         case AMD_FAN_CTRL_NONE:
4494                 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4495                 break;
4496         case AMD_FAN_CTRL_MANUAL:
4497                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4498                         PHM_PlatformCaps_MicrocodeFanControl))
4499                         smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4500                 break;
4501         case AMD_FAN_CTRL_AUTO:
4502                 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4503                         smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4504                 break;
4505         default:
4506                 break;
4507         }
4508 }
4509
4510 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4511 {
4512         return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4513 }
4514
4515 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4516 {
4517         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4518         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4519         struct smu7_single_dpm_table *golden_sclk_table =
4520                         &(data->golden_dpm_table.sclk_table);
4521         int value;
4522
4523         value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4524                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
4525                         100 /
4526                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4527
4528         return value;
4529 }
4530
4531 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4532 {
4533         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4534         struct smu7_single_dpm_table *golden_sclk_table =
4535                         &(data->golden_dpm_table.sclk_table);
4536         struct pp_power_state  *ps;
4537         struct smu7_power_state  *smu7_ps;
4538
4539         if (value > 20)
4540                 value = 20;
4541
4542         ps = hwmgr->request_ps;
4543
4544         if (ps == NULL)
4545                 return -EINVAL;
4546
4547         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4548
4549         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4550                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4551                         value / 100 +
4552                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4553
4554         return 0;
4555 }
4556
4557 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4558 {
4559         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4560         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4561         struct smu7_single_dpm_table *golden_mclk_table =
4562                         &(data->golden_dpm_table.mclk_table);
4563         int value;
4564
4565         value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
4566                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
4567                         100 /
4568                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4569
4570         return value;
4571 }
4572
4573 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4574 {
4575         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4576         struct smu7_single_dpm_table *golden_mclk_table =
4577                         &(data->golden_dpm_table.mclk_table);
4578         struct pp_power_state  *ps;
4579         struct smu7_power_state  *smu7_ps;
4580
4581         if (value > 20)
4582                 value = 20;
4583
4584         ps = hwmgr->request_ps;
4585
4586         if (ps == NULL)
4587                 return -EINVAL;
4588
4589         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4590
4591         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4592                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4593                         value / 100 +
4594                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4595
4596         return 0;
4597 }
4598
4599
4600 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4601 {
4602         struct phm_ppt_v1_information *table_info =
4603                         (struct phm_ppt_v1_information *)hwmgr->pptable;
4604         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4605         struct phm_clock_voltage_dependency_table *sclk_table;
4606         int i;
4607
4608         if (hwmgr->pp_table_version == PP_TABLE_V1) {
4609                 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4610                         return -EINVAL;
4611                 dep_sclk_table = table_info->vdd_dep_on_sclk;
4612                 for (i = 0; i < dep_sclk_table->count; i++)
4613                         clocks->clock[i] = dep_sclk_table->entries[i].clk;
4614                 clocks->count = dep_sclk_table->count;
4615         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4616                 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4617                 for (i = 0; i < sclk_table->count; i++)
4618                         clocks->clock[i] = sclk_table->entries[i].clk;
4619                 clocks->count = sclk_table->count;
4620         }
4621
4622         return 0;
4623 }
4624
4625 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4626 {
4627         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4628
4629         if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4630                 return data->mem_latency_high;
4631         else if (clk >= MEM_FREQ_HIGH_LATENCY)
4632                 return data->mem_latency_low;
4633         else
4634                 return MEM_LATENCY_ERR;
4635 }
4636
4637 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4638 {
4639         struct phm_ppt_v1_information *table_info =
4640                         (struct phm_ppt_v1_information *)hwmgr->pptable;
4641         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4642         int i;
4643         struct phm_clock_voltage_dependency_table *mclk_table;
4644
4645         if (hwmgr->pp_table_version == PP_TABLE_V1) {
4646                 if (table_info == NULL)
4647                         return -EINVAL;
4648                 dep_mclk_table = table_info->vdd_dep_on_mclk;
4649                 for (i = 0; i < dep_mclk_table->count; i++) {
4650                         clocks->clock[i] = dep_mclk_table->entries[i].clk;
4651                         clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4652                                                 dep_mclk_table->entries[i].clk);
4653                 }
4654                 clocks->count = dep_mclk_table->count;
4655         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4656                 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4657                 for (i = 0; i < mclk_table->count; i++)
4658                         clocks->clock[i] = mclk_table->entries[i].clk;
4659                 clocks->count = mclk_table->count;
4660         }
4661         return 0;
4662 }
4663
4664 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4665                                                 struct amd_pp_clocks *clocks)
4666 {
4667         switch (type) {
4668         case amd_pp_sys_clock:
4669                 smu7_get_sclks(hwmgr, clocks);
4670                 break;
4671         case amd_pp_mem_clock:
4672                 smu7_get_mclks(hwmgr, clocks);
4673                 break;
4674         default:
4675                 return -EINVAL;
4676         }
4677
4678         return 0;
4679 }
4680
4681 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4682                                         uint32_t virtual_addr_low,
4683                                         uint32_t virtual_addr_hi,
4684                                         uint32_t mc_addr_low,
4685                                         uint32_t mc_addr_hi,
4686                                         uint32_t size)
4687 {
4688         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4689
4690         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4691                                         data->soft_regs_start +
4692                                         smum_get_offsetof(hwmgr,
4693                                         SMU_SoftRegisters, DRAM_LOG_ADDR_H),
4694                                         mc_addr_hi);
4695
4696         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4697                                         data->soft_regs_start +
4698                                         smum_get_offsetof(hwmgr,
4699                                         SMU_SoftRegisters, DRAM_LOG_ADDR_L),
4700                                         mc_addr_low);
4701
4702         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4703                                         data->soft_regs_start +
4704                                         smum_get_offsetof(hwmgr,
4705                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
4706                                         virtual_addr_hi);
4707
4708         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4709                                         data->soft_regs_start +
4710                                         smum_get_offsetof(hwmgr,
4711                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
4712                                         virtual_addr_low);
4713
4714         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4715                                         data->soft_regs_start +
4716                                         smum_get_offsetof(hwmgr,
4717                                         SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
4718                                         size);
4719         return 0;
4720 }
4721
4722 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
4723                                         struct amd_pp_simple_clock_info *clocks)
4724 {
4725         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4726         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4727         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4728
4729         if (clocks == NULL)
4730                 return -EINVAL;
4731
4732         clocks->memory_max_clock = mclk_table->count > 1 ?
4733                                 mclk_table->dpm_levels[mclk_table->count-1].value :
4734                                 mclk_table->dpm_levels[0].value;
4735         clocks->engine_max_clock = sclk_table->count > 1 ?
4736                                 sclk_table->dpm_levels[sclk_table->count-1].value :
4737                                 sclk_table->dpm_levels[0].value;
4738         return 0;
4739 }
4740
4741 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4742                 struct PP_TemperatureRange *thermal_data)
4743 {
4744         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4745         struct phm_ppt_v1_information *table_info =
4746                         (struct phm_ppt_v1_information *)hwmgr->pptable;
4747
4748         memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
4749
4750         if (hwmgr->pp_table_version == PP_TABLE_V1)
4751                 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
4752                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4753         else if (hwmgr->pp_table_version == PP_TABLE_V0)
4754                 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
4755                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4756
4757         return 0;
4758 }
4759
4760 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4761                                         enum PP_OD_DPM_TABLE_COMMAND type,
4762                                         uint32_t clk,
4763                                         uint32_t voltage)
4764 {
4765         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4766
4767         if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
4768                 pr_info("OD voltage is out of range [%d - %d] mV\n",
4769                                                 data->odn_dpm_table.min_vddc,
4770                                                 data->odn_dpm_table.max_vddc);
4771                 return false;
4772         }
4773
4774         if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4775                 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
4776                         hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4777                         pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4778                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4779                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4780                         return false;
4781                 }
4782         } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4783                 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
4784                         hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4785                         pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4786                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4787                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4788                         return false;
4789                 }
4790         } else {
4791                 return false;
4792         }
4793
4794         return true;
4795 }
4796
/*
 * smu7_odn_edit_dpm_table - apply user OverDrive (ODN) edits to the SCLK or
 * MCLK DPM table, restore the default tables, or commit pending edits.
 *
 * @type:  which ODN table to edit (SCLK/MCLK), or a RESTORE/COMMIT command.
 * @input: triplets of (level index, clock, voltage) from the sysfs parser.
 * @size:  number of longs in @input; expected to be a multiple of 3.
 *
 * Returns 0 on success (malformed triplets are only logged, see below),
 * -EINVAL when OverDrive is disabled, @input is NULL, the command is
 * unknown, or a value fails range validation.
 */
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	uint32_t i;
	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	/* Select the backend ODN tables for the requested command; the
	 * RESTORE/COMMIT commands act immediately and take no input data.
	 */
	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN SCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;

		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
			"Failed to get ODN MCLK and Voltage tables",
			return -EINVAL);
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		smu7_odn_initial_default_setting(hwmgr);
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		smu7_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	/* Consume (level, clock, voltage) triplets. The user clock is
	 * multiplied by 100, matching the /100 used when these values are
	 * printed as MHz in smu7_check_clk_voltage_valid().
	 */
	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
			/* NOTE(review): malformed input returns 0 (success)
			 * after logging — presumably deliberate leniency;
			 * confirm before changing to an error.
			 */
			pr_info("invalid clock voltage input \n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;
		input_vol = input[i+2];

		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			/* Mirror the edit into both the displayed ODN levels
			 * and the voltage dependency table.
			 */
			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
4862
/*
 * smu7_get_power_profile_mode - print the power-profile table into @buf.
 *
 * Emits one row per entry of smu7_profiling. The currently active profile is
 * marked with '*' and shown with the live values from
 * data->current_profile_setting; inactive profiles show their table
 * defaults, with '-' for a clock domain the profile does not update.
 *
 * Returns the number of bytes written, or -EINVAL if @buf is NULL.
 */
static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	/* Row labels; indexed by the same values as smu7_profiling. */
	static const char *profile_name[6] = {"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	/* Header row. */
	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);

	for (i = 0; i < len; i++) {
		/* Active profile: report the current (possibly customized)
		 * settings rather than the table defaults.
		 */
		if (i == hwmgr->power_profile_mode) {
			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
			i, profile_name[i], "*",
			data->current_profile_setting.sclk_up_hyst,
			data->current_profile_setting.sclk_down_hyst,
			data->current_profile_setting.sclk_activity,
			data->current_profile_setting.mclk_up_hyst,
			data->current_profile_setting.mclk_down_hyst,
			data->current_profile_setting.mclk_activity);
			continue;
		}
		/* SCLK columns, or dashes when the profile leaves SCLK alone. */
		if (smu7_profiling[i].bupdate_sclk)
			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
			smu7_profiling[i].sclk_down_hyst,
			smu7_profiling[i].sclk_activity);
		else
			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
			i, profile_name[i], "-", "-", "-");

		/* MCLK columns, or dashes when the profile leaves MCLK alone. */
		if (smu7_profiling[i].bupdate_mclk)
			size += sprintf(buf + size, "%16d %16d %16d\n",
			smu7_profiling[i].mclk_up_hyst,
			smu7_profiling[i].mclk_down_hyst,
			smu7_profiling[i].mclk_activity);
		else
			size += sprintf(buf + size, "%16s %16s %16s\n",
			"-", "-", "-");
	}

	return size;
}
4927
4928 static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
4929                                         enum PP_SMC_POWER_PROFILE requst)
4930 {
4931         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4932         uint32_t tmp, level;
4933
4934         if (requst == PP_SMC_POWER_PROFILE_COMPUTE) {
4935                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4936                         level = 0;
4937                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
4938                         while (tmp >>= 1)
4939                                 level++;
4940                         if (level > 0)
4941                                 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
4942                 }
4943         } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
4944                 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4945         }
4946 }
4947
/*
 * smu7_set_power_profile_mode - switch the active power profile.
 *
 * @input: for CUSTOM mode, eight settings in order (bupdate_sclk,
 *         sclk_up_hyst, sclk_down_hyst, sclk_activity, bupdate_mclk,
 *         mclk_up_hyst, mclk_down_hyst, mclk_activity); the requested mode
 *         itself is read from input[size].
 * @size:  number of setting entries preceding the mode value.
 *
 * Returns 0 on success (including a no-op re-selection of the current
 * profile), -EINVAL on NULL input, an unknown mode, or a CUSTOM request
 * with fewer than 8 settings.
 */
static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	/* The mode selector follows the (optional) settings payload. */
	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8)
			return -EINVAL;

		tmp.bupdate_sclk = input[0];
		tmp.sclk_up_hyst = input[1];
		tmp.sclk_down_hyst = input[2];
		tmp.sclk_activity = input[3];
		tmp.bupdate_mclk = input[4];
		tmp.mclk_up_hyst = input[5];
		tmp.mclk_down_hyst = input[6];
		tmp.mclk_activity = input[7];
		/* Only mirror the settings locally once the SMU accepts them
		 * (smum_update_dpm_settings() returns 0 on success).
		 */
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		/* Re-selecting the active profile is a no-op. */
		if (mode == hwmgr->power_profile_mode)
			return 0;

		/* Push the predefined table entry to the SMU; on success fold
		 * only the clock domains the profile updates into the current
		 * settings, then apply the COMPUTE-specific clock-mask patch.
		 */
		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
5008
5009 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5010         .backend_init = &smu7_hwmgr_backend_init,
5011         .backend_fini = &smu7_hwmgr_backend_fini,
5012         .asic_setup = &smu7_setup_asic_task,
5013         .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5014         .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5015         .force_dpm_level = &smu7_force_dpm_level,
5016         .power_state_set = smu7_set_power_state_tasks,
5017         .get_power_state_size = smu7_get_power_state_size,
5018         .get_mclk = smu7_dpm_get_mclk,
5019         .get_sclk = smu7_dpm_get_sclk,
5020         .patch_boot_state = smu7_dpm_patch_boot_state,
5021         .get_pp_table_entry = smu7_get_pp_table_entry,
5022         .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5023         .powerdown_uvd = smu7_powerdown_uvd,
5024         .powergate_uvd = smu7_powergate_uvd,
5025         .powergate_vce = smu7_powergate_vce,
5026         .disable_clock_power_gating = smu7_disable_clock_power_gating,
5027         .update_clock_gatings = smu7_update_clock_gatings,
5028         .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5029         .display_config_changed = smu7_display_configuration_changed_task,
5030         .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5031         .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5032         .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5033         .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5034         .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5035         .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5036         .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5037         .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5038         .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5039         .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5040         .register_irq_handlers = smu7_register_irq_handlers,
5041         .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5042         .check_states_equal = smu7_check_states_equal,
5043         .set_fan_control_mode = smu7_set_fan_control_mode,
5044         .get_fan_control_mode = smu7_get_fan_control_mode,
5045         .force_clock_level = smu7_force_clock_level,
5046         .print_clock_levels = smu7_print_clock_levels,
5047         .powergate_gfx = smu7_powergate_gfx,
5048         .get_sclk_od = smu7_get_sclk_od,
5049         .set_sclk_od = smu7_set_sclk_od,
5050         .get_mclk_od = smu7_get_mclk_od,
5051         .set_mclk_od = smu7_set_mclk_od,
5052         .get_clock_by_type = smu7_get_clock_by_type,
5053         .read_sensor = smu7_read_sensor,
5054         .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5055         .avfs_control = smu7_avfs_control,
5056         .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5057         .start_thermal_controller = smu7_start_thermal_controller,
5058         .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5059         .get_max_high_clocks = smu7_get_max_high_clocks,
5060         .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5061         .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5062         .set_power_limit = smu7_set_power_limit,
5063         .get_power_profile_mode = smu7_get_power_profile_mode,
5064         .set_power_profile_mode = smu7_set_power_profile_mode,
5065 };
5066
5067 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5068                 uint32_t clock_insr)
5069 {
5070         uint8_t i;
5071         uint32_t temp;
5072         uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5073
5074         PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5075         for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
5076                 temp = clock >> i;
5077
5078                 if (temp >= min || i == 0)
5079                         break;
5080         }
5081         return i;
5082 }
5083
5084 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5085 {
5086         int ret = 0;
5087
5088         hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5089         if (hwmgr->pp_table_version == PP_TABLE_V0)
5090                 hwmgr->pptable_func = &pptable_funcs;
5091         else if (hwmgr->pp_table_version == PP_TABLE_V1)
5092                 hwmgr->pptable_func = &pptable_v1_0_funcs;
5093
5094         return ret;
5095 }