drivers/gpu/drm/amd/powerplay/smu_v12_0.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"

#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"

#define smnMP1_FIRMWARE_FLAGS                                0x3010024

#define mmSMUIO_GFX_MISC_CNTL                                0x00c8
#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                       0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK          0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT        0x1

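/*
 * The driver talks to the SMU through the MP1 C2PMSG mailbox registers:
 * C2PMSG_66 carries the message index, C2PMSG_82 the argument, and
 * C2PMSG_90 the SMU's response (0x1 on success).
 */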
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
                                              uint16_t msg)
{
        struct amdgpu_device *adev = smu->adev;

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
        return 0;
}

int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
        struct amdgpu_device *adev = smu->adev;

        *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
        return 0;
}

int smu_v12_0_wait_for_response(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t cur_value, i;

        for (i = 0; i < adev->usec_timeout; i++) {
                cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        return cur_value == 0x1 ? 0 : -EIO;

                udelay(1);
        }
        /* timing out here means the SMU is in an unexpected state */
        return -ETIME;
}

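/*
 * Send a message with an argument to the SMU: wait for any in-flight
 * request to complete, clear the response register, write the argument
 * and the message index, then poll for the SMU's acknowledgement.
 */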
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
                              enum smu_message_type msg,
                              uint32_t param)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0, index = 0;

        index = smu_msg_get_index(smu, msg);
        if (index < 0)
                return index;

        ret = smu_v12_0_wait_for_response(smu);
        if (ret) {
                pr_err("Msg issuing pre-check failed and SMU may not be in the right state!\n");
                return ret;
        }

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

        smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);

        ret = smu_v12_0_wait_for_response(smu);
        if (ret)
                pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
                       index, ret, param);

        return ret;
}

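/*
 * Check whether the MP1 (SMU) firmware is up: read the firmware flags
 * over the SMN aperture and treat "interrupts enabled" as ready.
 */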
int smu_v12_0_check_fw_status(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t mp1_fw_flags;

        mp1_fw_flags = RREG32_PCIE(MP1_Public |
                (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

        if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
                MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
                return 0;

        return -EIO;
}

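/*
 * The 32-bit SMU firmware version is packed as major (bits 31:16),
 * minor (bits 15:8) and debug (bits 7:0); the interface version is
 * compared against the one the driver was built for.
 */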
int smu_v12_0_check_fw_version(struct smu_context *smu)
{
        uint32_t if_version = 0xff, smu_version = 0xff;
        uint16_t smu_major;
        uint8_t smu_minor, smu_debug;
        int ret = 0;

        ret = smu_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;

        smu_major = (smu_version >> 16) & 0xffff;
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;

        /*
         * 1. An if_version mismatch is not critical, as our firmware is
         * designed to be backward compatible.
         * 2. New firmware usually brings some optimizations, but those are
         * only visible with the matching driver.
         * Considering the above, we just warn the user instead of halting
         * driver loading.
         */
        if (if_version != smu->smc_if_version) {
                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
                        "smu fw version = 0x%08x (%d.%d.%d)\n",
                        smu->smc_if_version, if_version,
                        smu_version, smu_major, smu_minor, smu_debug);
                pr_warn("SMU driver if version not matched\n");
        }

        return ret;
}

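/*
 * SDMA, VCN and JPEG power gating is only relevant on APUs; on other
 * parts these helpers are no-ops.
 */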
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
        if (!smu->is_apu)
                return 0;

        if (gate)
                return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
        else
                return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
}

int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
        if (!smu->is_apu)
                return 0;

        if (gate)
                return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
        else
                return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}

int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
{
        if (!smu->is_apu)
                return 0;

        if (gate)
                return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
        else
                return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
}

int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
        if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
                return 0;

        return smu_v12_0_send_msg_with_param(smu,
                SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
}

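/*
 * Handle the sensors this block can answer directly (current GFX and
 * memory clocks, minimum fan RPM) and defer everything else to the
 * common sensor handler.
 */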
int smu_v12_0_read_sensor(struct smu_context *smu,
                                 enum amd_pp_sensors sensor,
                                 void *data, uint32_t *size)
{
        int ret = 0;

        if (!data || !size)
                return -EINVAL;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_GFX_MCLK:
                ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_SCLK:
                ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
                *(uint32_t *)data = 0;
                *size = 4;
                break;
        default:
                ret = smu_common_read_sensor(smu, sensor, data, size);
                break;
        }

        if (ret)
                *size = 0;

        return ret;
}

/**
 * smu_v12_0_get_gfxoff_status - get gfxoff status
 *
 * @smu: smu context pointer
 *
 * Query the current GFXOFF state from the SMUIO register.
 *
 * Returns 0=In GFXOFF (default).
 * Returns 1=Transitioning out of GFXOFF.
 * Returns 2=Not in GFXOFF.
 * Returns 3=Transitioning into GFXOFF.
 */
uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
        uint32_t reg;
        uint32_t gfxOff_Status = 0;
        struct amdgpu_device *adev = smu->adev;

        reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
        gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
                >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

        return gfxOff_Status;
}

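/*
 * Enable or disable GFXOFF. When disabling, poll until the status above
 * reports "not in GFXOFF" (2), with a 0.5 second timeout.
 */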
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
        int ret = 0, timeout = 500;

        if (enable) {
                ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);

        } else {
                ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);

                /* confirm gfx is back to "on" state, timeout is 0.5 second */
                while (smu_v12_0_get_gfxoff_status(smu) != 2) {
                        msleep(1);
                        timeout--;
                        if (timeout == 0) {
                                DRM_ERROR("timeout waiting for gfxoff to be disabled!\n");
                                break;
                        }
                }
        }

        return ret;
}

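/*
 * Allocate/free the driver-side bookkeeping for the SMU tables; the
 * actual table layout comes from smu_tables_init().
 */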
int smu_v12_0_init_smc_tables(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = NULL;

        if (smu_table->tables)
                return -EINVAL;

        tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
                         GFP_KERNEL);
        if (!tables)
                return -ENOMEM;

        smu_table->tables = tables;

        return smu_tables_init(smu, tables);
}

int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;

        if (!smu_table->tables)
                return -EINVAL;

        kfree(smu_table->clocks_table);
        kfree(smu_table->tables);

        smu_table->clocks_table = NULL;
        smu_table->tables = NULL;

        return 0;
}

int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;

        return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

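/*
 * The enabled-feature mask is 64 bits wide and is returned by the SMU
 * as two 32-bit halves: feature_mask[0] holds the low word and
 * feature_mask[1] the high word.
 */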
int smu_v12_0_get_enabled_mask(struct smu_context *smu,
                                      uint32_t *feature_mask, uint32_t num)
{
        uint32_t feature_mask_high = 0, feature_mask_low = 0;
        int ret = 0;

        if (!feature_mask || num < 2)
                return -EINVAL;

        ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
        if (ret)
                return ret;
        ret = smu_read_smc_arg(smu, &feature_mask_high);
        if (ret)
                return ret;

        ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
        if (ret)
                return ret;
        ret = smu_read_smc_arg(smu, &feature_mask_low);
        if (ret)
                return ret;

        feature_mask[0] = feature_mask_low;
        feature_mask[1] = feature_mask_high;

        return ret;
}

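/*
 * Look up the current frequency for a clock domain from the DPM table
 * and scale it by 100 into the units callers expect.
 */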
int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
                                          enum smu_clk_type clk_id,
                                          uint32_t *value)
{
        int ret = 0;
        uint32_t freq = 0;

        if (clk_id >= SMU_CLK_COUNT || !value)
                return -EINVAL;

        ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
        if (ret)
                return ret;

        freq *= 100;
        *value = freq;

        return ret;
}

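/*
 * Report the ultimate frequency range of a clock domain: the maximum
 * comes from the SMU (GFXCLK) or the peak-profile DPM level
 * (UCLK/FCLK/MCLK/SOCCLK); the minimum comes from the SMU (GFXCLK) or
 * the lowest DPM level.
 */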
int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
                                                 uint32_t *min, uint32_t *max)
{
        int ret = 0;
        uint32_t mclk_mask, soc_mask;

        if (max) {
                ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
                                                 NULL,
                                                 &mclk_mask,
                                                 &soc_mask);
                if (ret)
                        goto failed;

                switch (clk_type) {
                case SMU_GFXCLK:
                case SMU_SCLK:
                        ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
                        if (ret) {
                                pr_err("Attempt to get max GFX frequency from SMC failed!\n");
                                goto failed;
                        }
                        ret = smu_read_smc_arg(smu, max);
                        if (ret)
                                goto failed;
                        break;
                case SMU_UCLK:
                case SMU_FCLK:
                case SMU_MCLK:
                        ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
                        if (ret)
                                goto failed;
                        break;
                case SMU_SOCCLK:
                        ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
                        if (ret)
                                goto failed;
                        break;
                default:
                        ret = -EINVAL;
                        goto failed;
                }
        }

        if (min) {
                switch (clk_type) {
                case SMU_GFXCLK:
                case SMU_SCLK:
                        ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
                        if (ret) {
                                pr_err("Attempt to get min GFX frequency from SMC failed!\n");
                                goto failed;
                        }
                        ret = smu_read_smc_arg(smu, min);
                        if (ret)
                                goto failed;
                        break;
                case SMU_UCLK:
                case SMU_FCLK:
                case SMU_MCLK:
                        ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
                        if (ret)
                                goto failed;
                        break;
                case SMU_SOCCLK:
                        ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
                        if (ret)
                                goto failed;
                        break;
                default:
                        ret = -EINVAL;
                        goto failed;
                }
        }
failed:
        return ret;
}

int smu_v12_0_mode2_reset(struct smu_context *smu)
{
        return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
}

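/*
 * Clamp a clock domain into [min, max] by sending the matching
 * hard-minimum and soft-maximum messages for that domain.
 */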
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max)
{
        int ret = 0;

        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
                if (ret)
                        return ret;

                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
                if (ret)
                        return ret;
                break;
        case SMU_FCLK:
        case SMU_MCLK:
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
                if (ret)
                        return ret;

                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
                if (ret)
                        return ret;
                break;
        case SMU_SOCCLK:
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
                if (ret)
                        return ret;

                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
                if (ret)
                        return ret;
                break;
        case SMU_VCLK:
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
                if (ret)
                        return ret;

                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
                if (ret)
                        return ret;
                break;
        default:
                return -EINVAL;
        }

        return ret;
}

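/*
 * Tell the SMU where the driver table lives in VRAM by passing the
 * 64-bit MC address as separate high and low 32-bit words.
 */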
int smu_v12_0_set_driver_table_location(struct smu_context *smu)
{
        struct smu_table *driver_table = &smu->smu_table.driver_table;
        int ret = 0;

        if (driver_table->mc_address) {
                ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrHigh,
                                upper_32_bits(driver_table->mc_address));
                if (!ret)
                        ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrLow,
                                lower_32_bits(driver_table->mc_address));
        }

        return ret;
}