drivers/gpu/drm/amd/amdgpu/vi.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX);
        r = RREG32(mmPCIE_DATA);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX);
        WREG32(mmPCIE_DATA, v);
        (void)RREG32(mmPCIE_DATA);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

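/* SMC indirect register accessors */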
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, (reg));
        r = RREG32(mmSMC_IND_DATA_0);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, (reg));
        WREG32(mmSMC_IND_DATA_0, (v));
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

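/* UVD context indirect register accessors */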
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
        r = RREG32(mmUVD_CTX_DATA);
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
        return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
        WREG32(mmUVD_CTX_DATA, (v));
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

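/* DIDT indirect register accessors */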
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, (reg));
        r = RREG32(mmDIDT_IND_DATA);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, (reg));
        WREG32(mmDIDT_IND_DATA, (v));
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static const u32 tonga_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
        mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

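/* Apply the per-ASIC "golden" register settings at init time. */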
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
        /* Some of the registers might be dependent on GRBM_GFX_INDEX */
        mutex_lock(&adev->grbm_idx_mutex);

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                amdgpu_program_register_sequence(adev,
                                                 iceland_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
                break;
        case CHIP_TONGA:
                amdgpu_program_register_sequence(adev,
                                                 tonga_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
                                                 cz_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
                break;
        default:
                break;
        }
        mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
        u32 reference_clock = adev->clock.spll.reference_freq;
        u32 tmp;

        if (adev->flags & AMDGPU_IS_APU)
                return reference_clock;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
                return 1000;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
                return reference_clock / 4;

        return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
                     u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 srbm_gfx_cntl = 0;
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
        WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

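/*
 * Read the VBIOS by temporarily enabling ROM access and switching off VGA,
 * then restore the saved register state.
 */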
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
        u32 bus_cntl;
        u32 d1vga_control = 0;
        u32 d2vga_control = 0;
        u32 vga_render_control = 0;
        u32 rom_cntl;
        bool r;

        bus_cntl = RREG32(mmBUS_CNTL);
        if (adev->mode_info.num_crtc) {
                d1vga_control = RREG32(mmD1VGA_CONTROL);
                d2vga_control = RREG32(mmD2VGA_CONTROL);
                vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
        }
        rom_cntl = RREG32_SMC(ixROM_CNTL);

        /* enable the rom */
        WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
        if (adev->mode_info.num_crtc) {
                /* Disable VGA mode */
                WREG32(mmD1VGA_CONTROL,
                       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
                                          D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
                WREG32(mmD2VGA_CONTROL,
                       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
                                          D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
                WREG32(mmVGA_RENDER_CONTROL,
                       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

        r = amdgpu_read_bios(adev);

        /* restore regs */
        WREG32(mmBUS_CNTL, bus_cntl);
        if (adev->mode_info.num_crtc) {
                WREG32(mmD1VGA_CONTROL, d1vga_control);
                WREG32(mmD2VGA_CONTROL, d2vga_control);
                WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl);
        return r;
}

static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
        {mmGB_TILE_MODE7, true},
        {mmGB_TILE_MODE12, true},
        {mmGB_TILE_MODE17, true},
        {mmGB_TILE_MODE23, true},
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
        {mmMC_ARB_RAMCFG, false},
        {mmGB_TILE_MODE0, false},
        {mmGB_TILE_MODE1, false},
        {mmGB_TILE_MODE2, false},
        {mmGB_TILE_MODE3, false},
        {mmGB_TILE_MODE4, false},
        {mmGB_TILE_MODE5, false},
        {mmGB_TILE_MODE6, false},
        {mmGB_TILE_MODE7, false},
        {mmGB_TILE_MODE8, false},
        {mmGB_TILE_MODE9, false},
        {mmGB_TILE_MODE10, false},
        {mmGB_TILE_MODE11, false},
        {mmGB_TILE_MODE12, false},
        {mmGB_TILE_MODE13, false},
        {mmGB_TILE_MODE14, false},
        {mmGB_TILE_MODE15, false},
        {mmGB_TILE_MODE16, false},
        {mmGB_TILE_MODE17, false},
        {mmGB_TILE_MODE18, false},
        {mmGB_TILE_MODE19, false},
        {mmGB_TILE_MODE20, false},
        {mmGB_TILE_MODE21, false},
        {mmGB_TILE_MODE22, false},
        {mmGB_TILE_MODE23, false},
        {mmGB_TILE_MODE24, false},
        {mmGB_TILE_MODE25, false},
        {mmGB_TILE_MODE26, false},
        {mmGB_TILE_MODE27, false},
        {mmGB_TILE_MODE28, false},
        {mmGB_TILE_MODE29, false},
        {mmGB_TILE_MODE30, false},
        {mmGB_TILE_MODE31, false},
        {mmGB_MACROTILE_MODE0, false},
        {mmGB_MACROTILE_MODE1, false},
        {mmGB_MACROTILE_MODE2, false},
        {mmGB_MACROTILE_MODE3, false},
        {mmGB_MACROTILE_MODE4, false},
        {mmGB_MACROTILE_MODE5, false},
        {mmGB_MACROTILE_MODE6, false},
        {mmGB_MACROTILE_MODE7, false},
        {mmGB_MACROTILE_MODE8, false},
        {mmGB_MACROTILE_MODE9, false},
        {mmGB_MACROTILE_MODE10, false},
        {mmGB_MACROTILE_MODE11, false},
        {mmGB_MACROTILE_MODE12, false},
        {mmGB_MACROTILE_MODE13, false},
        {mmGB_MACROTILE_MODE14, false},
        {mmGB_MACROTILE_MODE15, false},
        {mmCC_RB_BACKEND_DISABLE, false, true},
        {mmGC_USER_RB_BACKEND_DISABLE, false, true},
        {mmGB_BACKEND_MAP, false, false},
        {mmPA_SC_RASTER_CONFIG, false, true},
        {mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, se_num, sh_num);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

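/*
 * Read a register on behalf of the register-read interface.  Only offsets
 * listed in the allowed-register tables above may be read, optionally
 * through the GRBM SE/SH index.
 */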
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        struct amdgpu_allowed_register_entry *asic_register_table = NULL;
        struct amdgpu_allowed_register_entry *asic_register_entry;
        uint32_t size, i;

        *value = 0;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                asic_register_table = tonga_allowed_read_registers;
                size = ARRAY_SIZE(tonga_allowed_read_registers);
                break;
        case CHIP_TONGA:
        case CHIP_CARRIZO:
                asic_register_table = cz_allowed_read_registers;
                size = ARRAY_SIZE(cz_allowed_read_registers);
                break;
        default:
                return -EINVAL;
        }

        if (asic_register_table) {
                for (i = 0; i < size; i++) {
                        asic_register_entry = asic_register_table + i;
                        if (reg_offset != asic_register_entry->reg_offset)
                                continue;
                        if (!asic_register_entry->untouched)
                                *value = asic_register_entry->grbm_indexed ?
                                        vi_read_indexed_register(adev, se_num,
                                                                 sh_num, reg_offset) :
                                        RREG32(reg_offset);
                        return 0;
                }
        }

        for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
                if (reg_offset != vi_allowed_read_registers[i].reg_offset)
                        continue;

                if (!vi_allowed_read_registers[i].untouched)
                        *value = vi_allowed_read_registers[i].grbm_indexed ?
                                vi_read_indexed_register(adev, se_num,
                                                         sh_num, reg_offset) :
                                RREG32(reg_offset);
                return 0;
        }
        return -EINVAL;
}

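/* Dump the GRBM/SRBM/CP/SDMA status registers for reset debugging. */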
static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
{
        dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(mmGRBM_STATUS));
        dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
                RREG32(mmGRBM_STATUS2));
        dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE0));
        dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE1));
        dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE2));
        dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE3));
        dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(mmSRBM_STATUS));
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                RREG32(mmSRBM_STATUS2));
        dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
                RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
        dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
                 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
        dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
        dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT1));
        dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT2));
        dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT3));
        dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
                 RREG32(mmCP_CPF_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPF_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
        dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPC_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}

/**
 * vi_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by vi_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
{
        u32 reset_mask = 0;
        u32 tmp;

        /* GRBM_STATUS */
        tmp = RREG32(mmGRBM_STATUS);
        if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
                   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
                   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
                   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
                   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
                   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
                reset_mask |= AMDGPU_RESET_GFX;

        if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_CP;

        /* GRBM_STATUS2 */
        tmp = RREG32(mmGRBM_STATUS2);
        if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_RLC;

        if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
                   GRBM_STATUS2__CPC_BUSY_MASK |
                   GRBM_STATUS2__CPG_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_CP;

        /* SRBM_STATUS2 */
        tmp = RREG32(mmSRBM_STATUS2);
        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_DMA;

        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_DMA1;

        /* SRBM_STATUS */
        tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__IH_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_IH;

        if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_SEM;

        if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
                reset_mask |= AMDGPU_RESET_GRBM;

        if (adev->asic_type != CHIP_TOPAZ) {
                if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
                           SRBM_STATUS__UVD_BUSY_MASK))
                        reset_mask |= AMDGPU_RESET_UVD;
        }

        if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_VMC;

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_MC;

        /* SDMA0_STATUS_REG */
        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                reset_mask |= AMDGPU_RESET_DMA;

        /* SDMA1_STATUS_REG */
        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                reset_mask |= AMDGPU_RESET_DMA1;
#if 0
        /* VCE_STATUS */
        if (adev->asic_type != CHIP_TOPAZ) {
                tmp = RREG32(mmVCE_STATUS);
                if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
                        reset_mask |= AMDGPU_RESET_VCE;
                if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
                        reset_mask |= AMDGPU_RESET_VCE1;

        }

        if (adev->asic_type != CHIP_TOPAZ) {
                if (amdgpu_display_is_display_hung(adev))
                        reset_mask |= AMDGPU_RESET_DISPLAY;
        }
#endif

        /* Skip MC reset as it's most likely not hung, just busy */
        if (reset_mask & AMDGPU_RESET_MC) {
                DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
                reset_mask &= ~AMDGPU_RESET_MC;
        }

        return reset_mask;
}

/**
 * vi_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
        struct amdgpu_mode_mc_save save;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;

        if (reset_mask == 0)
                return;

        dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

        vi_print_gpu_status_regs(adev);
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

        /* disable CG/PG */

        /* stop the rlc */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        if (reset_mask & AMDGPU_RESET_DMA) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
        }
        if (reset_mask & AMDGPU_RESET_DMA1) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
        }

        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out !\n");
        }

        if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
        }

        if (reset_mask & AMDGPU_RESET_CP) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
        }

        if (reset_mask & AMDGPU_RESET_DMA)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);

        if (reset_mask & AMDGPU_RESET_DMA1)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);

        if (reset_mask & AMDGPU_RESET_DISPLAY)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);

        if (reset_mask & AMDGPU_RESET_RLC)
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

        if (reset_mask & AMDGPU_RESET_SEM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

        if (reset_mask & AMDGPU_RESET_IH)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);

        if (reset_mask & AMDGPU_RESET_GRBM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

        if (reset_mask & AMDGPU_RESET_VMC)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

        if (reset_mask & AMDGPU_RESET_UVD)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (reset_mask & AMDGPU_RESET_VCE)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);

        if (reset_mask & AMDGPU_RESET_VCE)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);

        if (!(adev->flags & AMDGPU_IS_APU)) {
                if (reset_mask & AMDGPU_RESET_MC)
                        srbm_soft_reset =
                                REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }

        if (grbm_soft_reset) {
                tmp = RREG32(mmGRBM_SOFT_RESET);
                tmp |= grbm_soft_reset;
                dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~grbm_soft_reset;
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);
        }

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);
        }

        /* Wait a little for things to settle down */
        udelay(50);

        gmc_v8_0_mc_resume(adev, &save);
        udelay(50);

        vi_print_gpu_status_regs(adev);
}

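/*
 * Reset the ASIC through PCI config space: halt the CP/MEC/SDMA engines,
 * stop MC access, issue the config reset and wait for the ASIC to come back.
 */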
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        struct amdgpu_mode_mc_save save;
        u32 tmp, i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable dpm? */

        /* disable cg/pg */

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        /* Disable GFX parsing/prefetching */
        WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
                CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

        /* Disable MEC parsing/prefetching */
        WREG32(mmCP_MEC_CNTL,
                        CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

        /* sdma0 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

        /* sdma1 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

        /* XXX other engines? */

        /* halt the rlc, disable cp internal ints */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        udelay(50);

        /* disable mem access */
        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out !\n");
        }

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
                        break;
                udelay(1);
        }

}

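/* Set or clear the "GUI engine hung" flag in the BIOS scratch register. */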
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
        u32 tmp = RREG32(mmBIOS_SCRATCH_3);

        if (hung)
                tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
        else
                tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

        WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
        u32 reset_mask;

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (reset_mask)
                vi_set_bios_scratch_engine_hung(adev, true);

        /* try soft reset */
        vi_gpu_soft_reset(adev, reset_mask);

        reset_mask = vi_gpu_check_soft_reset(adev);

        /* try pci config reset */
        if (reset_mask && amdgpu_hard_reset)
                vi_gpu_pci_config_reset(adev);

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (!reset_mask)
                vi_set_bios_scratch_engine_hung(adev, false);

        return 0;
}

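/*
 * Program one UVD clock: look up the dividers via atombios, write the
 * post divider into the SMC control register and wait for the status bit.
 */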
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
                        u32 cntl_reg, u32 status_reg)
{
        int r, i;
        struct atom_clock_dividers dividers;
        uint32_t tmp;

        r = amdgpu_atombios_get_clock_dividers(adev,
                                               COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                               clock, false, &dividers);
        if (r)
                return r;

        tmp = RREG32_SMC(cntl_reg);
        tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
                CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
        tmp |= dividers.post_divider;
        WREG32_SMC(cntl_reg, tmp);

        for (i = 0; i < 100; i++) {
                if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
                        break;
                mdelay(10);
        }
        if (i == 100)
                return -ETIMEDOUT;

        return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        int r;

        r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
        if (r)
                return r;

        r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

        return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */

        return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
        u32 mask;
        int ret;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (adev->flags & AMDGPU_IS_APU)
                return;

        ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
        if (ret != 0)
                return;

        if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
                return;

        /* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

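/* Enable or disable the doorbell aperture in the BIF (not needed on APUs). */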
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        u32 tmp;

        /* not necessary on CZ */
        if (adev->flags & AMDGPU_IS_APU)
                return;

        tmp = RREG32(mmBIF_DOORBELL_APER_EN);
        if (enable)
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
        else
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

        WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &iceland_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &iceland_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &sdma_v2_4_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &tonga_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &tonga_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 10,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v10_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 5,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v5_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 11,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v11_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

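/* Register the IP blocks for the detected ASIC; the array order matters. */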
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->ip_blocks = topaz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
                break;
        case CHIP_TONGA:
                adev->ip_blocks = tonga_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
                break;
        case CHIP_CARRIZO:
                adev->ip_blocks = cz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}

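/* Read the ASIC revision id from the revision ID straps. */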
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_TOPAZ)
                return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
                        >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
        else
                return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
                        >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
        .read_disabled_bios = &vi_read_disabled_bios,
        .read_register = &vi_read_register,
        .reset = &vi_asic_reset,
        .set_vga_state = &vi_vga_set_state,
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
        .get_cu_info = &gfx_v8_0_get_cu_info,
        /* these should be moved to their own ip modules */
        .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
        .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};

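/* Set up the indirect register accessors and the per-ASIC feature flags and revision ids. */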
static int vi_common_early_init(void *handle)
{
        bool smc_enabled = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->smc_rreg = &vi_smc_rreg;
        adev->smc_wreg = &vi_smc_wreg;
        adev->pcie_rreg = &vi_pcie_rreg;
        adev->pcie_wreg = &vi_pcie_wreg;
        adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
        adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
        adev->didt_rreg = &vi_didt_rreg;
        adev->didt_wreg = &vi_didt_wreg;

        adev->asic_funcs = &vi_asic_funcs;

        if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
                (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
                smc_enabled = true;

        adev->rev_id = vi_get_rev_id(adev);
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->has_uvd = false;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = 0x1;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        case CHIP_TONGA:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        case CHIP_CARRIZO:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
                adev->external_rev_id = adev->rev_id + 0x1;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}

static int vi_common_sw_init(void *handle)
{
        return 0;
}

static int vi_common_sw_fini(void *handle)
{
        return 0;
}

static int vi_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* move the golden regs per IP block */
        vi_init_golden_registers(adev);
        /* enable pcie gen2/3 link */
        vi_pcie_gen3_enable(adev);
        /* enable aspm */
        vi_program_aspm(adev);
        /* enable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, true);

        return 0;
}

static int vi_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, false);

        return 0;
}

static int vi_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
        return true;
}

static int vi_common_wait_for_idle(void *handle)
{
        return 0;
}

static void vi_common_print_status(void *handle)
{
        return;
}

static int vi_common_soft_reset(void *handle)
{
        return 0;
}

static int vi_common_set_clockgating_state(void *handle,
                                            enum amd_clockgating_state state)
{
        return 0;
}

static int vi_common_set_powergating_state(void *handle,
                                            enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
        .early_init = vi_common_early_init,
        .late_init = NULL,
        .sw_init = vi_common_sw_init,
        .sw_fini = vi_common_sw_fini,
        .hw_init = vi_common_hw_init,
        .hw_fini = vi_common_hw_fini,
        .suspend = vi_common_suspend,
        .resume = vi_common_resume,
        .is_idle = vi_common_is_idle,
        .wait_for_idle = vi_common_wait_for_idle,
        .soft_reset = vi_common_soft_reset,
        .print_status = vi_common_print_status,
        .set_clockgating_state = vi_common_set_clockgating_state,
        .set_powergating_state = vi_common_set_powergating_state,
};