/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");

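/*
 * "Golden" register tables: per-ASIC register overrides applied once at
 * init.  Each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask)
 * entry is applied by soc15_program_register_sequence() as
 * reg = (reg & ~and_mask) | or_mask, i.e. the masked bits are cleared
 * first and the override value is then ORed in.
 */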
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

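/*
 * Apply the golden register fixups for the detected ASIC: the per-chip
 * tables above are programmed first, followed by the settings common to
 * all gfx9 parts.
 */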
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

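/*
 * Emit a PACKET3_WRITE_DATA packet that programs a single register from
 * the ring: header, control word (engine select, destination 0 = register,
 * optional write confirmation), register offset, high address dword
 * (unused for register writes, hence 0) and the value itself.
 */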
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

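/*
 * Emit a PACKET3_WAIT_REG_MEM packet.  The CP polls either a register
 * (mem_space = 0) or a dword-aligned memory location (mem_space = 1) until
 * (value & mask) == ref (WAIT_REG_MEM_FUNCTION(3) selects "equal"),
 * re-checking at the poll interval given in the last dword.
 */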
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

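/*
 * Basic ring liveness test: seed a scratch register with 0xCAFEDEAD, ask
 * the CP to overwrite it with 0xDEADBEEF via SET_UCONFIG_REG, then poll
 * until the new value shows up or the timeout expires.
 */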
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

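/*
 * Indirect buffer test: submit a small IB that does a WRITE_DATA of
 * 0xDEADBEEF into a writeback slot in system memory, wait on the fence and
 * verify the value landed.  This exercises the whole IB submission path,
 * not just the ring itself.
 */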
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		break;
	}
}

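/*
 * Fetch and validate all gfx9 firmware images (PFP, ME, CE, RLC, MEC and
 * the optional MEC2), cache their version/feature numbers, and, for
 * PSP-based loading, register each image with the common firmware
 * framework so its size is accounted for.
 */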
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	/*
	 * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
	 * instead of picasso_rlc.bin.
	 * Judgment method:
	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
	 *          or revision >= 0xD8 && revision <= 0xDF
	 * otherwise is PCO FP5
	 */
	if (!strcmp(chip_name, "picasso") &&
	    (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
	     ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			 adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			 le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			 le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	gfx_v9_0_check_fw_write_wait(adev);
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

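/*
 * Build the clear state buffer (CSB): a preamble, a CONTEXT_CONTROL packet,
 * one SET_CONTEXT_REG packet per extent of the gfx9 clear state data, and
 * a trailing CLEAR_STATE packet.  The layout must match the dword count
 * returned by gfx_v9_0_get_csb_size() above.
 */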
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

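/*
 * Program the per-SE/SH "always on" CU masks: walk each SH's CU bitmap,
 * keep the first always_on_cu_num CUs active for load balancing, and also
 * feed the first pg_always_on_cu_num CUs into the RLC power-gating
 * always-on mask.
 */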
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	uint32_t pg_always_on_cu_num = 2;
	uint32_t always_on_cu_num;
	uint32_t i, j, k;
	uint32_t mask, cu_bitmap, counter;

	if (adev->flags & AMD_IS_APU)
		always_on_cu_num = 4;
	else if (adev->asic_type == CHIP_VEGA12)
		always_on_cu_num = 8;
	else
		always_on_cu_num = 12;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (cu_info->bitmap[i][j] & mask) {
					if (counter == pg_always_on_cu_num)
						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
					if (counter < always_on_cu_num)
						cu_bitmap |= mask;
					else
						break;
					counter++;
				}
				mask <<= 1;
			}

			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}

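/*
 * LBPW (load balance per watt) setup for Raven; gfx_v9_4_init_lbpw() below
 * is the variant used on Vega20 (see gfx_v9_0_rlc_init()), with different
 * thresholds and counter limits.
 */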
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
	return 5;
}

static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		gfx_v9_0_init_lbpw(adev);
		break;
	case CHIP_VEGA20:
		gfx_v9_4_init_lbpw(adev);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}

static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

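/*
 * Allocate the MEC resources: one GFX9_MEC_HPD_SIZE HPD EOP slice per
 * acquired compute ring in VRAM, plus a GTT bo holding a copy of the MEC
 * firmware image.
 */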
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

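/*
 * Wave debug helpers: SQ_IND_INDEX/SQ_IND_DATA form an indirect register
 * pair for reading per-wave state.  wave_read_regs() uses the
 * auto-increment mode to stream 'num' consecutive dwords (e.g. SGPRs or
 * VGPRs) into 'out'.
 */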
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q)
{
	soc15_grbm_select(adev, me, pipe, q, 0);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};

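/*
 * Early gfx configuration: pick the per-ASIC fifo sizes and GB_ADDR_CONFIG
 * value, then decode the individual GB_ADDR_CONFIG fields (pipes, banks,
 * shader engines, ...) into adev->gfx.config for later use.
 */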
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	int err;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VEGA12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
		DRM_INFO("fix gfx.config for vega12\n");
		break;
	case CHIP_VEGA20:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		gb_addr_config &= ~0xf3e777ff;
		gb_addr_config |= 0x22014042;
		/* check vbios table if gpu info is not available */
		err = amdgpu_atomfirmware_get_gfx_info(adev);
		if (err)
			return err;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		if (adev->rev_id >= 8)
			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
		else
			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}

static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}

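/*
 * Enable NGG: program the WD buffer sizes and base addresses for the
 * buffers created in gfx_v9_0_ngg_init() above, then clear the reserved
 * GDS window through the gfx ring (17 dwords: two register writes
 * bracketing a DMA_DATA fill of the reserved range).
 */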
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data, base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
			  ring->name, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size));

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_DST_SEL(1) |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
				adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);

	amdgpu_ring_commit(ring);

	return 0;
}

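/*
 * Initialize one compute ring: MEC0 is exposed as ME1, each ring gets its
 * own doorbell and a GFX9_MEC_HPD_SIZE slice of the HPD EOP buffer, and
 * the EOP interrupt source is derived from the ring's me/pipe.
 */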
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

1602 static int gfx_v9_0_sw_init(void *handle)
1604 int i, j, k, r, ring_id;
1605 struct amdgpu_ring *ring;
1606 struct amdgpu_kiq *kiq;
1607 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1609 switch (adev->asic_type) {
1614 adev->gfx.mec.num_mec = 2;
1617 adev->gfx.mec.num_mec = 1;
1621 adev->gfx.mec.num_pipe_per_mec = 4;
1622 adev->gfx.mec.num_queue_per_pipe = 8;
1625 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1629 /* Privileged reg */
1630 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1631 &adev->gfx.priv_reg_irq);
1635 /* Privileged inst */
1636 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1637 &adev->gfx.priv_inst_irq);
1641 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1643 gfx_v9_0_scratch_init(adev);
1645 r = gfx_v9_0_init_microcode(adev);
1647 DRM_ERROR("Failed to load gfx firmware!\n");
1651 r = adev->gfx.rlc.funcs->init(adev);
1653 DRM_ERROR("Failed to init rlc BOs!\n");
1657 r = gfx_v9_0_mec_init(adev);
1659 DRM_ERROR("Failed to init MEC BOs!\n");
1663 /* set up the gfx ring */
1664 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1665 ring = &adev->gfx.gfx_ring[i];
1666 ring->ring_obj = NULL;
1668 sprintf(ring->name, "gfx");
1670 sprintf(ring->name, "gfx_%d", i);
1671 ring->use_doorbell = true;
1672 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
1673 r = amdgpu_ring_init(adev, ring, 1024,
1674 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1679 /* set up the compute queues - allocate horizontally across pipes */
1681 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1682 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1683 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1684 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1687 r = gfx_v9_0_compute_ring_init(adev,
1698 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1700 DRM_ERROR("Failed to init KIQ BOs!\n");
1704 kiq = &adev->gfx.kiq;
1705 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1709 /* create MQD for all compute queues as well as KIQ for SRIOV case */
1710 r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1714 adev->gfx.ce_ram_size = 0x8000;
1716 r = gfx_v9_0_gpu_early_init(adev);
1720 r = gfx_v9_0_ngg_init(adev);
1728 static int gfx_v9_0_sw_fini(void *handle)
1731 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1733 amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1734 amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1735 amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1737 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1738 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1739 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1740 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1742 amdgpu_gfx_compute_mqd_sw_fini(adev);
1743 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1744 amdgpu_gfx_kiq_fini(adev);
1746 gfx_v9_0_mec_fini(adev);
1747 gfx_v9_0_ngg_fini(adev);
1748 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1749 &adev->gfx.rlc.clear_state_gpu_addr,
1750 (void **)&adev->gfx.rlc.cs_ptr);
1751 if (adev->asic_type == CHIP_RAVEN) {
1752 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1753 &adev->gfx.rlc.cp_table_gpu_addr,
1754 (void **)&adev->gfx.rlc.cp_table_ptr);
1756 gfx_v9_0_free_microcode(adev);
1762 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1767 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1771 if (instance == 0xffffffff)
1772 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1774 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1776 if (se_num == 0xffffffff)
1777 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1779 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1781 if (sh_num == 0xffffffff)
1782 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1784 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1786 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1789 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1793 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1794 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1796 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1797 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1799 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1800 adev->gfx.config.max_sh_per_se);
1802 return (~data) & mask;
1805 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1810 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1811 adev->gfx.config.max_sh_per_se;
1813 mutex_lock(&adev->grbm_idx_mutex);
1814 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1815 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1816 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1817 data = gfx_v9_0_get_rb_active_bitmap(adev);
1818 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1819 rb_bitmap_width_per_sh);
1822 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1823 mutex_unlock(&adev->grbm_idx_mutex);
1825 adev->gfx.config.backend_enable_mask = active_rbs;
1826 adev->gfx.config.num_rbs = hweight32(active_rbs);
1829 #define DEFAULT_SH_MEM_BASES (0x6000)
1830 #define FIRST_COMPUTE_VMID (8)
1831 #define LAST_COMPUTE_VMID (16)
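/*
 * VMIDs 0-7 stay under graphics-side control; VMIDs FIRST_COMPUTE_VMID up
 * to (but not including) LAST_COMPUTE_VMID, i.e. 8-15, are handed to the
 * compute side (amdkfd) and get the fixed 64-bit apertures programmed below.
 */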
1832 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1835 uint32_t sh_mem_config;
1836 uint32_t sh_mem_bases;
1839 * Configure apertures:
1840 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
1841 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
1842 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
1844 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
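	/*
	 * SH_MEM_BASES holds address bits [63:48]: private (scratch) base in
	 * bits [15:0], shared base in bits [31:16]. 0x6000 in both fields
	 * places the apertures at 0x6000'0000'00000000, matching the layout
	 * in the comment above.
	 */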
1846 sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1847 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1848 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1850 mutex_lock(&adev->srbm_mutex);
1851 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1852 soc15_grbm_select(adev, 0, 0, 0, i);
1853 /* CP and shaders */
1854 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1855 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1857 soc15_grbm_select(adev, 0, 0, 0, 0);
1858 mutex_unlock(&adev->srbm_mutex);
1861 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
1866 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1868 gfx_v9_0_tiling_mode_table_init(adev);
1870 gfx_v9_0_setup_rb(adev);
1871 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1872 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1874 /* XXX SH_MEM regs */
1875 /* where to put LDS, scratch, GPUVM in FSA64 space */
1876 mutex_lock(&adev->srbm_mutex);
1877 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1878 soc15_grbm_select(adev, 0, 0, 0, i);
1879 /* CP and shaders */
1881 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1882 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1883 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1884 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1886 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1887 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1888 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1889 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1890 (adev->gmc.private_aperture_start >> 48));
1891 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1892 (adev->gmc.shared_aperture_start >> 48));
1893 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1896 soc15_grbm_select(adev, 0, 0, 0, 0);
1898 mutex_unlock(&adev->srbm_mutex);
1900 gfx_v9_0_init_compute_vmid(adev);
1902 mutex_lock(&adev->grbm_idx_mutex);
1904 * making sure that the following register writes will be broadcast
1905 * to all the shaders
1907 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1909 WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1910 (adev->gfx.config.sc_prim_fifo_size_frontend <<
1911 PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1912 (adev->gfx.config.sc_prim_fifo_size_backend <<
1913 PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1914 (adev->gfx.config.sc_hiz_tile_fifo_size <<
1915 PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1916 (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1917 PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1918 mutex_unlock(&adev->grbm_idx_mutex);
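/*
 * Walk every SE/SH instance (selected through GRBM_GFX_INDEX) and poll the
 * per-CU serdes busy bits, then the non-CU master busy bits, so that all
 * outstanding RLC serdes transactions have drained before RLC state is
 * touched.
 */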
1922 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1927 mutex_lock(&adev->grbm_idx_mutex);
1928 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1929 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1930 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1931 for (k = 0; k < adev->usec_timeout; k++) {
1932 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1936 if (k == adev->usec_timeout) {
1937 gfx_v9_0_select_se_sh(adev, 0xffffffff,
1938 0xffffffff, 0xffffffff);
1939 mutex_unlock(&adev->grbm_idx_mutex);
1940 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1941 i, j);
1942 return;
1946 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1947 mutex_unlock(&adev->grbm_idx_mutex);
1949 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1950 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1951 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1952 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1953 for (k = 0; k < adev->usec_timeout; k++) {
1954 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1960 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1963 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1965 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1966 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1967 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1968 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
1970 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1973 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
1976 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
1977 adev->gfx.rlc.clear_state_gpu_addr >> 32);
1978 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
1979 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1980 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
1981 adev->gfx.rlc.clear_state_size);
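/*
 * The RLC register list format blob is a flat array: a direct-register
 * portion first, then indirect blocks, each terminated by 0xFFFFFFFF.
 * This helper records where every indirect block starts and collects the
 * unique indirect register offsets those blocks reference.
 */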
1984 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
1985 int indirect_offset,
1987 int *unique_indirect_regs,
1988 int unique_indirect_reg_count,
1989 int *indirect_start_offsets,
1990 int *indirect_start_offsets_count,
1991 int max_start_offsets_count)
1995 for (; indirect_offset < list_size; indirect_offset++) {
1996 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
1997 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
1998 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2000 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2001 indirect_offset += 2;
2003 /* look for the matching index */
2004 for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2005 if (unique_indirect_regs[idx] ==
2006 register_list_format[indirect_offset] ||
2007 !unique_indirect_regs[idx])
2011 BUG_ON(idx >= unique_indirect_reg_count);
2013 if (!unique_indirect_regs[idx])
2014 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2021 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2023 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2024 int unique_indirect_reg_count = 0;
2026 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2027 int indirect_start_offsets_count = 0;
2033 u32 *register_list_format =
2034 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2035 if (!register_list_format)
2037 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
2038 adev->gfx.rlc.reg_list_format_size_bytes);
2040 /* setup unique_indirect_regs array and indirect_start_offsets array */
2041 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2042 gfx_v9_1_parse_ind_reg_list(register_list_format,
2043 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2044 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2045 unique_indirect_regs,
2046 unique_indirect_reg_count,
2047 indirect_start_offsets,
2048 &indirect_start_offsets_count,
2049 ARRAY_SIZE(indirect_start_offsets));
2051 /* enable auto inc in case it is disabled */
2052 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2053 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2054 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2056 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2057 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2058 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2059 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2060 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2061 adev->gfx.rlc.register_restore[i]);
2063 /* load indirect register */
2064 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2065 adev->gfx.rlc.reg_list_format_start);
2067 /* direct register portion */
2068 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2069 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2070 register_list_format[i]);
2072 /* indirect register portion */
2073 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2074 if (register_list_format[i] == 0xFFFFFFFF) {
2075 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2079 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2080 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2082 for (j = 0; j < unique_indirect_reg_count; j++) {
2083 if (register_list_format[i] == unique_indirect_regs[j]) {
2084 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2089 BUG_ON(j >= unique_indirect_reg_count);
2094 /* set save/restore list size */
2095 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2096 list_size = list_size >> 1;
2097 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2098 adev->gfx.rlc.reg_restore_list_size);
2099 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2101 /* write the starting offsets to RLC scratch ram */
2102 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2103 adev->gfx.rlc.starting_offsets_start);
2104 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2105 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2106 indirect_start_offsets[i]);
2108 /* load unique indirect regs*/
2109 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2110 if (unique_indirect_regs[i] != 0) {
2111 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2112 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2113 unique_indirect_regs[i] & 0x3FFFF);
2115 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2116 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2117 unique_indirect_regs[i] >> 20);
2121 kfree(register_list_format);
2125 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2127 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
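/*
 * PWR_MISC_CNTL_STATUS lives in the SMU's PWR register space: per the
 * masks defined at the top of this file, bit 0 hands CGPG control to
 * GFXIP and bits 2:1 carry the GFXOFF status; the value 2 written below
 * marks GFXOFF as allowed.
 */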
2130 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2134 uint32_t default_data = 0;
2136 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2137 if (enable) {
2138 /* enable GFXIP control over CGPG */
2139 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2140 if (default_data != data)
2141 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2144 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2145 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2146 if (default_data != data)
2147 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2148 } else {
2149 /* restore GFXIP control over CGPG */
2150 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2151 if (default_data != data)
2152 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2153 }
2156 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2160 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2161 AMD_PG_SUPPORT_GFX_SMG |
2162 AMD_PG_SUPPORT_GFX_DMG)) {
2163 /* init IDLE_POLL_COUNT = 60 */
2164 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2165 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2166 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2167 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2169 /* init RLC PG Delay */
2171 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2172 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2173 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2174 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2175 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2177 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2178 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2179 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2180 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2182 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2183 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2184 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2185 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2187 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2188 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2190 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2191 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2192 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2194 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2198 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2202 uint32_t default_data = 0;
2204 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2205 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2206 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2208 if (default_data != data)
2209 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2212 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2216 uint32_t default_data = 0;
2218 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2219 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2220 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2222 if (default_data != data)
2223 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2226 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2230 uint32_t default_data = 0;
2232 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2233 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2236 if (default_data != data)
2237 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2240 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2243 uint32_t data, default_data;
2245 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2246 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2247 GFX_POWER_GATING_ENABLE,
2249 if (default_data != data)
2250 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2253 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2256 uint32_t data, default_data;
2258 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2259 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2260 GFX_PIPELINE_PG_ENABLE,
2262 if (default_data != data)
2263 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2266 /* read any GFX register to wake up GFX */
2267 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2270 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2273 uint32_t data, default_data;
2275 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2276 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2277 STATIC_PER_CU_PG_ENABLE,
2279 if (default_data != data)
2280 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2283 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2286 uint32_t data, default_data;
2288 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2289 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2290 DYN_PER_CU_PG_ENABLE,
2292 if (default_data != data)
2293 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2296 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2298 gfx_v9_0_init_csb(adev);
2301 * The RLC save/restore list is supported since RLC v2_1,
2302 * and it is required by the gfxoff feature.
2304 if (adev->gfx.rlc.is_rlc_v2_1) {
2305 gfx_v9_1_init_rlc_save_restore_list(adev);
2306 gfx_v9_0_enable_save_restore_machine(adev);
2309 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2310 AMD_PG_SUPPORT_GFX_SMG |
2311 AMD_PG_SUPPORT_GFX_DMG |
2313 AMD_PG_SUPPORT_GDS |
2314 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2315 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2316 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2317 gfx_v9_0_init_gfx_power_gating(adev);
2321 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2323 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2324 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2325 gfx_v9_0_wait_for_rlc_serdes(adev);
2328 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2330 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2331 udelay(50);
2332 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2333 udelay(50);
2336 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2338 #ifdef AMDGPU_RLC_DEBUG_RETRY
2342 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2344 /* APUs (e.g. Carrizo) enable the CP interrupt only after the CP is initialized */
2345 if (!(adev->flags & AMD_IS_APU))
2346 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2350 #ifdef AMDGPU_RLC_DEBUG_RETRY
2351 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2352 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2353 if (rlc_ucode_ver == 0x108) {
2354 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2355 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2356 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2357 * default is 0x9C4 to create a 100us interval */
2358 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2359 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2360 * to disable the page fault retry interrupts, default is
2361 * 0x100 (256) */
2362 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
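/*
 * Legacy (non-PSP) RLC microcode load: RLC_GPM_UCODE_ADDR auto-increments
 * on every RLC_GPM_UCODE_DATA write, so the loop below streams the whole
 * image; the final ADDR write stores the firmware version, the usual
 * convention for CP/RLC loads.
 */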
2367 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2369 const struct rlc_firmware_header_v2_0 *hdr;
2370 const __le32 *fw_data;
2371 unsigned i, fw_size;
2373 if (!adev->gfx.rlc_fw)
2376 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2377 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2379 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2380 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2381 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2383 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2384 RLCG_UCODE_LOADING_START_ADDRESS);
2385 for (i = 0; i < fw_size; i++)
2386 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2387 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2392 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2396 if (amdgpu_sriov_vf(adev)) {
2397 gfx_v9_0_init_csb(adev);
2401 adev->gfx.rlc.funcs->stop(adev);
2404 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2406 adev->gfx.rlc.funcs->reset(adev);
2408 gfx_v9_0_init_pg(adev);
2410 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2411 /* legacy rlc firmware loading */
2412 r = gfx_v9_0_rlc_load_microcode(adev);
2417 switch (adev->asic_type) {
2419 if (amdgpu_lbpw == 0)
2420 gfx_v9_0_enable_lbpw(adev, false);
2422 gfx_v9_0_enable_lbpw(adev, true);
2425 if (amdgpu_lbpw > 0)
2426 gfx_v9_0_enable_lbpw(adev, true);
2428 gfx_v9_0_enable_lbpw(adev, false);
2434 adev->gfx.rlc.funcs->start(adev);
2439 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2442 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2444 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2445 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2446 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2448 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2449 adev->gfx.gfx_ring[i].sched.ready = false;
2451 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2455 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2457 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2458 const struct gfx_firmware_header_v1_0 *ce_hdr;
2459 const struct gfx_firmware_header_v1_0 *me_hdr;
2460 const __le32 *fw_data;
2461 unsigned i, fw_size;
2463 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2466 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2467 adev->gfx.pfp_fw->data;
2468 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2469 adev->gfx.ce_fw->data;
2470 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2471 adev->gfx.me_fw->data;
2473 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2474 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2475 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2477 gfx_v9_0_cp_gfx_enable(adev, false);
2480 fw_data = (const __le32 *)
2481 (adev->gfx.pfp_fw->data +
2482 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2483 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2484 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2485 for (i = 0; i < fw_size; i++)
2486 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2487 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2490 fw_data = (const __le32 *)
2491 (adev->gfx.ce_fw->data +
2492 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2493 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2494 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2495 for (i = 0; i < fw_size; i++)
2496 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2497 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2500 fw_data = (const __le32 *)
2501 (adev->gfx.me_fw->data +
2502 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2503 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2504 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2505 for (i = 0; i < fw_size; i++)
2506 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2507 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
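/*
 * Prime the gfx ring with the golden context: the clear-state (CSB)
 * values are emitted between PREAMBLE begin/end markers, followed by a
 * CLEAR_STATE packet and the CE partition bases, so context 0 starts
 * from a known register state.
 */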
2512 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2514 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2515 const struct cs_section_def *sect = NULL;
2516 const struct cs_extent_def *ext = NULL;
2520 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2521 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2523 gfx_v9_0_cp_gfx_enable(adev, true);
2525 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2527 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2531 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2532 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2534 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2535 amdgpu_ring_write(ring, 0x80000000);
2536 amdgpu_ring_write(ring, 0x80000000);
2538 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2539 for (ext = sect->section; ext->extent != NULL; ++ext) {
2540 if (sect->id == SECT_CONTEXT) {
2541 amdgpu_ring_write(ring,
2542 PACKET3(PACKET3_SET_CONTEXT_REG,
2544 amdgpu_ring_write(ring,
2545 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2546 for (i = 0; i < ext->reg_count; i++)
2547 amdgpu_ring_write(ring, ext->extent[i]);
2552 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2553 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2555 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2556 amdgpu_ring_write(ring, 0);
2558 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2559 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2560 amdgpu_ring_write(ring, 0x8000);
2561 amdgpu_ring_write(ring, 0x8000);
2563 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2564 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2565 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2566 amdgpu_ring_write(ring, tmp);
2567 amdgpu_ring_write(ring, 0);
2569 amdgpu_ring_commit(ring);
2574 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2576 struct amdgpu_ring *ring;
2579 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2581 /* Set the write pointer delay */
2582 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2584 /* set the RB to use vmid 0 */
2585 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2587 /* Set ring buffer size */
2588 ring = &adev->gfx.gfx_ring[0];
2589 rb_bufsz = order_base_2(ring->ring_size / 8);
2590 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2591 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2593 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2595 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2597 /* Initialize the ring buffer's write pointers */
2599 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2600 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2602 /* set the wb address whether it's enabled or not */
2603 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2604 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2605 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2607 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2608 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2609 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2612 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2614 rb_addr = ring->gpu_addr >> 8;
2615 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2616 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2618 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2619 if (ring->use_doorbell) {
2620 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2621 DOORBELL_OFFSET, ring->doorbell_index);
2622 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2623 DOORBELL_EN, 1);
2624 } else {
2625 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2626 }
2627 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2629 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2630 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2631 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2633 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2634 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2637 /* start the ring */
2638 gfx_v9_0_cp_gfx_start(adev);
2639 ring->sched.ready = true;
2644 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2649 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2651 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2652 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2653 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2654 adev->gfx.compute_ring[i].sched.ready = false;
2655 adev->gfx.kiq.ring.sched.ready = false;
2660 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2662 const struct gfx_firmware_header_v1_0 *mec_hdr;
2663 const __le32 *fw_data;
2667 if (!adev->gfx.mec_fw)
2670 gfx_v9_0_cp_compute_enable(adev, false);
2672 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2673 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2675 fw_data = (const __le32 *)
2676 (adev->gfx.mec_fw->data +
2677 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2679 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2680 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2681 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2683 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2684 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2685 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2686 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2689 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2690 mec_hdr->jt_offset);
2691 for (i = 0; i < mec_hdr->jt_size; i++)
2692 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2693 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2695 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2696 adev->gfx.mec_fw_version);
2697 /* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2703 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2706 struct amdgpu_device *adev = ring->adev;
2708 /* tell RLC which is KIQ queue */
2709 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2710 tmp &= 0xffffff00;
2711 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2712 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2713 tmp |= 0x80;
2714 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
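/*
 * The KIQ maps all user compute queues on the driver's behalf: a single
 * SET_RESOURCES packet publishes the global queue mask, then one
 * MAP_QUEUES packet per KCQ hands the HQD its MQD and wptr addresses.
 */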
2717 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2719 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2720 uint64_t queue_mask = 0;
2723 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2724 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2727 /* This situation may be hit in the future if a new HW
2728 * generation exposes more than 64 queues. If so, the
2729 * definition of queue_mask needs updating */
2730 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2731 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2735 queue_mask |= (1ull << i);
2738 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
2740 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2745 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2746 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2747 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2748 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2749 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2750 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2751 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2752 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2753 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2754 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2755 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2756 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2757 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2759 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2760 /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2761 amdgpu_ring_write(kiq_ring,
2762 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2763 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2764 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2765 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2766 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2767 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2768 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2769 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2770 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2771 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2772 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2773 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2774 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2775 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2778 r = amdgpu_ring_test_helper(kiq_ring);
2780 DRM_ERROR("KCQ enable failed\n");
2785 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2787 struct amdgpu_device *adev = ring->adev;
2788 struct v9_mqd *mqd = ring->mqd_ptr;
2789 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2792 mqd->header = 0xC0310800;
2793 mqd->compute_pipelinestat_enable = 0x00000001;
2794 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2795 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2796 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2797 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2798 mqd->compute_misc_reserved = 0x00000003;
2800 mqd->dynamic_cu_mask_addr_lo =
2801 lower_32_bits(ring->mqd_gpu_addr
2802 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2803 mqd->dynamic_cu_mask_addr_hi =
2804 upper_32_bits(ring->mqd_gpu_addr
2805 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2807 eop_base_addr = ring->eop_gpu_addr >> 8;
2808 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2809 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2811 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2812 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2813 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2814 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
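	/*
	 * GFX9_MEC_HPD_SIZE is 4096 bytes = 1024 dwords, so this programs
	 * EOP_SIZE = order_base_2(1024) - 1 = 9 and the HW decodes
	 * 2^(9+1) = 1024 dwords, exactly the allocated EOP buffer.
	 */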
2816 mqd->cp_hqd_eop_control = tmp;
2818 /* enable doorbell? */
2819 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2821 if (ring->use_doorbell) {
2822 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2823 DOORBELL_OFFSET, ring->doorbell_index);
2824 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2826 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2827 DOORBELL_SOURCE, 0);
2828 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2831 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2835 mqd->cp_hqd_pq_doorbell_control = tmp;
2837 /* disable the queue if it's active */
2839 mqd->cp_hqd_dequeue_request = 0;
2840 mqd->cp_hqd_pq_rptr = 0;
2841 mqd->cp_hqd_pq_wptr_lo = 0;
2842 mqd->cp_hqd_pq_wptr_hi = 0;
2844 /* set the pointer to the MQD */
2845 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2846 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2848 /* set MQD vmid to 0 */
2849 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2850 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2851 mqd->cp_mqd_control = tmp;
2853 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
2854 hqd_gpu_addr = ring->gpu_addr >> 8;
2855 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2856 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2858 /* set up the HQD, this is similar to CP_RB0_CNTL */
2859 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2860 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2861 (order_base_2(ring->ring_size / 4) - 1));
2862 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2863 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2865 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2867 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2868 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2869 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2870 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2871 mqd->cp_hqd_pq_control = tmp;
2873 /* set the wb address whether it's enabled or not */
2874 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2875 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2876 mqd->cp_hqd_pq_rptr_report_addr_hi =
2877 upper_32_bits(wb_gpu_addr) & 0xffff;
2879 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2880 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2881 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2882 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2885 /* enable the doorbell if requested */
2886 if (ring->use_doorbell) {
2887 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2888 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2889 DOORBELL_OFFSET, ring->doorbell_index);
2891 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2893 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2894 DOORBELL_SOURCE, 0);
2895 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2899 mqd->cp_hqd_pq_doorbell_control = tmp;
2901 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2903 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2905 /* set the vmid for the queue */
2906 mqd->cp_hqd_vmid = 0;
2908 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2909 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2910 mqd->cp_hqd_persistent_state = tmp;
2912 /* set MIN_IB_AVAIL_SIZE */
2913 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2914 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2915 mqd->cp_hqd_ib_control = tmp;
2917 /* activate the queue */
2918 mqd->cp_hqd_active = 1;
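/*
 * gfx_v9_0_mqd_init() only fills in the MQD image in memory. For the KIQ
 * the image is additionally committed to the HQD registers by hand in
 * gfx_v9_0_kiq_init_register() below; regular KCQs are instead mapped by
 * the KIQ through MAP_QUEUES.
 */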
2923 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2925 struct amdgpu_device *adev = ring->adev;
2926 struct v9_mqd *mqd = ring->mqd_ptr;
2929 /* disable wptr polling */
2930 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2932 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2933 mqd->cp_hqd_eop_base_addr_lo);
2934 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2935 mqd->cp_hqd_eop_base_addr_hi);
2937 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2938 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2939 mqd->cp_hqd_eop_control);
2941 /* enable doorbell? */
2942 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2943 mqd->cp_hqd_pq_doorbell_control);
2945 /* disable the queue if it's active */
2946 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2947 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2948 for (j = 0; j < adev->usec_timeout; j++) {
2949 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2953 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2954 mqd->cp_hqd_dequeue_request);
2955 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2956 mqd->cp_hqd_pq_rptr);
2957 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2958 mqd->cp_hqd_pq_wptr_lo);
2959 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2960 mqd->cp_hqd_pq_wptr_hi);
2963 /* set the pointer to the MQD */
2964 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2965 mqd->cp_mqd_base_addr_lo);
2966 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2967 mqd->cp_mqd_base_addr_hi);
2969 /* set MQD vmid to 0 */
2970 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2971 mqd->cp_mqd_control);
2973 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
2974 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2975 mqd->cp_hqd_pq_base_lo);
2976 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2977 mqd->cp_hqd_pq_base_hi);
2979 /* set up the HQD, this is similar to CP_RB0_CNTL */
2980 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2981 mqd->cp_hqd_pq_control);
2983 /* set the wb address whether it's enabled or not */
2984 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2985 mqd->cp_hqd_pq_rptr_report_addr_lo);
2986 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2987 mqd->cp_hqd_pq_rptr_report_addr_hi);
2989 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2990 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2991 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2992 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2993 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2995 /* enable the doorbell if requested */
2996 if (ring->use_doorbell) {
2997 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2998 (adev->doorbell_index.kiq * 2) << 2);
2999 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3000 (adev->doorbell_index.userqueue_end * 2) << 2);
3003 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3004 mqd->cp_hqd_pq_doorbell_control);
3006 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3007 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3008 mqd->cp_hqd_pq_wptr_lo);
3009 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3010 mqd->cp_hqd_pq_wptr_hi);
3012 /* set the vmid for the queue */
3013 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3015 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3016 mqd->cp_hqd_persistent_state);
3018 /* activate the queue */
3019 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3020 mqd->cp_hqd_active);
3022 if (ring->use_doorbell)
3023 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3028 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3030 struct amdgpu_device *adev = ring->adev;
3033 /* disable the queue if it's active */
3034 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3036 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3038 for (j = 0; j < adev->usec_timeout; j++) {
3039 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3044 if (j == adev->usec_timeout) {
3045 DRM_DEBUG("KIQ dequeue request failed.\n");
3047 /* Manual disable if dequeue request times out */
3048 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
3051 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3055 WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3056 WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3057 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3058 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3059 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3060 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3061 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3062 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3067 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3069 struct amdgpu_device *adev = ring->adev;
3070 struct v9_mqd *mqd = ring->mqd_ptr;
3071 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3073 gfx_v9_0_kiq_setting(ring);
3075 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3076 /* reset MQD to a clean status */
3077 if (adev->gfx.mec.mqd_backup[mqd_idx])
3078 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3080 /* reset ring buffer */
3081 ring->wptr = 0;
3082 amdgpu_ring_clear_ring(ring);
3084 mutex_lock(&adev->srbm_mutex);
3085 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3086 gfx_v9_0_kiq_init_register(ring);
3087 soc15_grbm_select(adev, 0, 0, 0, 0);
3088 mutex_unlock(&adev->srbm_mutex);
3090 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3091 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3092 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3093 mutex_lock(&adev->srbm_mutex);
3094 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3095 gfx_v9_0_mqd_init(ring);
3096 gfx_v9_0_kiq_init_register(ring);
3097 soc15_grbm_select(adev, 0, 0, 0, 0);
3098 mutex_unlock(&adev->srbm_mutex);
3100 if (adev->gfx.mec.mqd_backup[mqd_idx])
3101 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3107 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3109 struct amdgpu_device *adev = ring->adev;
3110 struct v9_mqd *mqd = ring->mqd_ptr;
3111 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3113 if (!adev->in_gpu_reset && !adev->in_suspend) {
3114 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3115 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3116 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3117 mutex_lock(&adev->srbm_mutex);
3118 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3119 gfx_v9_0_mqd_init(ring);
3120 soc15_grbm_select(adev, 0, 0, 0, 0);
3121 mutex_unlock(&adev->srbm_mutex);
3123 if (adev->gfx.mec.mqd_backup[mqd_idx])
3124 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3125 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3126 /* reset MQD to a clean status */
3127 if (adev->gfx.mec.mqd_backup[mqd_idx])
3128 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3130 /* reset ring buffer */
3131 ring->wptr = 0;
3132 amdgpu_ring_clear_ring(ring);
3133 } else {
3134 amdgpu_ring_clear_ring(ring);
3135 }
3140 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3142 struct amdgpu_ring *ring;
3145 ring = &adev->gfx.kiq.ring;
3147 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3148 if (unlikely(r != 0))
3151 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3152 if (unlikely(r != 0))
3155 gfx_v9_0_kiq_init_queue(ring);
3156 amdgpu_bo_kunmap(ring->mqd_obj);
3157 ring->mqd_ptr = NULL;
3158 amdgpu_bo_unreserve(ring->mqd_obj);
3159 ring->sched.ready = true;
3163 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3165 struct amdgpu_ring *ring = NULL;
3168 gfx_v9_0_cp_compute_enable(adev, true);
3170 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3171 ring = &adev->gfx.compute_ring[i];
3173 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3174 if (unlikely(r != 0))
3176 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3178 r = gfx_v9_0_kcq_init_queue(ring);
3179 amdgpu_bo_kunmap(ring->mqd_obj);
3180 ring->mqd_ptr = NULL;
3182 amdgpu_bo_unreserve(ring->mqd_obj);
3187 r = gfx_v9_0_kiq_kcq_enable(adev);
3192 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3195 struct amdgpu_ring *ring;
3197 if (!(adev->flags & AMD_IS_APU))
3198 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3200 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3201 /* legacy firmware loading */
3202 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3206 r = gfx_v9_0_cp_compute_load_microcode(adev);
3211 r = gfx_v9_0_kiq_resume(adev);
3215 r = gfx_v9_0_cp_gfx_resume(adev);
3219 r = gfx_v9_0_kcq_resume(adev);
3223 ring = &adev->gfx.gfx_ring[0];
3224 r = amdgpu_ring_test_helper(ring);
3228 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3229 ring = &adev->gfx.compute_ring[i];
3230 amdgpu_ring_test_helper(ring);
3233 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3238 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3240 gfx_v9_0_cp_gfx_enable(adev, enable);
3241 gfx_v9_0_cp_compute_enable(adev, enable);
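/*
 * hw_init brings the block up in dependency order: golden registers and
 * constants first, then the pinned CSB and the RLC, and finally the CP
 * gfx/compute rings.
 */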
3244 static int gfx_v9_0_hw_init(void *handle)
3247 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3249 gfx_v9_0_init_golden_registers(adev);
3251 gfx_v9_0_constants_init(adev);
3253 r = gfx_v9_0_csb_vram_pin(adev);
3257 r = adev->gfx.rlc.funcs->resume(adev);
3261 r = gfx_v9_0_cp_resume(adev);
3265 r = gfx_v9_0_ngg_en(adev);
3272 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
3275 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3277 r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
3279 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3281 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3282 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3284 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3285 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3286 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3287 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3288 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3289 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3290 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3291 amdgpu_ring_write(kiq_ring, 0);
3292 amdgpu_ring_write(kiq_ring, 0);
3293 amdgpu_ring_write(kiq_ring, 0);
3295 r = amdgpu_ring_test_helper(kiq_ring);
3297 DRM_ERROR("KCQ disable failed\n");
3302 static int gfx_v9_0_hw_fini(void *handle)
3304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3306 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3307 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3309 /* disable KCQ to avoid CPC touch memory not valid anymore */
3310 gfx_v9_0_kcq_disable(adev);
3312 if (amdgpu_sriov_vf(adev)) {
3313 gfx_v9_0_cp_gfx_enable(adev, false);
3314 /* must disable polling for SRIOV when hw is finished, otherwise
3315 * the CPC engine may keep fetching the WB address, which is already
3316 * invalid after sw_fini, and trigger a DMAR read error on the
3317 * hypervisor side.
3318 */
3319 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3323 /* Use deinitialize sequence from CAIL when unbinding device from driver,
3324 * otherwise KIQ is hanging when binding back
3326 if (!adev->in_gpu_reset && !adev->in_suspend) {
3327 mutex_lock(&adev->srbm_mutex);
3328 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3329 adev->gfx.kiq.ring.pipe,
3330 adev->gfx.kiq.ring.queue, 0);
3331 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3332 soc15_grbm_select(adev, 0, 0, 0, 0);
3333 mutex_unlock(&adev->srbm_mutex);
3336 gfx_v9_0_cp_enable(adev, false);
3337 adev->gfx.rlc.funcs->stop(adev);
3339 gfx_v9_0_csb_vram_unpin(adev);
3344 static int gfx_v9_0_suspend(void *handle)
3346 return gfx_v9_0_hw_fini(handle);
3349 static int gfx_v9_0_resume(void *handle)
3351 return gfx_v9_0_hw_init(handle);
3354 static bool gfx_v9_0_is_idle(void *handle)
3356 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3358 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3359 GRBM_STATUS, GUI_ACTIVE))
3365 static int gfx_v9_0_wait_for_idle(void *handle)
3368 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3370 for (i = 0; i < adev->usec_timeout; i++) {
3371 if (gfx_v9_0_is_idle(handle))
3378 static int gfx_v9_0_soft_reset(void *handle)
3380 u32 grbm_soft_reset = 0;
3382 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3385 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3386 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3387 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3388 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3389 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3390 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3391 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3392 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3393 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3394 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3395 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3398 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3399 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3400 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3404 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3405 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3406 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3407 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3410 if (grbm_soft_reset) {
3412 adev->gfx.rlc.funcs->stop(adev);
3414 /* Disable GFX parsing/prefetching */
3415 gfx_v9_0_cp_gfx_enable(adev, false);
3417 /* Disable MEC parsing/prefetching */
3418 gfx_v9_0_cp_compute_enable(adev, false);
3420 if (grbm_soft_reset) {
3421 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3422 tmp |= grbm_soft_reset;
3423 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3424 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3425 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3429 tmp &= ~grbm_soft_reset;
3430 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3431 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3434 /* Wait a little for things to settle down */
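/*
 * Writing RLC_CAPTURE_GPU_CLOCK_COUNT latches the free-running 64-bit GPU
 * clock counter into the LSB/MSB pair below, so the two 32-bit halves
 * read back as one consistent sample.
 */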
3440 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3444 mutex_lock(&adev->gfx.gpu_clock_mutex);
3445 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3446 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3447 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3448 mutex_unlock(&adev->gfx.gpu_clock_mutex);
3452 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3454 uint32_t gds_base, uint32_t gds_size,
3455 uint32_t gws_base, uint32_t gws_size,
3456 uint32_t oa_base, uint32_t oa_size)
3458 struct amdgpu_device *adev = ring->adev;
3461 gfx_v9_0_write_data_to_reg(ring, 0, false,
3462 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3466 gfx_v9_0_write_data_to_reg(ring, 0, false,
3467 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3471 gfx_v9_0_write_data_to_reg(ring, 0, false,
3472 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3473 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3476 gfx_v9_0_write_data_to_reg(ring, 0, false,
3477 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3478 (1 << (oa_size + oa_base)) - (1 << oa_base));
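	/*
	 * The OA value written above is a bitmask: (1 << (oa_size + oa_base))
	 * - (1 << oa_base) sets oa_size contiguous bits starting at bit
	 * oa_base, e.g. base 2 with size 3 yields 0b11100.
	 */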
3481 static int gfx_v9_0_early_init(void *handle)
3483 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3485 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3486 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3487 gfx_v9_0_set_ring_funcs(adev);
3488 gfx_v9_0_set_irq_funcs(adev);
3489 gfx_v9_0_set_gds_init(adev);
3490 gfx_v9_0_set_rlc_funcs(adev);
3495 static int gfx_v9_0_late_init(void *handle)
3497 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3500 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3504 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3511 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
3513 uint32_t rlc_setting;
3515 /* if RLC is not enabled, do nothing */
3516 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3517 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3523 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
3528 data = RLC_SAFE_MODE__CMD_MASK;
3529 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3530 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3532 /* wait for RLC_SAFE_MODE */
3533 for (i = 0; i < adev->usec_timeout; i++) {
3534 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3540 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
3544 data = RLC_SAFE_MODE__CMD_MASK;
3545 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
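/*
 * RLC safe mode handshake: set_safe_mode sends CMD=1 with MESSAGE=1 and
 * spins until the RLC acknowledges by clearing CMD; unset_safe_mode sends
 * CMD=1 with MESSAGE=0 to leave safe mode. The gating updates below run
 * inside this handshake via amdgpu_gfx_rlc_enter/exit_safe_mode().
 */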
3548 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3551 amdgpu_gfx_rlc_enter_safe_mode(adev);
3553 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3554 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3555 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3556 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3558 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3559 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3562 amdgpu_gfx_rlc_exit_safe_mode(adev);
3565 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3568 /* TODO: double check if we need to perform under safe mode */
3569 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3571 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3572 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3574 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3576 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3577 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3579 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3581 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3584 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3589 amdgpu_gfx_rlc_enter_safe_mode(adev);
3591 /* It is disabled by HW by default */
3592 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3593 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3594 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3596 if (adev->asic_type != CHIP_VEGA12)
3597 data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3599 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3600 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3601 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3603 /* only for Vega10 & Raven1 */
3604 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3607 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3609 /* MGLS is a global flag to control all MGLS in GFX */
3610 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3611 /* 2 - RLC memory Light sleep */
3612 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3613 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3614 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3616 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3618 /* 3 - CP memory Light sleep */
3619 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3620 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3621 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3623 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3627 /* 1 - MGCG_OVERRIDE */
3628 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3630 if (adev->asic_type != CHIP_VEGA12)
3631 data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3633 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3634 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3635 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3636 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3639 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3641 /* 2 - disable MGLS in RLC */
3642 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3643 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3644 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3645 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3648 /* 3 - disable MGLS in CP */
3649 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3650 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3651 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3652 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3656 amdgpu_gfx_rlc_exit_safe_mode(adev);
3659 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3664 amdgpu_gfx_rlc_enter_safe_mode(adev);
3666 /* Enable 3D CGCG/CGLS */
3667 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3668 /* write cmd to clear cgcg/cgls ov */
3669 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3670 /* unset CGCG override */
3671 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3672 /* update CGCG and CGLS override bits */
3674 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3676 /* enable 3D CGCG FSM (0x0000363f) */
3677 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3679 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3680 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3681 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3682 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3683 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3685 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3687 /* set IDLE_POLL_COUNT(0x00900100) */
3688 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3689 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3690 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3692 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3694 /* Disable CGCG/CGLS */
3695 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3696 /* disable cgcg, cgls should be disabled */
3697 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3698 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3699 /* disable cgcg and cgls in FSM */
3701 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3704 amdgpu_gfx_rlc_exit_safe_mode(adev);
3707 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3712 amdgpu_gfx_rlc_enter_safe_mode(adev);
3714 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3715 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3716 /* unset CGCG override */
3717 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3718 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3719 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3721 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3722 /* update CGCG and CGLS override bits */
3724 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3726 /* enable CGCG FSM (0x0000363F) */
3727 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3729 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3730 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3731 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3732 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3733 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3735 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3737 /* set IDLE_POLL_COUNT(0x00900100) */
3738 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3739 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3740 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3742 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3744 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3745 /* reset CGCG/CGLS bits */
3746 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3747 /* disable cgcg and cgls in FSM */
3749 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3752 amdgpu_gfx_rlc_exit_safe_mode(adev);
3755 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3759 /* CGCG/CGLS should be enabled after MGCG/MGLS
3760 * === MGCG + MGLS ===
3762 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3763 /* === CGCG/CGLS for GFX 3D only === */
3764 gfx_v9_0_update_3d_clock_gating(adev, enable);
3765 /* === CGCG + CGLS === */
3766 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3768 /* CGCG/CGLS should be disabled before MGCG/MGLS
3769 * === CGCG + CGLS ===
3771 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3772 /* === CGCG/CGLS for GFX 3D only === */
3773 gfx_v9_0_update_3d_clock_gating(adev, enable);
3774 /* === MGCG + MGLS === */
3775 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
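/*
 * The mirrored ordering above is deliberate: the coarse-grain FSM is
 * armed only after (and torn down before) medium-grain gating, which
 * presumably keeps CGCG from sampling a half-reprogrammed MGCG state.
 */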
3780 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3781 .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
3782 .set_safe_mode = gfx_v9_0_set_safe_mode,
3783 .unset_safe_mode = gfx_v9_0_unset_safe_mode,
3784 .init = gfx_v9_0_rlc_init,
3785 .get_csb_size = gfx_v9_0_get_csb_size,
3786 .get_csb_buffer = gfx_v9_0_get_csb_buffer,
3787 .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
3788 .resume = gfx_v9_0_rlc_resume,
3789 .stop = gfx_v9_0_rlc_stop,
3790 .reset = gfx_v9_0_rlc_reset,
3791 .start = gfx_v9_0_rlc_start
3794 static int gfx_v9_0_set_powergating_state(void *handle,
3795 enum amd_powergating_state state)
3797 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3798 bool enable = (state == AMD_PG_STATE_GATE);
3800 switch (adev->asic_type) {
3803 amdgpu_gfx_off_ctrl(adev, false);
3804 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
3806 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3807 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3808 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3810 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3811 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3814 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3815 gfx_v9_0_enable_cp_power_gating(adev, true);
3817 gfx_v9_0_enable_cp_power_gating(adev, false);
3819 /* update gfx cgpg state */
3820 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3822 /* update mgcg state */
3823 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3826 amdgpu_gfx_off_ctrl(adev, true);
3830 amdgpu_gfx_off_ctrl(adev, false);
3831 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
3833 amdgpu_gfx_off_ctrl(adev, true);
3843 static int gfx_v9_0_set_clockgating_state(void *handle,
3844 enum amd_clockgating_state state)
3846 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3848 if (amdgpu_sriov_vf(adev))
3851 switch (adev->asic_type) {
3856 gfx_v9_0_update_gfx_clock_gating(adev,
3857 state == AMD_CG_STATE_GATE);
3865 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3867 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3870 if (amdgpu_sriov_vf(adev))
3873 /* AMD_CG_SUPPORT_GFX_MGCG */
3874 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3875 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3876 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3878 /* AMD_CG_SUPPORT_GFX_CGCG */
3879 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3880 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3881 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3883 /* AMD_CG_SUPPORT_GFX_CGLS */
3884 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3885 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3887 /* AMD_CG_SUPPORT_GFX_RLC_LS */
3888 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3889 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3890 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3892 /* AMD_CG_SUPPORT_GFX_CP_LS */
3893 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3894 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3895 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3897 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3898 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3899 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3900 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3902 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3903 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3904 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3907 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3909 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32 bit rptr */
3912 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3914 struct amdgpu_device *adev = ring->adev;
3917 /* XXX check if swapping is necessary on BE */
3918 if (ring->use_doorbell) {
3919 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3921 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3922 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3928 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3930 struct amdgpu_device *adev = ring->adev;
3932 if (ring->use_doorbell) {
3933 /* XXX check if swapping is necessary on BE */
3934 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3935 WDOORBELL64(ring->doorbell_index, ring->wptr);
3937 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3938 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3942 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3944 struct amdgpu_device *adev = ring->adev;
3945 u32 ref_and_mask, reg_mem_engine;
3946 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
3948 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3951 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3954 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3961 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3962 reg_mem_engine = 1; /* pfp */
3965 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3966 adev->nbio_funcs->get_hdp_flush_req_offset(adev),
3967 adev->nbio_funcs->get_hdp_flush_done_offset(adev),
3968 ref_and_mask, ref_and_mask, 0x20);
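/*
 * A note on the handshake above: gfx_v9_0_wait_reg_mem() is handed both
 * the NBIO flush request and flush done registers, with ref_and_mask as
 * both ref and mask, so the selected CP engine writes the request bit
 * and then polls (poll interval 0x20) until the matching done bit reads
 * back as set.
 */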
3971 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3972 struct amdgpu_job *job,
3973 struct amdgpu_ib *ib,
3976 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
3977 u32 header, control = 0;
3979 if (ib->flags & AMDGPU_IB_FLAG_CE)
3980 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3982 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3984 control |= ib->length_dw | (vmid << 24);
3986 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3987 control |= INDIRECT_BUFFER_PRE_ENB(1);
3989 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3990 gfx_v9_0_ring_emit_de_meta(ring);
3993 amdgpu_ring_write(ring, header);
3994 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3995 amdgpu_ring_write(ring,
3999 lower_32_bits(ib->gpu_addr));
4000 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4001 amdgpu_ring_write(ring, control);
4004 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4005 struct amdgpu_job *job,
4006 struct amdgpu_ib *ib,
4009 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4010 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4012 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4013 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4014 amdgpu_ring_write(ring,
4018 lower_32_bits(ib->gpu_addr));
4019 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4020 amdgpu_ring_write(ring, control);
4023 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4024 u64 seq, unsigned flags)
4026 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4027 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4028 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
4030 /* RELEASE_MEM - flush caches, send int */
4031 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4032 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
4033 EOP_TC_NC_ACTION_EN) :
4034 (EOP_TCL1_ACTION_EN |
4036 EOP_TC_WB_ACTION_EN |
4037 EOP_TC_MD_ACTION_EN)) |
4038 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4040 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4043 * the address should be Qword aligned for a 64-bit write, Dword
4044 * aligned when only the low 32 bits are sent (data high is discarded)
4050 amdgpu_ring_write(ring, lower_32_bits(addr));
4051 amdgpu_ring_write(ring, upper_32_bits(addr));
4052 amdgpu_ring_write(ring, lower_32_bits(seq));
4053 amdgpu_ring_write(ring, upper_32_bits(seq));
4054 amdgpu_ring_write(ring, 0);
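/*
 * For reference: DATA_SEL(1) writes only seq's low 32 bits while
 * DATA_SEL(2) writes all 64 (hence the alignment rule above), and
 * INT_SEL(2) raises the EOP interrupt once the write is confirmed;
 * the final zero dword fills the remaining RELEASE_MEM field.
 */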
4057 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4059 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4060 uint32_t seq = ring->fence_drv.sync_seq;
4061 uint64_t addr = ring->fence_drv.gpu_addr;
4063 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
4064 lower_32_bits(addr), upper_32_bits(addr),
4065 seq, 0xffffffff, 4);
4068 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4069 unsigned vmid, uint64_t pd_addr)
4071 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4073 /* compute doesn't have PFP */
4074 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4075 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4076 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4077 amdgpu_ring_write(ring, 0x0);
4081 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4083 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32 bit rptr */
4086 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4090 /* XXX check if swapping is necessary on BE */
4091 if (ring->use_doorbell)
4092 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4098 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
4101 struct amdgpu_device *adev = ring->adev;
4102 int pipe_num, tmp, reg;
4103 int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
4105 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
4107 /* first me only has 2 entries, GFX and HP3D */
4111 reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
4113 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
4117 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
4118 struct amdgpu_ring *ring,
4123 struct amdgpu_ring *iring;
4125 mutex_lock(&adev->gfx.pipe_reserve_mutex);
4126 pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
4128 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4130 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4132 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
4133 /* Clear all reservations - everyone reacquires all resources */
4134 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
4135 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
4138 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
4139 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
4142 /* Lower all pipes without a current reservation */
4143 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
4144 iring = &adev->gfx.gfx_ring[i];
4145 pipe = amdgpu_gfx_queue_to_bit(adev,
4149 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4150 gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4153 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
4154 iring = &adev->gfx.compute_ring[i];
4155 pipe = amdgpu_gfx_queue_to_bit(adev,
4159 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4160 gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4164 mutex_unlock(&adev->gfx.pipe_reserve_mutex);
4167 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
4168 struct amdgpu_ring *ring,
4171 uint32_t pipe_priority = acquire ? 0x2 : 0x0;
4172 uint32_t queue_priority = acquire ? 0xf : 0x0;
4174 mutex_lock(&adev->srbm_mutex);
4175 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4177 WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
4178 WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
4180 soc15_grbm_select(adev, 0, 0, 0, 0);
4181 mutex_unlock(&adev->srbm_mutex);
4184 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
4185 enum drm_sched_priority priority)
4187 struct amdgpu_device *adev = ring->adev;
4188 bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
4190 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
4193 gfx_v9_0_hqd_set_priority(adev, ring, acquire);
4194 gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
4197 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4199 struct amdgpu_device *adev = ring->adev;
4201 /* XXX check if swapping is necessary on BE */
4202 if (ring->use_doorbell) {
4203 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4204 WDOORBELL64(ring->doorbell_index, ring->wptr);
4206 BUG(); /* only DOORBELL method supported on gfx9 now */
4210 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4211 u64 seq, unsigned int flags)
4213 struct amdgpu_device *adev = ring->adev;
4215 /* we only allocate 32 bits for each seq wb address */
4216 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4218 /* write fence seq to the "addr" */
4219 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4220 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4221 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4222 amdgpu_ring_write(ring, lower_32_bits(addr));
4223 amdgpu_ring_write(ring, upper_32_bits(addr));
4224 amdgpu_ring_write(ring, lower_32_bits(seq));
4226 if (flags & AMDGPU_FENCE_FLAG_INT) {
4227 /* set register to trigger INT */
4228 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4229 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4230 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4231 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4232 amdgpu_ring_write(ring, 0);
4233 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4237 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
4239 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4240 amdgpu_ring_write(ring, 0);
4243 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
4245 struct v9_ce_ib_state ce_payload = {0};
4249 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
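/*
 * The "+ 4 - 2" above: a PACKET3 count field is (body dwords - 1), and
 * the WRITE_DATA body carries 3 control/address dwords ahead of the
 * payload, so cnt = payload_dwords + 3 - 1.
 */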
4250 csa_addr = amdgpu_csa_vaddr(ring->adev);
4252 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4253 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4254 WRITE_DATA_DST_SEL(8) |
4256 WRITE_DATA_CACHE_POLICY(0));
4257 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4258 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4259 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
4262 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
4264 struct v9_de_ib_state de_payload = {0};
4265 uint64_t csa_addr, gds_addr;
4268 csa_addr = amdgpu_csa_vaddr(ring->adev);
4269 gds_addr = csa_addr + 4096;
4270 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4271 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4273 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4274 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4275 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4276 WRITE_DATA_DST_SEL(8) |
4278 WRITE_DATA_CACHE_POLICY(0));
4279 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4280 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4281 amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
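/*
 * The DE payload mirrors the CE one but also tells the CP where to back
 * up GDS contents if the IB is preempted; that backup area is simply
 * carved out 4KB into the per-ring CSA here.
 */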
4284 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4286 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4287 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame_begin, 1: frame_end */
4290 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4294 if (amdgpu_sriov_vf(ring->adev))
4295 gfx_v9_0_ring_emit_ce_meta(ring);
4297 gfx_v9_0_ring_emit_tmz(ring, true);
4299 dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
4300 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4301 /* set load_global_config & load_global_uconfig */
4303 /* set load_cs_sh_regs */
4305 /* set load_per_context_state & load_gfx_sh_regs for GFX */
4308 /* set load_ce_ram if preamble presented */
4309 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4312 /* still load_ce_ram if this is the first time a preamble is presented,
4313 * even though no context switch happens.
4315 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4319 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4320 amdgpu_ring_write(ring, dw2);
4321 amdgpu_ring_write(ring, 0);
4324 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4327 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4328 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4329 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4330 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr == 0 */
4331 ret = ring->wptr & ring->buf_mask;
4332 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4336 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4339 BUG_ON(offset > ring->buf_mask);
4340 BUG_ON(ring->ring[offset] != 0x55aa55aa);
4342 cur = (ring->wptr & ring->buf_mask) - 1;
4343 if (likely(cur > offset))
4344 ring->ring[offset] = cur - offset;
4346 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
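/*
 * Usage sketch for the COND_EXEC pair: save the offset returned by
 * init_cond_exec, emit the conditionally executed dwords, then call
 * patch_cond_exec(ring, offset) to replace the 0x55aa55aa placeholder
 * with the real skip count; the else branch above handles the case
 * where the write pointer has wrapped around the ring buffer since the
 * placeholder was written.
 */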
4349 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4351 struct amdgpu_device *adev = ring->adev;
4353 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4354 amdgpu_ring_write(ring, 0 | /* src: register*/
4355 (5 << 8) | /* dst: memory */
4356 (1 << 20)); /* write confirm */
4357 amdgpu_ring_write(ring, reg);
4358 amdgpu_ring_write(ring, 0);
4359 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4360 adev->virt.reg_val_offs * 4));
4361 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4362 adev->virt.reg_val_offs * 4));
4365 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4370 switch (ring->funcs->type) {
4371 case AMDGPU_RING_TYPE_GFX:
4372 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4374 case AMDGPU_RING_TYPE_KIQ:
4375 cmd = (1 << 16); /* no inc addr */
4381 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4382 amdgpu_ring_write(ring, cmd);
4383 amdgpu_ring_write(ring, reg);
4384 amdgpu_ring_write(ring, 0);
4385 amdgpu_ring_write(ring, val);
4388 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4389 uint32_t val, uint32_t mask)
4391 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4394 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4395 uint32_t reg0, uint32_t reg1,
4396 uint32_t ref, uint32_t mask)
4398 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4399 struct amdgpu_device *adev = ring->adev;
4400 bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
4401 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
4404 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4407 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4411 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4413 struct amdgpu_device *adev = ring->adev;
4416 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4417 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4418 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4419 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4420 WREG32(mmSQ_CMD, value);
4423 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4424 enum amdgpu_interrupt_state state)
4427 case AMDGPU_IRQ_STATE_DISABLE:
4428 case AMDGPU_IRQ_STATE_ENABLE:
4429 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4430 TIME_STAMP_INT_ENABLE,
4431 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4438 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4440 enum amdgpu_interrupt_state state)
4442 u32 mec_int_cntl, mec_int_cntl_reg;
4445 * amdgpu controls only the first MEC. That's why this function only
4446 * handles the setting of interrupts for this specific MEC. All other
4447 * pipes' interrupts are set by amdkfd.
4453 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4456 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4459 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4462 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4465 DRM_DEBUG("invalid pipe %d\n", pipe);
4469 DRM_DEBUG("invalid me %d\n", me);
4474 case AMDGPU_IRQ_STATE_DISABLE:
4475 mec_int_cntl = RREG32(mec_int_cntl_reg);
4476 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4477 TIME_STAMP_INT_ENABLE, 0);
4478 WREG32(mec_int_cntl_reg, mec_int_cntl);
4480 case AMDGPU_IRQ_STATE_ENABLE:
4481 mec_int_cntl = RREG32(mec_int_cntl_reg);
4482 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4483 TIME_STAMP_INT_ENABLE, 1);
4484 WREG32(mec_int_cntl_reg, mec_int_cntl);
4491 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4492 struct amdgpu_irq_src *source,
4494 enum amdgpu_interrupt_state state)
4497 case AMDGPU_IRQ_STATE_DISABLE:
4498 case AMDGPU_IRQ_STATE_ENABLE:
4499 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4500 PRIV_REG_INT_ENABLE,
4501 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4510 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4511 struct amdgpu_irq_src *source,
4513 enum amdgpu_interrupt_state state)
4516 case AMDGPU_IRQ_STATE_DISABLE:
4517 case AMDGPU_IRQ_STATE_ENABLE:
4518 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4519 PRIV_INSTR_INT_ENABLE,
4520 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4528 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4529 struct amdgpu_irq_src *src,
4531 enum amdgpu_interrupt_state state)
4534 case AMDGPU_CP_IRQ_GFX_EOP:
4535 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
4537 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4538 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4540 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4541 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4543 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4544 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4546 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4547 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4549 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4550 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4552 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4553 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4555 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4556 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4558 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4559 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4567 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
4568 struct amdgpu_irq_src *source,
4569 struct amdgpu_iv_entry *entry)
4572 u8 me_id, pipe_id, queue_id;
4573 struct amdgpu_ring *ring;
4575 DRM_DEBUG("IH: CP EOP\n");
4576 me_id = (entry->ring_id & 0x0c) >> 2;
4577 pipe_id = (entry->ring_id & 0x03) >> 0;
4578 queue_id = (entry->ring_id & 0x70) >> 4;
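/* i.e. ring_id layout: bits [1:0] pipe, [3:2] me, [6:4] queue */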
4582 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4586 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4587 ring = &adev->gfx.compute_ring[i];
4588 /* Per-queue interrupt is supported for MEC starting from VI.
4589 * The interrupt can only be enabled/disabled per pipe instead of per queue.
4591 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4592 amdgpu_fence_process(ring);
4599 static void gfx_v9_0_fault(struct amdgpu_device *adev,
4600 struct amdgpu_iv_entry *entry)
4602 u8 me_id, pipe_id, queue_id;
4603 struct amdgpu_ring *ring;
4606 me_id = (entry->ring_id & 0x0c) >> 2;
4607 pipe_id = (entry->ring_id & 0x03) >> 0;
4608 queue_id = (entry->ring_id & 0x70) >> 4;
4612 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4616 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4617 ring = &adev->gfx.compute_ring[i];
4618 if (ring->me == me_id && ring->pipe == pipe_id &&
4619 ring->queue == queue_id)
4620 drm_sched_fault(&ring->sched);
4626 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
4627 struct amdgpu_irq_src *source,
4628 struct amdgpu_iv_entry *entry)
4630 DRM_ERROR("Illegal register access in command stream\n");
4631 gfx_v9_0_fault(adev, entry);
4635 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
4636 struct amdgpu_irq_src *source,
4637 struct amdgpu_iv_entry *entry)
4639 DRM_ERROR("Illegal instruction in command stream\n");
4640 gfx_v9_0_fault(adev, entry);
4644 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
4646 .early_init = gfx_v9_0_early_init,
4647 .late_init = gfx_v9_0_late_init,
4648 .sw_init = gfx_v9_0_sw_init,
4649 .sw_fini = gfx_v9_0_sw_fini,
4650 .hw_init = gfx_v9_0_hw_init,
4651 .hw_fini = gfx_v9_0_hw_fini,
4652 .suspend = gfx_v9_0_suspend,
4653 .resume = gfx_v9_0_resume,
4654 .is_idle = gfx_v9_0_is_idle,
4655 .wait_for_idle = gfx_v9_0_wait_for_idle,
4656 .soft_reset = gfx_v9_0_soft_reset,
4657 .set_clockgating_state = gfx_v9_0_set_clockgating_state,
4658 .set_powergating_state = gfx_v9_0_set_powergating_state,
4659 .get_clockgating_state = gfx_v9_0_get_clockgating_state,
4662 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4663 .type = AMDGPU_RING_TYPE_GFX,
4665 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4666 .support_64bit_ptrs = true,
4667 .vmhub = AMDGPU_GFXHUB,
4668 .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
4669 .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
4670 .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
4671 .emit_frame_size = /* 242 dwords maximum if 16 IBs */
4673 7 + /* PIPELINE_SYNC */
4674 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4675 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4677 8 + /* FENCE for VM_FLUSH */
4678 20 + /* GDS switch */
4679 4 + /* double SWITCH_BUFFER,
4680 the first COND_EXEC jumps to the place just
4681 prior to this double SWITCH_BUFFER */
4689 8 + 8 + /* FENCE x2 */
4690 2, /* SWITCH_BUFFER */
4691 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
4692 .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
4693 .emit_fence = gfx_v9_0_ring_emit_fence,
4694 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4695 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4696 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4697 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4698 .test_ring = gfx_v9_0_ring_test_ring,
4699 .test_ib = gfx_v9_0_ring_test_ib,
4700 .insert_nop = amdgpu_ring_insert_nop,
4701 .pad_ib = amdgpu_ring_generic_pad_ib,
4702 .emit_switch_buffer = gfx_v9_ring_emit_sb,
4703 .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
4704 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
4705 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
4706 .emit_tmz = gfx_v9_0_ring_emit_tmz,
4707 .emit_wreg = gfx_v9_0_ring_emit_wreg,
4708 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4709 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4710 .soft_recovery = gfx_v9_0_ring_soft_recovery,
4713 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
4714 .type = AMDGPU_RING_TYPE_COMPUTE,
4716 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4717 .support_64bit_ptrs = true,
4718 .vmhub = AMDGPU_GFXHUB,
4719 .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4720 .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4721 .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4723 20 + /* gfx_v9_0_ring_emit_gds_switch */
4724 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4725 5 + /* hdp invalidate */
4726 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4727 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4728 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4729 2 + /* gfx_v9_0_ring_emit_vm_flush */
4730 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
4731 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4732 .emit_ib = gfx_v9_0_ring_emit_ib_compute,
4733 .emit_fence = gfx_v9_0_ring_emit_fence,
4734 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4735 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4736 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4737 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4738 .test_ring = gfx_v9_0_ring_test_ring,
4739 .test_ib = gfx_v9_0_ring_test_ib,
4740 .insert_nop = amdgpu_ring_insert_nop,
4741 .pad_ib = amdgpu_ring_generic_pad_ib,
4742 .set_priority = gfx_v9_0_ring_set_priority_compute,
4743 .emit_wreg = gfx_v9_0_ring_emit_wreg,
4744 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4745 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4748 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
4749 .type = AMDGPU_RING_TYPE_KIQ,
4751 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4752 .support_64bit_ptrs = true,
4753 .vmhub = AMDGPU_GFXHUB,
4754 .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4755 .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4756 .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4758 20 + /* gfx_v9_0_ring_emit_gds_switch */
4759 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4760 5 + /* hdp invalidate */
4761 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4762 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4763 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4764 2 + /* gfx_v9_0_ring_emit_vm_flush */
4765 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
4766 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4767 .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
4768 .test_ring = gfx_v9_0_ring_test_ring,
4769 .insert_nop = amdgpu_ring_insert_nop,
4770 .pad_ib = amdgpu_ring_generic_pad_ib,
4771 .emit_rreg = gfx_v9_0_ring_emit_rreg,
4772 .emit_wreg = gfx_v9_0_ring_emit_wreg,
4773 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4774 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4777 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
4781 adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
4783 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4784 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
4786 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4787 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
4790 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
4791 .set = gfx_v9_0_set_eop_interrupt_state,
4792 .process = gfx_v9_0_eop_irq,
4795 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
4796 .set = gfx_v9_0_set_priv_reg_fault_state,
4797 .process = gfx_v9_0_priv_reg_irq,
4800 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
4801 .set = gfx_v9_0_set_priv_inst_fault_state,
4802 .process = gfx_v9_0_priv_inst_irq,
4805 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
4807 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4808 adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
4810 adev->gfx.priv_reg_irq.num_types = 1;
4811 adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
4813 adev->gfx.priv_inst_irq.num_types = 1;
4814 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
4817 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
4819 switch (adev->asic_type) {
4824 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
4831 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
4833 /* init asic gds info */
4834 switch (adev->asic_type) {
4838 adev->gds.mem.total_size = 0x10000;
4841 adev->gds.mem.total_size = 0x1000;
4844 adev->gds.mem.total_size = 0x10000;
4848 adev->gds.gws.total_size = 64;
4849 adev->gds.oa.total_size = 16;
4851 if (adev->gds.mem.total_size == 64 * 1024) {
4852 adev->gds.mem.gfx_partition_size = 4096;
4853 adev->gds.mem.cs_partition_size = 4096;
4855 adev->gds.gws.gfx_partition_size = 4;
4856 adev->gds.gws.cs_partition_size = 4;
4858 adev->gds.oa.gfx_partition_size = 4;
4859 adev->gds.oa.cs_partition_size = 1;
4861 adev->gds.mem.gfx_partition_size = 1024;
4862 adev->gds.mem.cs_partition_size = 1024;
4864 adev->gds.gws.gfx_partition_size = 16;
4865 adev->gds.gws.cs_partition_size = 16;
4867 adev->gds.oa.gfx_partition_size = 4;
4868 adev->gds.oa.cs_partition_size = 4;
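/*
 * In other words: 64KB GDS parts get 4KB gfx/cs memory partitions with
 * 4-entry GWS slices, while 4KB parts fall back to 1KB memory
 * partitions with 16-entry GWS slices; OA stays at 4 entries except for
 * the 1-entry CS slice on the large-GDS configuration.
 */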
4872 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4880 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4881 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4883 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
4886 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
4890 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
4891 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
4893 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4894 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4896 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4898 return (~data) & mask;
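/*
 * I.e. a CU is reported active iff it is neither fused off
 * (CC_GC_SHADER_ARRAY_CONFIG) nor user-disabled
 * (GC_USER_SHADER_ARRAY_CONFIG). Example: with max_cu_per_sh = 8 and
 * data = 0xc0, this returns 0x3f (CUs 0-5 usable).
 */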
4901 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
4902 struct amdgpu_cu_info *cu_info)
4904 int i, j, k, counter, active_cu_number = 0;
4905 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4906 unsigned disable_masks[4 * 2];
4908 if (!adev || !cu_info)
4911 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
4913 mutex_lock(&adev->grbm_idx_mutex);
4914 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4915 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4919 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
4921 gfx_v9_0_set_user_cu_inactive_bitmap(
4922 adev, disable_masks[i * 2 + j]);
4923 bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
4924 cu_info->bitmap[i][j] = bitmap;
4926 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
4927 if (bitmap & mask) {
4928 if (counter < adev->gfx.config.max_cu_per_sh)
4934 active_cu_number += counter;
4936 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4937 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4940 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
4941 mutex_unlock(&adev->grbm_idx_mutex);
4943 cu_info->number = active_cu_number;
4944 cu_info->ao_cu_mask = ao_cu_mask;
4945 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4950 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
4952 .type = AMD_IP_BLOCK_TYPE_GFX,
4956 .funcs = &gfx_v9_0_ip_funcs,