/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifdef DRV_AMDGPU

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <xf86drm.h>

#include "addrinterface.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
#endif

/* DWORD register offsets consumed by AMDGPU_INFO_READ_MMR_REG below. */
#define mmCC_RB_BACKEND_DISABLE		0x263d
#define mmGB_TILE_MODE0			0x2644
#define mmGB_MACROTILE_MODE0		0x2664
#define mmGB_ADDR_CONFIG		0x263e
#define mmMC_ARB_RAMCFG			0x9d8

/* ASIC family ids in the order addrlib expects; FAMILY_CZ is used below. */
enum {
	FAMILY_UNKNOWN,
	FAMILY_SI,
	FAMILY_CI,
	FAMILY_KV,
	FAMILY_VI,
	FAMILY_CZ,
	FAMILY_PI,
	FAMILY_LAST,
};
static struct supported_combination combos[5] = {
	{DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
		BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN},
	{DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
		BO_USE_RENDERING | BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY},
	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_NONE,
		BO_USE_RENDERING | BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY},
	{DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
		BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN},
	{DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
		BO_USE_RENDERING | BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY},
};
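/*
 * Attaches tiling metadata to a GEM buffer via the DRM_AMDGPU_GEM_METADATA
 * ioctl so other processes importing the buffer can recover its layout.
 */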
static int amdgpu_set_metadata(int fd, uint32_t handle,
			       struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {0};

	if (!info)
		return -EINVAL;

	args.handle = handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args,
				   sizeof(args));
}
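/*
 * Reads `count` dwords of MMIO register space through the kernel's
 * AMDGPU_INFO_READ_MMR_REG query; userspace cannot map these registers
 * directly.
 */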
static int amdgpu_read_mm_regs(int fd, unsigned dword_offset,
			       unsigned count, uint32_t instance,
			       uint32_t flags, uint32_t *values)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t) values;
	request.return_size = count * sizeof(uint32_t);
	request.query = AMDGPU_INFO_READ_MMR_REG;
	request.read_mmr_reg.dword_offset = dword_offset;
	request.read_mmr_reg.count = count;
	request.read_mmr_reg.instance = instance;
	request.read_mmr_reg.flags = flags;

	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}
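/*
 * Fills a struct amdgpu_gpu_info with the tiling-related configuration
 * registers addrlib needs: RB disables, the tile mode arrays, the address
 * config, and the memory controller RAM config.
 */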
static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info)
{
	int ret;
	uint32_t instance;

	instance = AMDGPU_INFO_MMR_SH_INDEX_MASK <<
		   AMDGPU_INFO_MMR_SH_INDEX_SHIFT;

	ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0,
				  &gpu_info->backend_disable[0]);
	if (ret)
		return ret;
	/* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
	gpu_info->backend_disable[0] =
		(gpu_info->backend_disable[0] >> 16) & 0xff;

	ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0,
				  gpu_info->gb_tile_mode);
	if (ret)
		return ret;

	ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0,
				  gpu_info->gb_macro_tile_mode);
	if (ret)
		return ret;

	ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0,
				  &gpu_info->gb_addr_cfg);
	if (ret)
		return ret;

	ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0,
				  &gpu_info->mc_arb_ramcfg);

	return ret;
}
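/* Memory management callbacks handed to addrlib at AddrCreate() time. */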
static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in)
{
	return malloc(in->sizeInBytes);
}

static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in)
{
	free(in->pVirtAddr);
	return ADDR_OK;
}
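/*
 * Asks addrlib for the surface layout (pitch, size, alignment) of a
 * width x height buffer and packs the resulting tile parameters into the
 * AMDGPU_TILING_* flag word shared with the kernel and other userspace.
 */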
static int amdgpu_addrlib_compute(void *addrlib, uint32_t width,
				  uint32_t height, uint32_t format,
				  uint32_t usage, uint32_t *tiling_flags,
				  ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out)
{
	ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = {0};
	ADDR_TILEINFO addr_tile_info = {0};
	ADDR_TILEINFO addr_tile_info_out = {0};

	addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);

	/* Set the requested tiling mode. */
	addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1;
	if (usage & (BO_USE_CURSOR | BO_USE_LINEAR))
		addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED;
	if (width <= 16 || height <= 16)
		addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1;

	/* Bits per pixel should be calculated from the format. */
	addr_surf_info_in.bpp = drv_bpp_from_format(format, 0);
	addr_surf_info_in.numSamples = 1;
	addr_surf_info_in.width = width;
	addr_surf_info_in.height = height;
	addr_surf_info_in.numSlices = 1;
	addr_surf_info_in.pTileInfo = &addr_tile_info;
	addr_surf_info_in.tileIndex = -1;

	/* This disables incorrect calculations (hacks) in addrlib. */
	addr_surf_info_in.flags.noStencil = 1;

	/* Set the micro tile type. */
	if (usage & BO_USE_SCANOUT)
		addr_surf_info_in.tileType = ADDR_DISPLAYABLE;
	else
		addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE;

	addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
	addr_out->pTileInfo = &addr_tile_info_out;

	if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in,
				   addr_out) != ADDR_OK)
		return -EINVAL;

	ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = {0};
	ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = {0};
	ADDR_TILEINFO s_tile_hw_info_out = {0};

	s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT);
	/* Convert from real value to HW value */
	s_in.reverse = 0;
	s_in.pTileInfo = &addr_tile_info_out;

	s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT);
	s_out.pTileInfo = &s_tile_hw_info_out;

	if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK)
		return -EINVAL;

	if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1)
		/* ARRAY_2D_TILED_THIN1 */
		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);
	else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1)
		/* ARRAY_1D_TILED_THIN1 */
		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2);
	else
		/* ARRAY_LINEAR_ALIGNED */
		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1);

	*tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH,
			drv_log_base2(addr_tile_info_out.bankWidth));
	*tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT,
			drv_log_base2(addr_tile_info_out.bankHeight));
	*tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT,
			s_tile_hw_info_out.tileSplitBytes);
	*tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT,
			drv_log_base2(addr_tile_info_out.macroAspectRatio));
	*tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG,
			s_tile_hw_info_out.pipeConfig);
	*tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks);

	return 0;
}
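/*
 * Queries the GPU's tiling configuration and spins up an addrlib instance
 * seeded with those register values. Returns the addrlib handle, or NULL
 * on failure.
 */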
static void *amdgpu_addrlib_init(int fd)
{
	int ret;
	ADDR_CREATE_INPUT addr_create_input = {0};
	ADDR_CREATE_OUTPUT addr_create_output = {0};
	ADDR_REGISTER_VALUE reg_value = {0};
	ADDR_CREATE_FLAGS create_flags = { {0} };
	ADDR_E_RETURNCODE addr_ret;

	addr_create_input.size = sizeof(ADDR_CREATE_INPUT);
	addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT);

	struct amdgpu_gpu_info gpu_info = {0};

	ret = amdgpu_query_gpu(fd, &gpu_info);

	if (ret) {
		fprintf(stderr, "[%s] failed with error = %d\n", __func__, ret);
		return NULL;
	}

	reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3;
	reg_value.gbAddrConfig = gpu_info.gb_addr_cfg;
	reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2;

	reg_value.backendDisables = gpu_info.backend_disable[0];
	reg_value.pTileConfig = gpu_info.gb_tile_mode;
	reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode)
			/ sizeof(gpu_info.gb_tile_mode[0]);
	reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode;
	reg_value.noOfMacroEntries = sizeof(gpu_info.gb_macro_tile_mode)
			/ sizeof(gpu_info.gb_macro_tile_mode[0]);
	create_flags.value = 0;
	create_flags.useTileIndex = 1;

	addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;

	/* NOTE: the chip family is currently hardcoded to Carrizo. */
	addr_create_input.chipFamily = FAMILY_CZ;
	addr_create_input.createFlags = create_flags;
	addr_create_input.callbacks.allocSysMem = alloc_sys_mem;
	addr_create_input.callbacks.freeSysMem = free_sys_mem;
	addr_create_input.callbacks.debugPrint = 0;
	addr_create_input.regValue = reg_value;

	addr_ret = AddrCreate(&addr_create_input, &addr_create_output);

	if (addr_ret != ADDR_OK) {
		fprintf(stderr, "[%s] failed with error = %d\n", __func__, addr_ret);
		return NULL;
	}

	return addr_create_output.hLib;
}
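/*
 * Driver entry point: initializes addrlib, stashes the handle in
 * drv->priv, and advertises the supported format/usage combinations.
 */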
static int amdgpu_init(struct driver *drv)
{
	void *addrlib;

	addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
	if (!addrlib)
		return -1;

	drv->priv = addrlib;

	drv_insert_combinations(drv, combos, ARRAY_SIZE(combos));
	return drv_add_kms_flags(drv);
}
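/* Tears down the addrlib instance created in amdgpu_init(). */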
static void amdgpu_close(struct driver *drv)
{
	AddrDestroy(drv->priv);
	drv->priv = NULL;
}
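/*
 * Computes the surface layout with addrlib, allocates VRAM for it with
 * DRM_AMDGPU_GEM_CREATE, and records the tiling flags both on the bo and
 * as kernel-side metadata for importers.
 */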
static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height,
			    uint32_t format, uint32_t usage)
{
	void *addrlib = bo->drv->priv;
	union drm_amdgpu_gem_create gem_create;
	struct amdgpu_bo_metadata metadata = {0};
	ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = {0};
	uint32_t tiling_flags = 0;
	int ret;

	if (amdgpu_addrlib_compute(addrlib, width,
				   height, format, usage,
				   &tiling_flags, &addr_out) < 0)
		return -EINVAL;

	bo->tiling = tiling_flags;

	bo->sizes[0] = addr_out.surfSize;
	bo->strides[0] = addr_out.pixelPitch
		* DIV_ROUND_UP(addr_out.pixelBits, 8);

	memset(&gem_create, 0, sizeof(gem_create));
	gem_create.in.bo_size = bo->sizes[0];
	gem_create.in.alignment = addr_out.baseAlign;
	/* Set the placement. */
	gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	gem_create.in.domain_flags = usage;

	/* Allocate the buffer with the preferred heap. */
	ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE,
				  &gem_create, sizeof(gem_create));
	if (ret < 0)
		return ret;

	bo->handles[0].u32 = gem_create.out.handle;

	metadata.tiling_info = tiling_flags;

	ret = amdgpu_set_metadata(drv_get_fd(bo->drv),
				  bo->handles[0].u32, &metadata);

	return ret;
}
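/*
 * Maps the buffer into this process: DRM_IOCTL_AMDGPU_GEM_MMAP only
 * returns a fake mmap offset, which is then passed to mmap() on the DRM
 * fd to obtain the actual CPU pointer.
 */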
static void *amdgpu_bo_map(struct bo *bo, struct map_info *data, size_t plane)
{
	int ret;
	union drm_amdgpu_gem_mmap gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.in.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
	if (ret) {
		fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
		return MAP_FAILED;
	}

	data->length = bo->sizes[0];

	return mmap(0, bo->sizes[0], PROT_READ | PROT_WRITE, MAP_SHARED,
		    bo->drv->fd, gem_map.out.addr_ptr);
}
struct backend backend_amdgpu = {
	.name = "amdgpu",
	.init = amdgpu_init,
	.close = amdgpu_close,
	.bo_create = amdgpu_bo_create,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_map = amdgpu_bo_map,
};

#endif