/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifdef DRV_AMDGPU
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "addrinterface.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
#endif

// clang-format off
#define mmCC_RB_BACKEND_DISABLE         0x263d
#define mmGB_TILE_MODE0                 0x2644
#define mmGB_MACROTILE_MODE0            0x2664
#define mmGB_ADDR_CONFIG                0x263e
#define mmMC_ARB_RAMCFG                 0x9d8
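/*
 * The defines above are dword offsets of GFX MMIO registers. They are read
 * through AMDGPU_INFO_READ_MMR_REG below, and their contents seed addrlib
 * with the chip's tiling configuration.
 */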

enum {
        FAMILY_UNKNOWN,
        FAMILY_SI,
        FAMILY_CI,
        FAMILY_KV,
        FAMILY_VI,
        FAMILY_CZ,
        FAMILY_PI,
        FAMILY_LAST,
};
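/*
 * These values are assumed to mirror addrlib's chip family enumeration; only
 * FAMILY_CZ (Carrizo) is actually passed to AddrCreate below.
 */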
// clang-format on

struct amdgpu_priv {
        void *addrlib;
        int drm_version;
};

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
                                                  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
                                                  DRM_FORMAT_XRGB8888 };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21,
                                                   DRM_FORMAT_NV12, DRM_FORMAT_YVU420_ANDROID };

static int amdgpu_set_metadata(int fd, uint32_t handle, struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = { 0 };

        if (!info)
                return -EINVAL;

        args.handle = handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
}

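/*
 * Read `count` consecutive dwords of MMIO register space starting at
 * `dword_offset`, using the kernel's AMDGPU_INFO_READ_MMR_REG query.
 */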
static int amdgpu_read_mm_regs(int fd, unsigned dword_offset, unsigned count, uint32_t instance,
                               uint32_t flags, uint32_t *values)
{
        struct drm_amdgpu_info request;

        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)values;
        request.return_size = count * sizeof(uint32_t);
        request.query = AMDGPU_INFO_READ_MMR_REG;
        request.read_mmr_reg.dword_offset = dword_offset;
        request.read_mmr_reg.count = count;
        request.read_mmr_reg.instance = instance;
        request.read_mmr_reg.flags = flags;

        return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info)
{
        int ret;
        uint32_t instance;

        if (!gpu_info)
                return -EINVAL;

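        /*
         * Filling the SH index field with its full mask is assumed to select
         * the broadcast instance, i.e. a single read valid for all shader
         * arrays.
         */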
        instance = AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT;

        ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0,
                                  &gpu_info->backend_disable[0]);
        if (ret)
                return ret;
        /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
        gpu_info->backend_disable[0] = (gpu_info->backend_disable[0] >> 16) & 0xff;

        ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0, gpu_info->gb_tile_mode);
        if (ret)
                return ret;

        ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0,
                                  gpu_info->gb_macro_tile_mode);
        if (ret)
                return ret;

        ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0, &gpu_info->gb_addr_cfg);
        if (ret)
                return ret;

        ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0, &gpu_info->mc_arb_ramcfg);
        if (ret)
                return ret;

        return 0;
}

static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in)
{
        return malloc(in->sizeInBytes);
}

static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in)
{
        free(in->pVirtAddr);
        return ADDR_OK;
}

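/*
 * Compute the surface layout for the given dimensions, format and usage with
 * addrlib, and translate the result into AMDGPU_TILING_* flags.
 */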
static int amdgpu_addrlib_compute(void *addrlib, uint32_t width, uint32_t height, uint32_t format,
                                  uint64_t use_flags, uint32_t *tiling_flags,
                                  ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out)
{
        ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = { 0 };
        ADDR_TILEINFO addr_tile_info = { 0 };
        ADDR_TILEINFO addr_tile_info_out = { 0 };
        uint32_t bits_per_pixel;

        addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);

        /* Set the requested tiling mode. */
        addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1;
        if (use_flags &
            (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
                addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED;
        else if (width <= 16 || height <= 16)
                addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1;

        /* Bits per pixel must be derived from the format. */
        bits_per_pixel = drv_stride_from_format(format, 1, 0) * 8;
        addr_surf_info_in.bpp = bits_per_pixel;
        addr_surf_info_in.numSamples = 1;
        addr_surf_info_in.width = width;
        addr_surf_info_in.height = height;
        addr_surf_info_in.numSlices = 1;
        addr_surf_info_in.pTileInfo = &addr_tile_info;
        addr_surf_info_in.tileIndex = -1;

        /* This disables incorrect calculations (hacks) in addrlib. */
        addr_surf_info_in.flags.noStencil = 1;

        /* Set the micro tile type. */
        if (use_flags & BO_USE_SCANOUT)
                addr_surf_info_in.tileType = ADDR_DISPLAYABLE;
        else
                addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE;

        addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
        addr_out->pTileInfo = &addr_tile_info_out;

        if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in, addr_out) != ADDR_OK)
                return -EINVAL;

        ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = { 0 };
        ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = { 0 };
        ADDR_TILEINFO s_tile_hw_info_out = { 0 };

        s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT);
        /* Convert from real value to HW value */
        s_in.reverse = 0;
        s_in.pTileInfo = &addr_tile_info_out;
        s_in.tileIndex = -1;

        s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT);
        s_out.pTileInfo = &s_tile_hw_info_out;

        if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK)
                return -EINVAL;

        if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1)
                /* 2D_TILED_THIN1 */
                *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);
        else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1)
                /* 1D_TILED_THIN1 */
                *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2);
        else
                /* LINEAR_ALIGNED */
                *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1);

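        /*
         * Bank width/height and the macro tile aspect ratio are encoded as
         * log2 values, while tile split, pipe config and bank count were
         * already converted to their hardware encodings by
         * AddrConvertTileInfoToHW above.
         */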
        *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, drv_log_base2(addr_tile_info_out.bankWidth));
        *tiling_flags |=
            AMDGPU_TILING_SET(BANK_HEIGHT, drv_log_base2(addr_tile_info_out.bankHeight));
        *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, s_tile_hw_info_out.tileSplitBytes);
        *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT,
                                           drv_log_base2(addr_tile_info_out.macroAspectRatio));
        *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, s_tile_hw_info_out.pipeConfig);
        *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks);

        return 0;
}

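/*
 * Query the GPU's tiling-related registers and create an addrlib instance
 * seeded with them. Returns the addrlib handle, or NULL on failure.
 */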
static void *amdgpu_addrlib_init(int fd)
{
        int ret;
        ADDR_CREATE_INPUT addr_create_input = { 0 };
        ADDR_CREATE_OUTPUT addr_create_output = { 0 };
        ADDR_REGISTER_VALUE reg_value = { 0 };
        ADDR_CREATE_FLAGS create_flags = { { 0 } };
        ADDR_E_RETURNCODE addr_ret;

        addr_create_input.size = sizeof(ADDR_CREATE_INPUT);
        addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT);

        struct amdgpu_gpu_info gpu_info = { 0 };

        ret = amdgpu_query_gpu(fd, &gpu_info);

        if (ret) {
                drv_log("amdgpu_query_gpu failed with error %d\n", ret);
                return NULL;
        }

        reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3;
        reg_value.gbAddrConfig = gpu_info.gb_addr_cfg;
        reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2;

        reg_value.backendDisables = gpu_info.backend_disable[0];
        reg_value.pTileConfig = gpu_info.gb_tile_mode;
        reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode) / sizeof(gpu_info.gb_tile_mode[0]);
        reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode;
        reg_value.noOfMacroEntries =
            sizeof(gpu_info.gb_macro_tile_mode) / sizeof(gpu_info.gb_macro_tile_mode[0]);
        create_flags.value = 0;
        create_flags.useTileIndex = 1;

        addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;

        addr_create_input.chipFamily = FAMILY_CZ;
        addr_create_input.createFlags = create_flags;
        addr_create_input.callbacks.allocSysMem = alloc_sys_mem;
        addr_create_input.callbacks.freeSysMem = free_sys_mem;
        addr_create_input.callbacks.debugPrint = 0;
        addr_create_input.regValue = reg_value;

        addr_ret = AddrCreate(&addr_create_input, &addr_create_output);

        if (addr_ret != ADDR_OK) {
                drv_log("AddrCreate failed with error %d\n", addr_ret);
                return NULL;
        }

        return addr_create_output.hLib;
}

static int amdgpu_init(struct driver *drv)
{
        struct amdgpu_priv *priv;
        drmVersionPtr drm_version;
        struct format_metadata metadata;
        uint64_t use_flags = BO_USE_RENDER_MASK;

        priv = calloc(1, sizeof(struct amdgpu_priv));
        if (!priv)
                return -1;

        drm_version = drmGetVersion(drv_get_fd(drv));
        if (!drm_version) {
                free(priv);
                return -1;
        }

        priv->drm_version = drm_version->version_minor;
        drmFreeVersion(drm_version);

        priv->addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
        if (!priv->addrlib) {
                free(priv);
                return -1;
        }

        drv->priv = priv;

        drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
                             &LINEAR_METADATA, BO_USE_TEXTURE_MASK);

        /* YUV format for camera */
        drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
        /*
         * The R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB, e.g. for
         * JPEG snapshots from the camera.
         */
        drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);

        drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_SCANOUT);

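        /*
         * The tiling metadata packs the addrlib micro tile type into the
         * upper 16 bits and the tile mode into the lower 16 bits.
         */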
        metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
        metadata.priority = 2;
        metadata.modifier = DRM_FORMAT_MOD_LINEAR;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);

        metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
        metadata.priority = 3;
        metadata.modifier = DRM_FORMAT_MOD_LINEAR;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        use_flags &= ~BO_USE_SW_WRITE_OFTEN;
        use_flags &= ~BO_USE_SW_READ_OFTEN;
        use_flags &= ~BO_USE_LINEAR;

        metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
        metadata.priority = 4;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);

        metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
        metadata.priority = 5;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        return 0;
}

static void amdgpu_close(struct driver *drv)
{
        struct amdgpu_priv *priv = (struct amdgpu_priv *)drv->priv;
        AddrDestroy(priv->addrlib);
        free(priv);
        drv->priv = NULL;
}

static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                            uint64_t use_flags)
{
        struct amdgpu_priv *priv = (struct amdgpu_priv *)bo->drv->priv;
        void *addrlib = priv->addrlib;
        union drm_amdgpu_gem_create gem_create;
        struct amdgpu_bo_metadata metadata = { 0 };
        ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = { 0 };
        uint32_t tiling_flags = 0;
        size_t plane;
        int ret;

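        /*
         * YUV buffers bypass addrlib and get a linear layout; the 64- and
         * 128-pixel width alignments below are assumed to satisfy the GPU's
         * and the video hardware's stride requirements.
         */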
        if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21) {
                drv_bo_from_format(bo, ALIGN(width, 64), height, format);
        } else if (format == DRM_FORMAT_YVU420_ANDROID) {
                drv_bo_from_format(bo, ALIGN(width, 128), height, format);
        } else {
                if (amdgpu_addrlib_compute(addrlib, width, height, format, use_flags, &tiling_flags,
                                           &addr_out) < 0)
                        return -EINVAL;

                bo->tiling = tiling_flags;
                /* RGB has 1 plane only */
                bo->offsets[0] = 0;
                bo->total_size = bo->sizes[0] = addr_out.surfSize;
                bo->strides[0] = addr_out.pixelPitch * DIV_ROUND_UP(addr_out.pixelBits, 8);
        }

        memset(&gem_create, 0, sizeof(gem_create));

        gem_create.in.bo_size = bo->total_size;
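        /*
         * For the YUV paths above, addr_out is still zero-initialized, so the
         * requested alignment is 0 and the kernel is assumed to fall back to
         * its default.
         */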
        gem_create.in.alignment = addr_out.baseAlign;

        /* Set the placement. */
        gem_create.in.domain_flags = 0;
        if (use_flags & (BO_USE_LINEAR | BO_USE_SW))
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        if (use_flags & (BO_USE_SCANOUT | BO_USE_CURSOR)) {
                /* TODO(dbehr) do not use VRAM after we enable display VM */
                gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
        } else {
                gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
                if (!(use_flags & BO_USE_SW_READ_OFTEN))
                        gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        }

        /*
         * If drm_version >= 21, everything exposes explicit synchronization
         * primitives and chromeos/arc++ will use them. Disable implicit
         * synchronization.
         */
        if (priv->drm_version >= 21) {
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
        }

        /* Allocate the buffer with the preferred heap. */
        ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
                                  sizeof(gem_create));

        if (ret < 0)
                return ret;

        metadata.tiling_info = tiling_flags;

        for (plane = 0; plane < bo->num_planes; plane++)
                bo->handles[plane].u32 = gem_create.out.handle;

        ret = amdgpu_set_metadata(drv_get_fd(bo->drv), bo->handles[0].u32, &metadata);

        return ret;
}

static void *amdgpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        int ret;
        union drm_amdgpu_gem_mmap gem_map;

        memset(&gem_map, 0, sizeof(gem_map));
        gem_map.in.handle = bo->handles[plane].u32;

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
                return MAP_FAILED;
        }

        vma->length = bo->total_size;

        return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.out.addr_ptr);
}

static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags)
{
        switch (format) {
        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
                /* Camera subsystem requires NV12. */
                if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
                        return DRM_FORMAT_NV12;
                /* HACK: See b/28671744 */
                return DRM_FORMAT_XBGR8888;
        case DRM_FORMAT_FLEX_YCbCr_420_888:
                return DRM_FORMAT_NV12;
        default:
                return format;
        }
}

const struct backend backend_amdgpu = {
        .name = "amdgpu",
        .init = amdgpu_init,
        .close = amdgpu_close,
        .bo_create = amdgpu_bo_create,
        .bo_destroy = drv_gem_bo_destroy,
        .bo_import = drv_prime_bo_import,
        .bo_map = amdgpu_bo_map,
        .bo_unmap = drv_bo_munmap,
        .resolve_format = amdgpu_resolve_format,
};

#endif