/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifdef DRV_AMDGPU
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "dri.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

// clang-format off
#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
// clang-format on

#define TILE_TYPE_LINEAR 0
/* DRI backend decides tiling in this case. */
#define TILE_TYPE_DRI 1

/* Height alignment for encoder/decoder buffers. */
#define CHROME_HEIGHT_ALIGN 16

struct amdgpu_priv {
        struct dri_driver dri;
        int drm_version;

        /* SDMA copy engine state (set up by sdma_init). */
        struct drm_amdgpu_info_device dev_info;
        uint32_t sdma_ctx;
        uint32_t sdma_cmdbuf_bo;
        uint64_t sdma_cmdbuf_addr;
        uint64_t sdma_cmdbuf_size;
        uint32_t *sdma_cmdbuf_map;
};

struct amdgpu_linear_vma_priv {
        uint32_t handle;
        uint32_t map_flags;
};

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
                                                  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
                                                  DRM_FORMAT_XRGB8888 };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_GR88,           DRM_FORMAT_R8,
                                                   DRM_FORMAT_NV21,           DRM_FORMAT_NV12,
                                                   DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };

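/* Fills |dev_info| with static device information via the AMDGPU_INFO ioctl. */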
static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
{
        struct drm_amdgpu_info info_args = { 0 };

        info_args.return_pointer = (uintptr_t)dev_info;
        info_args.return_size = sizeof(*dev_info);
        info_args.query = AMDGPU_INFO_DEV_INFO;

        return drmCommandWrite(fd, DRM_AMDGPU_INFO, &info_args, sizeof(info_args));
}

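/*
 * Sets up the state needed for SDMA copies: a GPU submission context plus a
 * small GTT command buffer that is mapped both into the GPU's address space
 * and into the CPU's. Failure here is non-fatal; mapping falls back to the
 * plain CPU path.
 */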
static int sdma_init(struct amdgpu_priv *priv, int fd)
{
        union drm_amdgpu_ctx ctx_args = { { 0 } };
        union drm_amdgpu_gem_create gem_create = { { 0 } };
        struct drm_amdgpu_gem_va va_args = { 0 };
        union drm_amdgpu_gem_mmap gem_map = { { 0 } };
        struct drm_gem_close gem_close = { 0 };
        int ret;

        /* Ensure we can make a submission without BO lists. */
        if (priv->drm_version < 27)
                return 0;

        /* Anything outside this range needs adjustments to the SDMA copy commands */
        if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)
                return 0;

        ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;

        ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
        if (ret < 0)
                return ret;

        priv->sdma_ctx = ctx_args.out.alloc.ctx_id;

        priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
        gem_create.in.bo_size = priv->sdma_cmdbuf_size;
        gem_create.in.alignment = 4096;
        gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;

        ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));
        if (ret < 0)
                goto fail_ctx;

        priv->sdma_cmdbuf_bo = gem_create.out.handle;

        priv->sdma_cmdbuf_addr =
            ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);

        /* Map the buffer into the GPU address space so we can use it from the GPU */
        va_args.handle = priv->sdma_cmdbuf_bo;
        va_args.operation = AMDGPU_VA_OP_MAP;
        va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
        va_args.va_address = priv->sdma_cmdbuf_addr;
        va_args.offset_in_bo = 0;
        va_args.map_size = priv->sdma_cmdbuf_size;

        ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
        if (ret)
                goto fail_bo;

        gem_map.in.handle = priv->sdma_cmdbuf_bo;
        ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
        if (ret)
                goto fail_va;

        priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                                     fd, gem_map.out.addr_ptr);
        if (priv->sdma_cmdbuf_map == MAP_FAILED) {
                priv->sdma_cmdbuf_map = NULL;
                ret = -ENOMEM;
                goto fail_va;
        }

        return 0;
fail_va:
        va_args.operation = AMDGPU_VA_OP_UNMAP;
        va_args.flags = 0;
        drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
fail_bo:
        gem_close.handle = priv->sdma_cmdbuf_bo;
        drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
fail_ctx:
        memset(&ctx_args, 0, sizeof(ctx_args));
        ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
        ctx_args.in.ctx_id = priv->sdma_ctx;
        drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
        return ret;
}

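/*
 * Releases the resources created by sdma_init(): the GPU VA mapping, the
 * command buffer BO and the submission context.
 */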
static void sdma_finish(struct amdgpu_priv *priv, int fd)
{
        union drm_amdgpu_ctx ctx_args = { { 0 } };
        struct drm_amdgpu_gem_va va_args = { 0 };
        struct drm_gem_close gem_close = { 0 };

        if (!priv->sdma_cmdbuf_map)
                return;

        va_args.handle = priv->sdma_cmdbuf_bo;
        va_args.operation = AMDGPU_VA_OP_UNMAP;
        va_args.flags = 0;
        va_args.va_address = priv->sdma_cmdbuf_addr;
        va_args.offset_in_bo = 0;
        va_args.map_size = priv->sdma_cmdbuf_size;
        drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

        gem_close.handle = priv->sdma_cmdbuf_bo;
        drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);

        ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
        ctx_args.in.ctx_id = priv->sdma_ctx;
        drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
}

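/*
 * Copies |size| bytes from |src_handle| to |dst_handle| on the SDMA engine:
 * both BOs are mapped into the GPU address space right after the command
 * buffer, linear-copy packets are written to the command buffer, and the
 * submission is sent with DRM_AMDGPU_CS and waited on synchronously.
 */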
static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
                     uint64_t size)
{
        const uint64_t max_size_per_cmd = 0x3fff00;
        const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
        const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
        uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
        uint64_t dst_addr = src_addr + size;
        struct drm_amdgpu_gem_va va_args = { 0 };
        unsigned cmd = 0;
        uint64_t remaining_size = size;
        uint64_t cur_src_addr = src_addr;
        uint64_t cur_dst_addr = dst_addr;
        struct drm_amdgpu_cs_chunk_ib ib = { 0 };
        struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
        uint64_t chunk_ptrs[2];
        union drm_amdgpu_cs cs = { { 0 } };
        struct drm_amdgpu_bo_list_in bo_list = { 0 };
        struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
        union drm_amdgpu_wait_cs wait_cs = { { 0 } };
        int ret = 0;

        if (size > UINT64_MAX - max_size_per_cmd ||
            DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)
                return -ENOMEM;

        /* Map both buffers into the GPU address space so we can access them from the GPU. */
        va_args.handle = src_handle;
        va_args.operation = AMDGPU_VA_OP_MAP;
        va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
        va_args.va_address = src_addr;
        va_args.map_size = size;

        ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
        if (ret)
                return ret;

        va_args.handle = dst_handle;
        va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
        va_args.va_address = dst_addr;

        ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
        if (ret)
                goto unmap_src;

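        /*
         * Each copy command is 7 dwords: the opcode, the byte count (minus
         * one from AMDGPU_FAMILY_AI onwards), a reserved dword, then the
         * 64-bit source and destination addresses split into low/high halves.
         */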
        while (remaining_size) {
                uint64_t cur_size = remaining_size;
                if (cur_size > max_size_per_cmd)
                        cur_size = max_size_per_cmd;

                priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
                priv->sdma_cmdbuf_map[cmd++] =
                    priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size;
                priv->sdma_cmdbuf_map[cmd++] = 0;
                priv->sdma_cmdbuf_map[cmd++] = cur_src_addr;
                priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32;
                priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr;
                priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32;

                remaining_size -= cur_size;
                cur_src_addr += cur_size;
                cur_dst_addr += cur_size;
        }

        ib.va_start = priv->sdma_cmdbuf_addr;
        ib.ib_bytes = cmd * 4;
        ib.ip_type = AMDGPU_HW_IP_DMA;

        chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
        chunks[1].length_dw = sizeof(ib) / 4;
        chunks[1].chunk_data = (uintptr_t)&ib;

        bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
        bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
        bo_list_entries[1].bo_handle = src_handle;
        bo_list_entries[1].bo_priority = 8;
        bo_list_entries[2].bo_handle = dst_handle;
        bo_list_entries[2].bo_priority = 8;

        bo_list.bo_number = 3;
        bo_list.bo_info_size = sizeof(bo_list_entries[0]);
        bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;

        chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
        chunks[0].length_dw = sizeof(bo_list) / 4;
        chunks[0].chunk_data = (uintptr_t)&bo_list;

        chunk_ptrs[0] = (uintptr_t)&chunks[0];
        chunk_ptrs[1] = (uintptr_t)&chunks[1];

        cs.in.ctx_id = priv->sdma_ctx;
        cs.in.num_chunks = 2;
        cs.in.chunks = (uintptr_t)chunk_ptrs;

        ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
        if (ret) {
                drv_log("SDMA copy command buffer submission failed %d\n", ret);
                goto unmap_dst;
        }

        wait_cs.in.handle = cs.out.handle;
        wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
        wait_cs.in.ctx_id = priv->sdma_ctx;
        wait_cs.in.timeout = INT64_MAX;

        ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
        if (ret) {
                drv_log("Could not wait for CS to finish\n");
        } else if (wait_cs.out.status) {
                drv_log("Infinite wait timed out, likely GPU hang.\n");
                ret = -ENODEV;
        }

unmap_dst:
        va_args.handle = dst_handle;
        va_args.operation = AMDGPU_VA_OP_UNMAP;
        va_args.flags = AMDGPU_VM_DELAY_UPDATE;
        va_args.va_address = dst_addr;
        drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

unmap_src:
        va_args.handle = src_handle;
        va_args.operation = AMDGPU_VA_OP_UNMAP;
        va_args.flags = AMDGPU_VM_DELAY_UPDATE;
        va_args.va_address = src_addr;
        drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

        return ret;
}

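/*
 * Driver init: queries device info, loads the radeonsi DRI backend and
 * registers the format/use-flag combinations this driver supports, first for
 * linear buffers and then for potentially tiled, DRI-allocated ones.
 */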
static int amdgpu_init(struct driver *drv)
{
        struct amdgpu_priv *priv;
        drmVersionPtr drm_version;
        struct format_metadata metadata;
        uint64_t use_flags = BO_USE_RENDER_MASK;

        priv = calloc(1, sizeof(struct amdgpu_priv));
        if (!priv)
                return -ENOMEM;

        drm_version = drmGetVersion(drv_get_fd(drv));
        if (!drm_version) {
                free(priv);
                return -ENODEV;
        }

        priv->drm_version = drm_version->version_minor;
        drmFreeVersion(drm_version);

        drv->priv = priv;

        if (query_dev_info(drv_get_fd(drv), &priv->dev_info)) {
                free(priv);
                drv->priv = NULL;
                return -ENODEV;
        }
        if (dri_init(drv, DRI_PATH, "radeonsi")) {
                free(priv);
                drv->priv = NULL;
                return -ENODEV;
        }

        if (sdma_init(priv, drv_get_fd(drv))) {
                drv_log("SDMA init failed\n");

                /* Continue, as we can still successfully map things without SDMA. */
        }

        metadata.tiling = TILE_TYPE_LINEAR;
        metadata.priority = 1;
        metadata.modifier = DRM_FORMAT_MOD_LINEAR;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
                             &metadata, BO_USE_TEXTURE_MASK);

        /* NV12 format for camera, display, decoding and encoding. */
        drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
                                   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);

        /* Android CTS tests require this. */
        drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

        /* Linear formats supported by display. */
        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);

        drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);

        /*
         * The R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB: JPEG snapshots
         * from the camera and input/output buffers for the hardware decoder/encoder.
         */
        drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);

        /*
         * The following formats will be allocated by the DRI backend and may be tiled.
         * Since format modifier support hasn't been fully implemented yet, it's not
         * possible to enumerate the different types of buffers (like i915 can).
         */
        use_flags &= ~BO_USE_RENDERSCRIPT;
        use_flags &= ~BO_USE_SW_WRITE_OFTEN;
        use_flags &= ~BO_USE_SW_READ_OFTEN;
        use_flags &= ~BO_USE_LINEAR;

        metadata.tiling = TILE_TYPE_DRI;
        metadata.priority = 2;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        /* Potentially tiled formats supported by display. */
        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
        return 0;
}

static void amdgpu_close(struct driver *drv)
{
        sdma_finish(drv->priv, drv_get_fd(drv));
        dri_close(drv);
        free(drv->priv);
        drv->priv = NULL;
}

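/*
 * Allocates a linear buffer in GTT via DRM_AMDGPU_GEM_CREATE, with the stride
 * aligned to 256 bytes and CPU-access/caching flags derived from |use_flags|.
 */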
static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                   uint64_t use_flags)
{
        int ret;
        uint32_t plane, stride;
        union drm_amdgpu_gem_create gem_create;
        struct amdgpu_priv *priv = bo->drv->priv;

        stride = drv_stride_from_format(format, width, 0);
        stride = ALIGN(stride, 256);

        /*
         * Currently, the allocator used by Chrome aligns the height of encoder/
         * decoder buffers, while the allocator used by Android (gralloc/minigbm)
         * doesn't provide any alignment.
         *
         * See b/153130069
         */
        if (use_flags & (BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER))
                height = ALIGN(height, CHROME_HEIGHT_ALIGN);

        drv_bo_from_format(bo, stride, height, format);

        memset(&gem_create, 0, sizeof(gem_create));
        gem_create.in.bo_size =
            ALIGN(bo->meta.total_size, priv->dev_info.virtual_address_alignment);
        gem_create.in.alignment = 256;
        gem_create.in.domain_flags = 0;

        if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;

        /* Scanout in GTT requires USWC, otherwise try to use cacheable memory
         * for buffers that are read often, because uncacheable reads can be
         * very slow. USWC should be faster on the GPU though. */
        if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

        /* Allocate the buffer with the preferred heap. */
        ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
                                  sizeof(gem_create));
        if (ret < 0)
                return ret;

        for (plane = 0; plane < bo->meta.num_planes; plane++)
                bo->handles[plane].u32 = gem_create.out.handle;

        bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR;

        return 0;
}

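/*
 * Dispatches allocation: combinations tagged TILE_TYPE_DRI go through the DRI
 * backend (which may pick a tiled layout), everything else is allocated linear.
 */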
static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                            uint64_t use_flags)
{
        struct combination *combo;

        combo = drv_get_combination(bo->drv, format, use_flags);
        if (!combo)
                return -EINVAL;

        if (combo->metadata.tiling == TILE_TYPE_DRI) {
                bool needs_alignment = false;
#ifdef __ANDROID__
                /*
                 * Currently, the gralloc API doesn't differentiate between allocation time and map
                 * time strides. A workaround for amdgpu DRI buffers is to always align to 256 at
                 * allocation time.
                 *
                 * See b/115946221, b/117942643
                 */
                if (use_flags & (BO_USE_SW_MASK))
                        needs_alignment = true;
#endif
                // See b/122049612
                if (use_flags & (BO_USE_SCANOUT))
                        needs_alignment = true;

                if (needs_alignment) {
                        uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
                        width = ALIGN(width, 256 / bytes_per_pixel);
                }

                return dri_bo_create(bo, width, height, format, use_flags);
        }

        return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
}

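/*
 * If the caller only accepts the linear modifier, allocate directly; otherwise
 * let the DRI backend pick from the supplied modifier list.
 */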
static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
                                           uint32_t format, const uint64_t *modifiers,
                                           uint32_t count)
{
        bool only_use_linear = true;

        for (uint32_t i = 0; i < count; ++i)
                if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
                        only_use_linear = false;

        if (only_use_linear)
                return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);

        return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count);
}

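/*
 * Imports through the DRI backend for tiled buffers and through the generic
 * PRIME path for linear ones; with an invalid modifier, the tiling is inferred
 * from the combination table.
 */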
static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
{
        bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR;
        if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) {
                struct combination *combo;
                combo = drv_get_combination(bo->drv, data->format, data->use_flags);
                if (!combo)
                        return -EINVAL;

                dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI;
        }

        if (dri_tiling)
                return dri_bo_import(bo, data);
        else
                return drv_prime_bo_import(bo, data);
}

static int amdgpu_destroy_bo(struct bo *bo)
{
        if (bo->priv)
                return dri_bo_destroy(bo);
        else
                return drv_gem_bo_destroy(bo);
}

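/*
 * Maps a buffer for CPU access. DRI-allocated buffers are delegated to the DRI
 * backend. For linear buffers placed in VRAM or write-combined GTT, CPU reads
 * would be very slow, so when SDMA is available the contents are first copied
 * into a temporary cacheable GTT BO and that staging copy is mapped instead;
 * amdgpu_unmap_bo() copies any writes back.
 */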
static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        void *addr = MAP_FAILED;
        int ret;
        union drm_amdgpu_gem_mmap gem_map;
        struct drm_amdgpu_gem_create_in bo_info = { 0 };
        struct drm_amdgpu_gem_op gem_op = { 0 };
        uint32_t handle = bo->handles[plane].u32;
        struct amdgpu_linear_vma_priv *priv = NULL;
        struct amdgpu_priv *drv_priv;

        if (bo->priv)
                return dri_bo_map(bo, vma, plane, map_flags);

        drv_priv = bo->drv->priv;
        gem_op.handle = handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
        if (ret)
                return MAP_FAILED;

        vma->length = bo_info.bo_size;

        if (((bo_info.domains & AMDGPU_GEM_DOMAIN_VRAM) ||
             (bo_info.domain_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)) &&
            drv_priv->sdma_cmdbuf_map) {
                union drm_amdgpu_gem_create gem_create = { { 0 } };

                priv = calloc(1, sizeof(struct amdgpu_linear_vma_priv));
                if (!priv)
                        return MAP_FAILED;

                gem_create.in.bo_size = bo_info.bo_size;
                gem_create.in.alignment = 4096;
                gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;

                ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_CREATE, &gem_create,
                                          sizeof(gem_create));
                if (ret < 0) {
                        drv_log("GEM create failed\n");
                        free(priv);
                        return MAP_FAILED;
                }

                priv->map_flags = map_flags;
                handle = priv->handle = gem_create.out.handle;

                ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handles[0].u32, priv->handle,
                                bo_info.bo_size);
                if (ret) {
                        drv_log("SDMA copy for read failed\n");
                        goto fail;
                }
        }

        memset(&gem_map, 0, sizeof(gem_map));
        gem_map.in.handle = handle;

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
                goto fail;
        }

        addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.out.addr_ptr);
        if (addr == MAP_FAILED)
                goto fail;

        vma->priv = priv;
        return addr;

fail:
        if (priv) {
                struct drm_gem_close gem_close = { 0 };
                gem_close.handle = priv->handle;
                drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
                free(priv);
        }
        return MAP_FAILED;
}

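/*
 * Unmaps a CPU mapping. If a staging BO was used and the mapping was writable,
 * the staged contents are copied back to the real buffer with SDMA before the
 * staging BO is closed.
 */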
static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
{
        if (bo->priv)
                return dri_bo_unmap(bo, vma);
        else {
                int r = munmap(vma->addr, vma->length);
                if (r)
                        return r;

                if (vma->priv) {
                        struct amdgpu_linear_vma_priv *priv = vma->priv;
                        struct drm_gem_close gem_close = { 0 };

                        if (BO_MAP_WRITE & priv->map_flags) {
                                r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
                                              bo->handles[0].u32, vma->length);
                                if (r)
                                        return r;
                        }

                        gem_close.handle = priv->handle;
                        r = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
                }

                return 0;
        }
}

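/*
 * Synchronizes CPU access with the GPU by waiting for the buffer to go idle
 * via DRM_AMDGPU_GEM_WAIT_IDLE; DRI-managed buffers need no explicit sync here.
 */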
static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
        int ret;
        union drm_amdgpu_gem_wait_idle wait_idle;

        if (bo->priv)
                return 0;

        memset(&wait_idle, 0, sizeof(wait_idle));
        wait_idle.in.handle = bo->handles[0].u32;
        wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE;

        ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &wait_idle,
                                  sizeof(wait_idle));

        if (ret < 0) {
                drv_log("DRM_AMDGPU_GEM_WAIT_IDLE failed with %d\n", ret);
                return ret;
        }

        if (ret == 0 && wait_idle.out.status)
                drv_log("DRM_AMDGPU_GEM_WAIT_IDLE BO is busy\n");

        return 0;
}

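/* Resolves Android's flexible formats to concrete DRM formats. */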
static uint32_t amdgpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
        switch (format) {
        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
                /* Camera subsystem requires NV12. */
                if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
                        return DRM_FORMAT_NV12;
                /* HACK: See b/28671744 */
                return DRM_FORMAT_XBGR8888;
        case DRM_FORMAT_FLEX_YCbCr_420_888:
                return DRM_FORMAT_NV12;
        default:
                return format;
        }
}

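/* Entry points for the amdgpu minigbm backend. */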
const struct backend backend_amdgpu = {
        .name = "amdgpu",
        .init = amdgpu_init,
        .close = amdgpu_close,
        .bo_create = amdgpu_create_bo,
        .bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
        .bo_destroy = amdgpu_destroy_bo,
        .bo_import = amdgpu_import_bo,
        .bo_map = amdgpu_map_bo,
        .bo_unmap = amdgpu_unmap_bo,
        .bo_invalidate = amdgpu_bo_invalidate,
        .resolve_format = amdgpu_resolve_format,
        .num_planes_from_modifier = dri_num_planes_from_modifier,
};

#endif /* DRV_AMDGPU */