 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
#include <amdgpu_drm.h>

#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
#define TILE_TYPE_LINEAR 0
/* DRI backend decides tiling in this case. */
#define TILE_TYPE_DRI 1

/* Height alignment for Encoder/Decoder buffers. */
#define CHROME_HEIGHT_ALIGN 16
	struct dri_driver dri;
	struct drm_amdgpu_info_device dev_info;

	uint32_t sdma_cmdbuf_bo;
	uint64_t sdma_cmdbuf_addr;
	uint64_t sdma_cmdbuf_size;
	uint32_t *sdma_cmdbuf_map;
struct amdgpu_linear_vma_priv {
static const uint32_t render_target_formats[] = {
	DRM_FORMAT_ABGR8888,	DRM_FORMAT_ARGB8888,	DRM_FORMAT_RGB565,
	DRM_FORMAT_XBGR8888,	DRM_FORMAT_XRGB8888,	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
};

static const uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
						   DRM_FORMAT_NV21, DRM_FORMAT_NV12,
						   DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };
static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
{
	struct drm_amdgpu_info info_args = { 0 };

	info_args.return_pointer = (uintptr_t)dev_info;
	info_args.return_size = sizeof(*dev_info);
	info_args.query = AMDGPU_INFO_DEV_INFO;

	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &info_args, sizeof(info_args));
}
static int sdma_init(struct amdgpu_priv *priv, int fd)
{
	union drm_amdgpu_ctx ctx_args = { { 0 } };
	union drm_amdgpu_gem_create gem_create = { { 0 } };
	struct drm_amdgpu_gem_va va_args = { 0 };
	union drm_amdgpu_gem_mmap gem_map = { { 0 } };
	struct drm_gem_close gem_close = { 0 };
	int ret;
	/* Ensure we can make a submission without BO lists. */
	if (priv->drm_version < 27)

	/* Anything outside this range needs adjustments to the SDMA copy commands. */
	if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)

	ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;

	ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));

	priv->sdma_ctx = ctx_args.out.alloc.ctx_id;
	priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
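	/* Capacity note: sdma_copy() below emits 7-dword (28-byte) packets, so a
	 * 4 KiB command buffer holds at least 146 copy commands, or a bit under
	 * 584 MiB per submission at 0x3fff00 bytes each. */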
	gem_create.in.bo_size = priv->sdma_cmdbuf_size;
	gem_create.in.alignment = 4096;
	gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;

	ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));

	priv->sdma_cmdbuf_bo = gem_create.out.handle;

	priv->sdma_cmdbuf_addr =
	    ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);
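	/* This pins the command buffer at the lowest aligned GPU virtual address;
	 * sdma_copy() places its source/destination staging windows right after it. */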
	/* Map the buffer into the GPU address space so we can use it from the GPU. */
	va_args.handle = priv->sdma_cmdbuf_bo;
	va_args.operation = AMDGPU_VA_OP_MAP;
	va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
	va_args.va_address = priv->sdma_cmdbuf_addr;
	va_args.offset_in_bo = 0;
	va_args.map_size = priv->sdma_cmdbuf_size;

	ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

	gem_map.in.handle = priv->sdma_cmdbuf_bo;
	ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);

	priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
				     fd, gem_map.out.addr_ptr);
	if (priv->sdma_cmdbuf_map == MAP_FAILED) {
		priv->sdma_cmdbuf_map = NULL;
	va_args.operation = AMDGPU_VA_OP_UNMAP;

	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

	gem_close.handle = priv->sdma_cmdbuf_bo;
	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);

	memset(&ctx_args, 0, sizeof(ctx_args));
	ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	ctx_args.in.ctx_id = priv->sdma_ctx;
	drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
static void sdma_finish(struct amdgpu_priv *priv, int fd)
{
	union drm_amdgpu_ctx ctx_args = { { 0 } };
	struct drm_amdgpu_gem_va va_args = { 0 };
	struct drm_gem_close gem_close = { 0 };

	if (!priv->sdma_cmdbuf_map)
		return;

	va_args.handle = priv->sdma_cmdbuf_bo;
	va_args.operation = AMDGPU_VA_OP_UNMAP;
	va_args.va_address = priv->sdma_cmdbuf_addr;
	va_args.offset_in_bo = 0;
	va_args.map_size = priv->sdma_cmdbuf_size;
	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

	gem_close.handle = priv->sdma_cmdbuf_bo;
	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);

	ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	ctx_args.in.ctx_id = priv->sdma_ctx;
	drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
}
static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
		     uint64_t size)
{
	const uint64_t max_size_per_cmd = 0x3fff00;
	const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
	const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
	uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
	uint64_t dst_addr = src_addr + size;
	struct drm_amdgpu_gem_va va_args = { 0 };
	int ret;
	uint64_t remaining_size = size;
	uint64_t cur_src_addr = src_addr;
	uint64_t cur_dst_addr = dst_addr;
	struct drm_amdgpu_cs_chunk_ib ib = { 0 };
	struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
	uint64_t chunk_ptrs[2];
	union drm_amdgpu_cs cs = { { 0 } };
	struct drm_amdgpu_bo_list_in bo_list = { 0 };
	struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
	union drm_amdgpu_wait_cs wait_cs = { { 0 } };
	uint64_t cmd = 0;
	if (size > UINT64_MAX - max_size_per_cmd ||
	    DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)

	/* Map both buffers into the GPU address space so we can access them from the GPU. */
	va_args.handle = src_handle;
	va_args.operation = AMDGPU_VA_OP_MAP;
	va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
	va_args.va_address = src_addr;
	va_args.map_size = size;

	ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

	va_args.handle = dst_handle;
	va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
	va_args.va_address = dst_addr;

	ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
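	/*
	 * Each loop iteration emits one 7-dword SDMA COPY_LINEAR packet: a header
	 * dword (opcode 0x01, linear copy), the byte count, a parameter dword,
	 * then the low/high halves of the source and destination GPU virtual
	 * addresses.
	 */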
	while (remaining_size) {
		uint64_t cur_size = remaining_size;
		if (cur_size > max_size_per_cmd)
			cur_size = max_size_per_cmd;

		priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
		priv->sdma_cmdbuf_map[cmd++] =
		    priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size;
		priv->sdma_cmdbuf_map[cmd++] = 0;
		priv->sdma_cmdbuf_map[cmd++] = cur_src_addr;
		priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32;
		priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr;
		priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32;

		remaining_size -= cur_size;
		cur_src_addr += cur_size;
		cur_dst_addr += cur_size;
	}
	ib.va_start = priv->sdma_cmdbuf_addr;
	ib.ib_bytes = cmd * 4;
	ib.ip_type = AMDGPU_HW_IP_DMA;

	chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
	chunks[1].length_dw = sizeof(ib) / 4;
	chunks[1].chunk_data = (uintptr_t)&ib;

	bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
	bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
	bo_list_entries[1].bo_handle = src_handle;
	bo_list_entries[1].bo_priority = 8;
	bo_list_entries[2].bo_handle = dst_handle;
	bo_list_entries[2].bo_priority = 8;

	bo_list.bo_number = 3;
	bo_list.bo_info_size = sizeof(bo_list_entries[0]);
	bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;
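	/* The BO list rides along as an inline CS chunk (AMDGPU_CHUNK_ID_BO_HANDLES)
	 * rather than a separately created BO list, which is why sdma_init()
	 * requires DRM minor version 27+. */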
	chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
	chunks[0].length_dw = sizeof(bo_list) / 4;
	chunks[0].chunk_data = (uintptr_t)&bo_list;

	chunk_ptrs[0] = (uintptr_t)&chunks[0];
	chunk_ptrs[1] = (uintptr_t)&chunks[1];

	cs.in.ctx_id = priv->sdma_ctx;
	cs.in.num_chunks = 2;
	cs.in.chunks = (uintptr_t)chunk_ptrs;

	ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
	if (ret)
		drv_log("SDMA copy command buffer submission failed %d\n", ret);

	wait_cs.in.handle = cs.out.handle;
	wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
	wait_cs.in.ctx_id = priv->sdma_ctx;
	wait_cs.in.timeout = INT64_MAX;

	ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
	if (ret) {
		drv_log("Could not wait for CS to finish\n");
	} else if (wait_cs.out.status) {
		drv_log("Infinite wait timed out, likely GPU hang.\n");
	}
	va_args.handle = dst_handle;
	va_args.operation = AMDGPU_VA_OP_UNMAP;
	va_args.flags = AMDGPU_VM_DELAY_UPDATE;
	va_args.va_address = dst_addr;
	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));

	va_args.handle = src_handle;
	va_args.operation = AMDGPU_VA_OP_UNMAP;
	va_args.flags = AMDGPU_VM_DELAY_UPDATE;
	va_args.va_address = src_addr;
	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
static int amdgpu_init(struct driver *drv)
{
	struct amdgpu_priv *priv;
	drmVersionPtr drm_version;
	struct format_metadata metadata;
	uint64_t use_flags = BO_USE_RENDER_MASK;

	priv = calloc(1, sizeof(struct amdgpu_priv));

	drm_version = drmGetVersion(drv_get_fd(drv));

	priv->drm_version = drm_version->version_minor;
	drmFreeVersion(drm_version);

	if (query_dev_info(drv_get_fd(drv), &priv->dev_info)) {

	if (dri_init(drv, DRI_PATH, "radeonsi")) {

	if (sdma_init(priv, drv_get_fd(drv))) {
		drv_log("SDMA init failed\n");
		/* Continue, as we can still successfully map things without SDMA. */
	}
	metadata.tiling = TILE_TYPE_LINEAR;
	metadata.priority = 1;
	metadata.modifier = DRM_FORMAT_MOD_LINEAR;

	drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
			     &metadata, use_flags);

	drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
			     &metadata, BO_USE_TEXTURE_MASK);

	/* NV12 format for camera, display, decoding and encoding. */
	drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
				   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);

	/* Android CTS tests require this. */
	drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

	/* Linear formats supported by display. */
	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);

	drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);

	drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
	/*
	 * The R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB: JPEG snapshots
	 * from the camera and input/output buffers for the hardware decoder/encoder.
	 */
	drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	/*
	 * The following formats will be allocated by the DRI backend and may be tiled.
	 * Since format modifier support hasn't been fully implemented yet, it's not
	 * possible to enumerate the different types of buffers (like i915 can).
	 */
	use_flags &= ~BO_USE_RENDERSCRIPT;
	use_flags &= ~BO_USE_SW_WRITE_OFTEN;
	use_flags &= ~BO_USE_SW_READ_OFTEN;
	use_flags &= ~BO_USE_LINEAR;

	metadata.tiling = TILE_TYPE_DRI;
	metadata.priority = 2;

	drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
			     &metadata, use_flags);

	/* Potentially tiled formats supported by display. */
	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);

	drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
static void amdgpu_close(struct driver *drv)
{
	sdma_finish(drv->priv, drv_get_fd(drv));
static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				   uint64_t use_flags)
{
	int ret;
	uint32_t plane, stride;
	union drm_amdgpu_gem_create gem_create = { { 0 } };
	struct amdgpu_priv *priv = bo->drv->priv;

	stride = drv_stride_from_format(format, width, 0);
	stride = ALIGN(stride, 256);
	/*
	 * Currently, the allocator used by Chrome aligns the height for Encoder/
	 * Decoder buffers, while the allocator used by Android (gralloc/minigbm)
	 * doesn't provide any alignment.
	 */
	if (use_flags & (BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER))
		height = ALIGN(height, CHROME_HEIGHT_ALIGN);
	drv_bo_from_format(bo, stride, height, format);

	gem_create.in.bo_size =
	    ALIGN(bo->meta.total_size, priv->dev_info.virtual_address_alignment);
	gem_create.in.alignment = 256;
	gem_create.in.domain_flags = 0;

	if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
		gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
	/*
	 * Scanout in GTT requires USWC; otherwise, try to use cacheable memory
	 * for buffers that are read often, because uncacheable reads can be
	 * very slow. USWC should be faster on the GPU, though.
	 */
	if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
		gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	/* Allocate the buffer with the preferred heap. */
	ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
				  sizeof(gem_create));

	for (plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = gem_create.out.handle;

	bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR;
static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
			    uint64_t use_flags)
{
	struct combination *combo;

	combo = drv_get_combination(bo->drv, format, use_flags);

	if (combo->metadata.tiling == TILE_TYPE_DRI) {
		bool needs_alignment = false;
		/*
		 * Currently, the gralloc API doesn't differentiate between allocation-time and
		 * map-time strides. A workaround for amdgpu DRI buffers is to always align to
		 * 256 at allocation time.
		 *
		 * See b/115946221, b/117942643.
		 */
		if (use_flags & (BO_USE_SW_MASK))
			needs_alignment = true;

		if (use_flags & (BO_USE_SCANOUT))
			needs_alignment = true;
		if (needs_alignment) {
			uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
			width = ALIGN(width, 256 / bytes_per_pixel);
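			/* e.g. a 32 bpp format is rounded to a 64-pixel multiple,
			 * which yields a 256-byte-aligned stride. */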
		}

		return dri_bo_create(bo, width, height, format, use_flags);
	}

	return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
}
static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
					   uint32_t format, const uint64_t *modifiers,
					   uint32_t count)
{
	bool only_use_linear = true;

	for (uint32_t i = 0; i < count; ++i)
		if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
			only_use_linear = false;

	if (only_use_linear)
		return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);

	return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count);
}
static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
{
	bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR;
	if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) {
		struct combination *combo;
		combo = drv_get_combination(bo->drv, data->format, data->use_flags);

		dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI;
	}

	if (dri_tiling)
		return dri_bo_import(bo, data);

	return drv_prime_bo_import(bo, data);
}
static int amdgpu_destroy_bo(struct bo *bo)
{
	if (bo->priv)
		return dri_bo_destroy(bo);
	else
		return drv_gem_bo_destroy(bo);
}
static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	void *addr = MAP_FAILED;
	int ret;
	union drm_amdgpu_gem_mmap gem_map = { { 0 } };
	struct drm_amdgpu_gem_create_in bo_info = { 0 };
	struct drm_amdgpu_gem_op gem_op = { 0 };
	uint32_t handle = bo->handles[plane].u32;
	struct amdgpu_linear_vma_priv *priv = NULL;
	struct amdgpu_priv *drv_priv;

	if (bo->priv)
		return dri_bo_map(bo, vma, plane, map_flags);

	drv_priv = bo->drv->priv;
	gem_op.handle = handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));

	vma->length = bo_info.bo_size;
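	/*
	 * For VRAM or USWC (write-combined) buffers, CPU reads through a direct
	 * mapping would be uncached and very slow, so stage through a GTT copy
	 * instead: allocate a shadow BO, SDMA-copy the contents into it, and map
	 * that. amdgpu_unmap_bo() copies any CPU writes back.
	 */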
	if (((bo_info.domains & AMDGPU_GEM_DOMAIN_VRAM) ||
	     (bo_info.domain_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)) &&
	    drv_priv->sdma_cmdbuf_map) {
		union drm_amdgpu_gem_create gem_create = { { 0 } };

		priv = calloc(1, sizeof(struct amdgpu_linear_vma_priv));

		gem_create.in.bo_size = bo_info.bo_size;
		gem_create.in.alignment = 4096;
		gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;

		ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_CREATE, &gem_create,
					  sizeof(gem_create));
		if (ret)
			drv_log("GEM create failed\n");

		priv->map_flags = map_flags;
		handle = priv->handle = gem_create.out.handle;

		ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handles[0].u32, priv->handle,
				bo_info.bo_size);
		if (ret)
			drv_log("SDMA copy for read failed\n");
	gem_map.in.handle = handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
	if (ret)
		drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");

	addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.out.addr_ptr);
	if (addr == MAP_FAILED)

	struct drm_gem_close gem_close = { 0 };
	gem_close.handle = priv->handle;
	drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
{
	if (bo->priv)
		return dri_bo_unmap(bo, vma);

	int r = munmap(vma->addr, vma->length);
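	/* If this mapping went through a staging GTT copy, flush any CPU writes
	 * back to the real BO with SDMA before the shadow BO is freed. */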
	struct amdgpu_linear_vma_priv *priv = vma->priv;
	struct drm_gem_close gem_close = { 0 };

	if (BO_MAP_WRITE & priv->map_flags) {
		r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
			      bo->handles[0].u32, vma->length);
	}

	gem_close.handle = priv->handle;
	r = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	union drm_amdgpu_gem_wait_idle wait_idle = { { 0 } };
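	/* Block until all in-flight GPU work on this BO finishes, so the CPU
	 * sees up-to-date contents. */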
	wait_idle.in.handle = bo->handles[0].u32;
	wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE;

	ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &wait_idle,
				  sizeof(wait_idle));
	if (ret)
		drv_log("DRM_AMDGPU_GEM_WAIT_IDLE failed with %d\n", ret);

	if (ret == 0 && wait_idle.out.status)
		drv_log("DRM_AMDGPU_GEM_WAIT_IDLE BO is busy\n");
static uint32_t amdgpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: see b/28671744. */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		return DRM_FORMAT_NV12;
const struct backend backend_amdgpu = {
	.close = amdgpu_close,
	.bo_create = amdgpu_create_bo,
	.bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
	.bo_destroy = amdgpu_destroy_bo,
	.bo_import = amdgpu_import_bo,
	.bo_map = amdgpu_map_bo,
	.bo_unmap = amdgpu_unmap_bo,
	.bo_invalidate = amdgpu_bo_invalidate,
	.resolve_format = amdgpu_resolve_format,
	.num_planes_from_modifier = dri_num_planes_from_modifier,
};