/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				    uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}
void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
	/* Remove the buffer from the hash tables. */
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_remove(bo->dev->bo_handles,
			       (void*)(uintptr_t)bo->handle);
	if (bo->flink_name) {
		util_hash_table_remove(bo->dev->bo_flink_names,
				       (void*)(uintptr_t)bo->flink_name);
	}
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	/* Release CPU access. */
	if (bo->cpu_map_count > 0) {
		bo->cpu_map_count = 1;
		amdgpu_bo_cpu_unmap(bo);
	}

	amdgpu_close_kms_handle(bo->dev, bo->handle);
	pthread_mutex_destroy(&bo->cpu_access_mutex);

	/* Release the VA range only if one was actually assigned. */
	if (bo->virtual_mc_base_address != 0 &&
	    bo->virtual_mc_base_address != AMDGPU_INVALID_VA_ADDRESS)
		amdgpu_vamgr_free_va(&bo->dev->vamgr,
				     bo->virtual_mc_base_address,
				     bo->alloc_size);
	free(bo);
}
/* Map the buffer to the GPU virtual address space. */
static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
{
	amdgpu_device_handle dev = bo->dev;
	union drm_amdgpu_gem_va va;
	int r;

	memset(&va, 0, sizeof(va));

	bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
							   bo->alloc_size,
							   alignment);
	if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
		return -ENOSPC;

	va.in.handle = bo->handle;
	va.in.operation = AMDGPU_VA_OP_MAP;
	va.in.flags = AMDGPU_VM_PAGE_READABLE |
		      AMDGPU_VM_PAGE_WRITEABLE |
		      AMDGPU_VM_PAGE_EXECUTABLE;
	va.in.va_address = bo->virtual_mc_base_address;
	va.in.offset_in_bo = 0;
	va.in.map_size = ALIGN(bo->alloc_size, getpagesize());

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
	if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
		/* Give the VA range back; the caller still owns the buffer
		 * object and is responsible for freeing it, which avoids a
		 * double free on the callers' error paths. */
		amdgpu_vamgr_free_va(&dev->vamgr,
				     bo->virtual_mc_base_address,
				     bo->alloc_size);
		bo->virtual_mc_base_address = 0;
		return -EINVAL;
	}

	return 0;
}
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    struct amdgpu_bo_alloc_result *info)
{
	struct amdgpu_bo *bo;
	union drm_amdgpu_gem_create args;
	unsigned heap = alloc_buffer->preferred_heap;
	int r;

	/* It's an error if the heap is not specified. */
	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
		return -EINVAL;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = alloc_buffer->alloc_size;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = heap & AMDGPU_GEM_DOMAIN_MASK;
	args.in.domain_flags = alloc_buffer->flags & AMDGPU_GEM_CREATE_CPU_GTT_MASK;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r) {
		free(bo);
		return r;
	}

	bo->handle = args.out.handle;

	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	r = amdgpu_bo_map(bo, alloc_buffer->phys_alignment);
	if (r) {
		amdgpu_bo_free_internal(bo);
		return r;
	}

	info->buf_handle = bo;
	info->virtual_mc_base_address = bo->virtual_mc_base_address;
	return 0;
}
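
/*
 * Usage sketch (illustrative, not part of this file): allocating a 64 KiB,
 * page-aligned buffer in VRAM. The "dev" handle is assumed to come from an
 * earlier amdgpu_device_initialize() call.
 *
 *	struct amdgpu_bo_alloc_request req = {0};
 *	struct amdgpu_bo_alloc_result res = {0};
 *
 *	req.alloc_size = 64 * 1024;
 *	req.phys_alignment = 4096;
 *	req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
 *	if (amdgpu_bo_alloc(dev, &req, &res) == 0) {
 *		... use res.buf_handle and res.virtual_mc_base_address,
 *		then release the buffer with amdgpu_bo_free(res.buf_handle) ...
 *	}
 */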
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
			   struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
			 struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->virtual_mc_base_address = bo->virtual_mc_base_address;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_handles,
			    (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;

	/* Nothing to do if the buffer was flinked before. */
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}

	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_flink_names,
			    (void*)(uintptr_t)bo->flink_name,
			    bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return 0;
}
int amdgpu_bo_export(amdgpu_bo_handle bo,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		amdgpu_add_handle_to_table(bo);
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		amdgpu_add_handle_to_table(bo);
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
					  (int*)shared_handle);
	}
	return -EINVAL;
}
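
/*
 * Usage sketch (illustrative): exporting a buffer as a dma-buf file
 * descriptor so another process can use it. The fd is assumed to be passed
 * over a UNIX domain socket and re-imported on the receiving side.
 *
 *	uint32_t fd;
 *
 *	if (amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &fd) == 0) {
 *		... send fd to the other process, which then calls
 *		amdgpu_bo_import(dev2, amdgpu_bo_handle_type_dma_buf_fd,
 *				 fd, &result) ...
 *	}
 */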
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	uint32_t handle = 0;
	uint64_t dma_buf_size = 0;
	int dma_fd;
	int r;

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r)
			return r;

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);

		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		output->virtual_mc_base_address =
			bo->virtual_mc_base_address;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd)
			amdgpu_close_kms_handle(dev, shared_handle);
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);
			close(dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}

		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize the rest of the structure. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	r = amdgpu_bo_map(bo, 1 << 20);
	if (r) {
		amdgpu_bo_reference(&bo, NULL);
		return r;
	}

	/* Retake the lock before publishing the new buffer in the table. */
	pthread_mutex_lock(&dev->bo_table_mutex);
	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	output->virtual_mc_base_address = bo->virtual_mc_base_address;
	return 0;
}
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	/* Just drop the reference. */
	amdgpu_bo_reference(&buf_handle, NULL);
	return 0;
}
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}
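
/*
 * Usage sketch (illustrative): CPU mappings are reference-counted, so every
 * successful amdgpu_bo_cpu_map() call must be paired with exactly one
 * amdgpu_bo_cpu_unmap(). "bo" is assumed to be a valid buffer handle.
 *
 *	void *ptr;
 *
 *	if (amdgpu_bo_cpu_map(bo, &ptr) == 0) {
 *		memset(ptr, 0, 4096);
 *		amdgpu_bo_cpu_unmap(bo);
 *	}
 */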
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				       struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
			    uint64_t timeout_ns,
			    bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}
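
/*
 * Usage sketch (illustrative): waiting up to one millisecond for the GPU to
 * finish with a buffer. On success, "busy" is set from the kernel's status
 * and indicates whether the buffer was still in use when the wait ended.
 *
 *	bool busy;
 *
 *	if (amdgpu_bo_wait_for_idle(bo, 1000000, &busy) == 0 && !busy) {
 *		... the GPU is done with the buffer; safe to reuse it ...
 *	}
 */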
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				   void *cpu,
				   uint64_t size,
				   struct amdgpu_bo_alloc_result *info)
{
	struct amdgpu_bo *bo;
	struct drm_amdgpu_gem_userptr args;
	uintptr_t cpu0;
	uint32_t ps, off;
	int r;

	memset(&args, 0, sizeof(args));
	ps = getpagesize();

	cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
	off = (uintptr_t)cpu - cpu0;
	size = ROUND_UP(size + off, ps);

	args.addr = cpu0;
	args.size = size;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		return r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = args.handle;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	r = amdgpu_bo_map(bo, 1 << 12);
	if (r) {
		amdgpu_bo_free_internal(bo);
		return r;
	}

	info->buf_handle = bo;
	info->virtual_mc_base_address = bo->virtual_mc_base_address;
	info->virtual_mc_base_address += off;

	return 0;
}
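
/*
 * Worked example of the rounding above (assuming 4 KiB pages): for
 * cpu = 0x10234 and size = 0x2000, cpu0 = ROUND_DOWN(0x10234, 0x1000) =
 * 0x10000 and off = 0x234, so size becomes ROUND_UP(0x2234, 0x1000) =
 * 0x3000, i.e. three whole pages are pinned. The returned GPU address is
 * the mapped base plus 0x234, which points at the caller's first byte.
 */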
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	list = calloc(number_of_resources,
		      sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r)
		return r;

	*result = calloc(1, sizeof(struct amdgpu_bo_list));
	if (!*result)
		return -ENOMEM;
	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}
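
/*
 * Usage sketch (illustrative): building a two-entry list that a command
 * submission can reference; "bo_a" and "bo_b" are assumed to be existing
 * buffer handles. Passing NULL priorities gives every entry priority 0.
 *
 *	amdgpu_bo_handle bos[2] = { bo_a, bo_b };
 *	amdgpu_bo_list_handle list;
 *
 *	if (amdgpu_bo_list_create(dev, 2, bos, NULL, &list) == 0) {
 *		... reference the list from a command submission, then ...
 *		amdgpu_bo_list_destroy(list);
 *	}
 */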
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	if (!r)
		free(list);

	return r;
}
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	list = calloc(number_of_resources,
		      sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}