amdgpu: add public bo list interface v3
amdgpu/amdgpu_bo.c (android-x86/external-libdrm.git)

/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>     /* for the assert() calls below */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"

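/* Close a GEM handle on the given device; the ioctl's return value is
 * not checked, since callers use this on teardown paths. */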
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

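/* Final teardown once the last reference is gone: unregister the buffer
 * from the device's hash tables, release any leftover CPU mapping, close
 * the KMS handle and return the virtual address range to the VA manager. */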
void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
        /* Remove the buffer from the hash tables. */
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_remove(bo->dev->bo_handles,
                               (void*)(uintptr_t)bo->handle);
        if (bo->flink_name) {
                util_hash_table_remove(bo->dev->bo_flink_names,
                                       (void*)(uintptr_t)bo->flink_name);
        }
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        /* Release CPU access. */
        if (bo->cpu_map_count > 0) {
                bo->cpu_map_count = 1;
                amdgpu_bo_cpu_unmap(bo);
        }

        amdgpu_close_kms_handle(bo->dev, bo->handle);
        pthread_mutex_destroy(&bo->cpu_access_mutex);
        amdgpu_vamgr_free_va(&bo->dev->vamgr, bo->virtual_mc_base_address,
                             bo->alloc_size);
        free(bo);
}

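/* Allocate a buffer object in the requested heap (GTT and/or VRAM), find
 * and map a GPU virtual address range for it, and return the handle plus
 * the MC base address to the caller. */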
int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    struct amdgpu_bo_alloc_result *info)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified. */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap & AMDGPU_GEM_DOMAIN_MASK;
        args.in.domain_flags = alloc_buffer->flags & AMDGPU_GEM_CREATE_CPU_GTT_MASK;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        /* Map the buffer to the GPU virtual address space. */
        {
                union drm_amdgpu_gem_va va;

                memset(&va, 0, sizeof(va));

                bo->virtual_mc_base_address =
                        amdgpu_vamgr_find_va(&dev->vamgr,
                                             alloc_buffer->alloc_size,
                                             alloc_buffer->phys_alignment);

                va.in.handle = bo->handle;
                va.in.operation = AMDGPU_VA_OP_MAP;
                va.in.flags =   AMDGPU_VM_PAGE_READABLE |
                                AMDGPU_VM_PAGE_WRITEABLE |
                                AMDGPU_VM_PAGE_EXECUTABLE;
                va.in.va_address = bo->virtual_mc_base_address;

                r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
                if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
                        amdgpu_bo_free_internal(bo);
                        /* Don't report success when the kernel rejected
                         * the VA mapping. */
                        return r ? r : -EINVAL;
                }
                pthread_mutex_lock(&dev->bo_table_mutex);

                util_hash_table_set(dev->bo_vas,
                                    (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
                pthread_mutex_unlock(&dev->bo_table_mutex);
        }

        info->buf_handle = bo;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}

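/* Attach metadata to a buffer object: tiling info, flags and an opaque
 * UMD blob of at most sizeof(args.data.data) bytes. */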
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

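/* Fill an amdgpu_bo_info from two kernel queries: the GEM metadata op
 * for tiling info and the UMD blob, and the GEM create-info op for size,
 * alignment, heap and allocation flags. */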
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (intptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

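/* Record the KMS handle -> bo mapping so a later import of the same
 * handle resolves to this amdgpu_bo instead of creating a duplicate. */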
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_handles,
                            (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

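/* Create a GEM flink name for the buffer. If the device was opened
 * through a different fd than the flink fd (e.g. a render node), the
 * handle is first moved to the flink fd via PRIME so the name can be
 * generated there. */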
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd,
                                               &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_flink_names,
                            (void*)(uintptr_t)bo->flink_name,
                            bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return 0;
}

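/* Export a buffer as a flink name, a KMS handle or a dma-buf fd,
 * registering it in the device tables so a later import of the shared
 * handle finds the same bo. */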
int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                amdgpu_add_handle_to_table(bo);
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                amdgpu_add_handle_to_table(bo);
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

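/* Import a shared buffer. If the underlying handle is already known, the
 * existing bo is returned with its refcount bumped; otherwise a new bo is
 * created, mapped into the GPU virtual address space and registered. */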
int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        union drm_amdgpu_gem_va va;
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r) {
                        return r;
                }

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = util_hash_table_get(dev->bo_flink_names,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = util_hash_table_get(dev->bo_handles,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);

                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                output->virtual_mc_base_address =
                        bo->virtual_mc_base_address;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                        amdgpu_close_kms_handle(dev, shared_handle);
                }
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                util_hash_table_set(dev->bo_flink_names,
                                    (void*)(uintptr_t)bo->flink_name, bo);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        bo->virtual_mc_base_address =
                amdgpu_vamgr_find_va(&dev->vamgr, bo->alloc_size, 1 << 20);

        memset(&va, 0, sizeof(va));
        va.in.handle = bo->handle;
        va.in.operation = AMDGPU_VA_OP_MAP;
        va.in.va_address = bo->virtual_mc_base_address;
        va.in.flags =   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE;

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                /* Dropping the last reference frees the VA range in
                 * amdgpu_bo_free_internal(); freeing it here as well
                 * would release it twice. */
                amdgpu_bo_reference(&bo, NULL);
                return r ? r : -EINVAL;
        }

        util_hash_table_set(dev->bo_vas,
                            (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
        util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        output->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}

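/* Public free entry point: drop the caller's reference; the buffer is
 * destroyed by amdgpu_bo_free_internal() once the last reference goes. */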
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        /* Just drop the reference. */
        amdgpu_bo_reference(&buf_handle, NULL);
        return 0;
}

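/* Map the buffer for CPU access. Mappings are refcounted: repeated calls
 * hand back the cached pointer, and only the first call asks the kernel
 * for a mmap offset and actually maps the pages. */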
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

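/* Drop one CPU mapping reference; the pages are only munmap'ed when the
 * count reaches zero. Returns -EBADMSG if the buffer is not mapped. */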
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EBADMSG;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

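/* Report the allocation granularities the kernel exposed at device init:
 * the PTE fragment size for local (VRAM) and the GART page size for
 * remote (GTT) allocations. */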
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                       struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

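/* Wait until the buffer is idle or the timeout expires; on success *busy
 * reports whether the buffer was still busy when the wait returned. */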
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

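/* Wrap an existing anonymous CPU allocation in a buffer object: round the
 * range out to page boundaries, register it with the userptr ioctl and
 * map it into the GPU virtual address space. The returned MC address
 * corresponds to the original, possibly unaligned, start of the memory. */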
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu,
                                   uint64_t size,
                                   struct amdgpu_bo_alloc_result *info)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;
        union drm_amdgpu_gem_va va;
        uintptr_t cpu0;
        uint32_t ps, off;

        memset(&args, 0, sizeof(args));
        ps = getpagesize();

        cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
        off = (uintptr_t)cpu - cpu0;
        size = ROUND_UP(size + off, ps);

        args.addr = cpu0;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                /* Don't leak the kernel handle created above. */
                amdgpu_close_kms_handle(dev, args.handle);
                return -ENOMEM;
        }

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;
        bo->virtual_mc_base_address =
                amdgpu_vamgr_find_va(&dev->vamgr, size, 4 * 1024);

        memset(&va, 0, sizeof(va));
        va.in.handle = bo->handle;
        va.in.operation = AMDGPU_VA_OP_MAP;
        va.in.flags =   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE;
        va.in.va_address = bo->virtual_mc_base_address;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
                amdgpu_bo_free_internal(bo);
                /* Don't report success when the kernel rejected the
                 * VA mapping. */
                return r ? r : -EINVAL;
        }
        pthread_mutex_lock(&dev->bo_table_mutex);
        util_hash_table_set(dev->bo_vas,
                            (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);
        info->buf_handle = bo;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->virtual_mc_base_address += off;

        return r;
}

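/* Create a kernel-side buffer list for command submission from an array
 * of buffer handles and optional per-buffer priorities (0 is used when
 * resource_prios is NULL).
 *
 * A minimal usage sketch (error handling elided; bo_a and bo_b stand for
 * two previously allocated buffer objects):
 *
 *      amdgpu_bo_handle bos[2] = { bo_a, bo_b };
 *      amdgpu_bo_list_handle list;
 *      int r;
 *
 *      r = amdgpu_bo_list_create(dev, 2, bos, NULL, &list);
 *      // ... reference the list in a command submission ...
 *      r = amdgpu_bo_list_destroy(list);
 */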
int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        list = alloca(sizeof(struct drm_amdgpu_bo_list_entry) *
                      number_of_resources);

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        if (r)
                return r;

        *result = calloc(1, sizeof(struct amdgpu_bo_list));
        if (!*result) {
                uint32_t handle = args.out.list_handle;

                /* Tear the kernel-side list back down so it doesn't leak. */
                memset(&args, 0, sizeof(args));
                args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
                args.in.list_handle = handle;
                drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                    &args, sizeof(args));
                return -ENOMEM;
        }
        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

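/* Destroy a kernel-side buffer list and free the wrapper. The wrapper is
 * kept if the ioctl fails so the caller can retry. */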
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}