amdgpu: add helper for VM mapping v2
android-x86/external-libdrm.git: amdgpu/amdgpu_bo.c

/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				     uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
	/* Remove the buffer from the hash tables. */
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_remove(bo->dev->bo_handles,
			       (void*)(uintptr_t)bo->handle);
	if (bo->flink_name) {
		util_hash_table_remove(bo->dev->bo_flink_names,
				       (void*)(uintptr_t)bo->flink_name);
	}
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	/* Release CPU access by forcing the final unmap. */
	if (bo->cpu_map_count > 0) {
		bo->cpu_map_count = 1;
		amdgpu_bo_cpu_unmap(bo);
	}

	amdgpu_close_kms_handle(bo->dev, bo->handle);
	pthread_mutex_destroy(&bo->cpu_access_mutex);
	/* Only release the VA range if one was actually reserved. */
	if (bo->virtual_mc_base_address != AMDGPU_INVALID_VA_ADDRESS)
		amdgpu_vamgr_free_va(&bo->dev->vamgr,
				     bo->virtual_mc_base_address,
				     bo->alloc_size);
	free(bo);
}

/* Map the buffer into the GPU virtual address space.
 *
 * On failure the BO is left untouched; the caller is responsible for
 * freeing it. */
static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
{
	amdgpu_device_handle dev = bo->dev;
	union drm_amdgpu_gem_va va;
	int r;

	memset(&va, 0, sizeof(va));

	bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
					 bo->alloc_size, alignment);

	if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
		return -ENOSPC;

	va.in.handle = bo->handle;
	va.in.operation = AMDGPU_VA_OP_MAP;
	va.in.flags =	AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE |
			AMDGPU_VM_PAGE_EXECUTABLE;
	va.in.va_address = bo->virtual_mc_base_address;
	va.in.offset_in_bo = 0;
	va.in.map_size = ALIGN(bo->alloc_size, getpagesize());

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
	if (r)
		return r;
	/* The ioctl can succeed and still report a VA error in the result,
	 * so don't return the (zero) ioctl status in that case. */
	if (va.out.result == AMDGPU_VA_RESULT_ERROR)
		return -EINVAL;

	return 0;
}

int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    struct amdgpu_bo_alloc_result *info)
{
	struct amdgpu_bo *bo;
	union drm_amdgpu_gem_create args;
	unsigned heap = alloc_buffer->preferred_heap;
	int r = 0;

	/* It's an error if the heap is not specified */
	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
		return -EINVAL;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = alloc_buffer->alloc_size;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = heap & AMDGPU_GEM_DOMAIN_MASK;
	args.in.domain_flags = alloc_buffer->flags & AMDGPU_GEM_CREATE_CPU_GTT_MASK;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r) {
		free(bo);
		return r;
	}

	bo->handle = args.out.handle;

	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	r = amdgpu_bo_map(bo, alloc_buffer->phys_alignment);
	if (r) {
		amdgpu_bo_free_internal(bo);
		return r;
	}

	info->buf_handle = bo;
	info->virtual_mc_base_address = bo->virtual_mc_base_address;
	return 0;
}

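/*
 * Usage sketch (illustration only, not part of this file): allocating a
 * page-sized GTT buffer. "dev" is assumed to come from
 * amdgpu_device_initialize().
 */
#if 0
static int example_alloc_gtt_page(amdgpu_device_handle dev)
{
	struct amdgpu_bo_alloc_request req = {0};
	struct amdgpu_bo_alloc_result res;
	int r;

	req.alloc_size = 4096;
	req.phys_alignment = 4096;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(dev, &req, &res);
	if (r)
		return r;

	/* res.virtual_mc_base_address is the GPU VA; res.buf_handle owns
	 * both the buffer and its VA mapping. */
	return amdgpu_bo_free(res.buf_handle);
}
#endif
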
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
			   struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

int amdgpu_bo_query_info(amdgpu_bo_handle bo,
			 struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->virtual_mc_base_address = bo->virtual_mc_base_address;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

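/*
 * Usage sketch (illustration only): a set/query metadata round trip on an
 * existing BO. The 4-byte payload is arbitrary; real UMDs store tiling and
 * format data here so other processes can interpret a shared buffer.
 */
#if 0
static int example_metadata_roundtrip(amdgpu_bo_handle bo)
{
	struct amdgpu_bo_metadata meta = {0};
	struct amdgpu_bo_info info;
	int r;

	meta.size_metadata = 4;
	memcpy(meta.umd_metadata, "UMD", 4);

	r = amdgpu_bo_set_metadata(bo, &meta);
	if (r)
		return r;

	r = amdgpu_bo_query_info(bo, &info);
	if (r)
		return r;

	/* info.metadata now mirrors what was set above. */
	return memcmp(info.metadata.umd_metadata, "UMD", 4) ? -EINVAL : 0;
}
#endif
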
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_handles,
			    (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_flink_names,
			    (void*)(uintptr_t)bo->flink_name,
			    bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return 0;
}

int amdgpu_bo_export(amdgpu_bo_handle bo,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		amdgpu_add_handle_to_table(bo);
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		amdgpu_add_handle_to_table(bo);
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
					  (int*)shared_handle);
	}
	return -EINVAL;
}

int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r)
			return r;

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);

		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		output->virtual_mc_base_address =
			bo->virtual_mc_base_address;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd)
			amdgpu_close_kms_handle(dev, shared_handle);
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	r = amdgpu_bo_map(bo, 1 << 20);
	if (r) {
		/* Drop the table lock first; freeing the BO takes it again. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		amdgpu_bo_reference(&bo, NULL);
		return r;
	}

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	output->virtual_mc_base_address = bo->virtual_mc_base_address;
	return 0;
}

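/*
 * Usage sketch (illustration only): sharing a BO between two devices or
 * processes via a dma-buf file descriptor. Transporting the fd (e.g. over
 * a Unix socket with SCM_RIGHTS) is outside the scope of this sketch.
 */
#if 0
static int example_share(amdgpu_bo_handle bo, amdgpu_device_handle importer)
{
	struct amdgpu_bo_import_result res;
	uint32_t fd;
	int r;

	r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &fd);
	if (r)
		return r;

	r = amdgpu_bo_import(importer, amdgpu_bo_handle_type_dma_buf_fd,
			     fd, &res);
	close(fd);
	if (r)
		return r;

	/* res.buf_handle references the same memory; drop it when done. */
	return amdgpu_bo_free(res.buf_handle);
}
#endif
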
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	/* Just drop the reference. */
	amdgpu_bo_reference(&buf_handle, NULL);
	return 0;
}

int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EBADMSG;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}

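/*
 * Usage sketch (illustration only): CPU mapping is reference counted, so
 * every amdgpu_bo_cpu_map() must be balanced by one amdgpu_bo_cpu_unmap().
 * "size" is assumed not to exceed the BO's allocation size.
 */
#if 0
static int example_cpu_clear(amdgpu_bo_handle bo, uint64_t size)
{
	void *cpu;
	int r;

	r = amdgpu_bo_cpu_map(bo, &cpu);
	if (r)
		return r;

	memset(cpu, 0, size);

	return amdgpu_bo_cpu_unmap(bo);
}
#endif
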
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
			    uint64_t timeout_ns,
			    bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}

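/*
 * Usage sketch (illustration only): poll a BO with a 1 ms timeout per call
 * until the GPU is done with it.
 */
#if 0
static int example_wait_idle(amdgpu_bo_handle bo)
{
	bool busy = true;
	int r;

	do {
		r = amdgpu_bo_wait_for_idle(bo, 1000000 /* 1 ms in ns */,
					    &busy);
		if (r)
			return r;
	} while (busy);

	return 0;
}
#endif
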
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				    void *cpu,
				    uint64_t size,
				    struct amdgpu_bo_alloc_result *info)
{
	int r;
	struct amdgpu_bo *bo;
	struct drm_amdgpu_gem_userptr args;
	uintptr_t cpu0;
	uint32_t ps, off;

	memset(&args, 0, sizeof(args));
	ps = getpagesize();

	/* The kernel expects a page-aligned range, so round the user pointer
	 * down and the size up, and remember the offset within the page. */
	cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
	off = (uintptr_t)cpu - cpu0;
	size = ROUND_UP(size + off, ps);

	args.addr = cpu0;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		return r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		amdgpu_close_kms_handle(dev, args.handle);
		return -ENOMEM;
	}

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = args.handle;

	r = amdgpu_bo_map(bo, 1 << 12);
	if (r) {
		amdgpu_bo_free_internal(bo);
		return r;
	}

	/* Report the VA of the first user byte, not of the page start. */
	info->buf_handle = bo;
	info->virtual_mc_base_address = bo->virtual_mc_base_address;
	info->virtual_mc_base_address += off;

	return 0;
}

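/*
 * Usage sketch (illustration only): wrapping user memory in a BO. The
 * AMDGPU_GEM_USERPTR_ANONONLY flag above requires anonymous memory; a
 * 64 KiB malloc is assumed to qualify here. The returned VA already
 * includes the in-page offset of the pointer, so it addresses the first
 * user byte.
 */
#if 0
static int example_userptr(amdgpu_device_handle dev)
{
	struct amdgpu_bo_alloc_result res;
	void *ptr;
	int r;

	ptr = malloc(65536);
	if (!ptr)
		return -ENOMEM;

	r = amdgpu_create_bo_from_user_mem(dev, ptr, 65536, &res);
	if (r) {
		free(ptr);
		return r;
	}

	/* GPU work using res.virtual_mc_base_address would go here. */

	amdgpu_bo_free(res.buf_handle);
	free(ptr);
	return 0;
}
#endif
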
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	list = calloc(number_of_resources,
		      sizeof(struct drm_amdgpu_bo_list_entry));
	if (list == NULL)
		return -ENOMEM;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	if (r)
		goto out;

	*result = calloc(1, sizeof(struct amdgpu_bo_list));
	if (!*result) {
		r = -ENOMEM;
		goto out;
	}
	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
out:
	free(list);
	return r;
}

int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

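/*
 * Usage sketch (illustration only): building a BO list for command
 * submission and tearing it down again. Passing NULL for the priority
 * array gives every entry priority 0.
 */
#if 0
static int example_bo_list(amdgpu_device_handle dev,
			   amdgpu_bo_handle bo1, amdgpu_bo_handle bo2)
{
	amdgpu_bo_handle bos[] = { bo1, bo2 };
	amdgpu_bo_list_handle list;
	int r;

	r = amdgpu_bo_list_create(dev, 2, bos, NULL, &list);
	if (r)
		return r;

	/* The list would now be referenced by a command submission. */

	return amdgpu_bo_list_destroy(list);
}
#endif
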
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	list = calloc(number_of_resources,
		      sizeof(struct drm_amdgpu_bo_list_entry));
	if (list == NULL)
		return -ENOMEM;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}