
[android-x86/external-libdrm.git] / amdgpu / amdgpu_bo.c
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

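/* Close a GEM handle with the generic DRM_IOCTL_GEM_CLOSE ioctl. */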
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

/* map the buffer to the GPU virtual address space */
static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
{
        amdgpu_device_handle dev = bo->dev;
        struct drm_amdgpu_gem_va va;
        int r;

        memset(&va, 0, sizeof(va));

        bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr,
                                         bo->alloc_size, alignment, 0);

        if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
                return -ENOSPC;

        va.handle = bo->handle;
        va.operation = AMDGPU_VA_OP_MAP;
        va.flags =      AMDGPU_VM_PAGE_READABLE |
                        AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE;
        va.va_address = bo->virtual_mc_base_address;
        va.offset_in_bo = 0;
        va.map_size = ALIGN(bo->alloc_size, getpagesize());

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r) {
                /* Return the VA range and let the caller free the bo;
                 * freeing it here as well would cause a double free. */
                amdgpu_vamgr_free_va(dev->vamgr, bo->virtual_mc_base_address,
                                     bo->alloc_size);
                bo->virtual_mc_base_address = AMDGPU_INVALID_VA_ADDRESS;
                return r;
        }

        return 0;
}

/* unmap the buffer from the GPU virtual address space */
static void amdgpu_bo_unmap(amdgpu_bo_handle bo)
{
        amdgpu_device_handle dev = bo->dev;
        struct drm_amdgpu_gem_va va;
        int r;

        if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
                return;

        memset(&va, 0, sizeof(va));

        va.handle = bo->handle;
        va.operation = AMDGPU_VA_OP_UNMAP;
        va.flags =      AMDGPU_VM_PAGE_READABLE |
                        AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE;
        va.va_address = bo->virtual_mc_base_address;
        va.offset_in_bo = 0;
        va.map_size = ALIGN(bo->alloc_size, getpagesize());

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r) {
                fprintf(stderr, "amdgpu: VA_OP_UNMAP failed with %d\n", r);
                return;
        }

        amdgpu_vamgr_free_va(bo->dev->vamgr, bo->virtual_mc_base_address,
                             bo->alloc_size);

        bo->virtual_mc_base_address = AMDGPU_INVALID_VA_ADDRESS;
}

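/* Tear a buffer object down: drop it from the device's hash tables, release
 * any remaining CPU mapping, unmap it from the GPU VM, close the GEM handle
 * and free the structure. */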
void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
        /* Remove the buffer from the hash tables. */
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_remove(bo->dev->bo_handles,
                               (void*)(uintptr_t)bo->handle);
        if (bo->flink_name) {
                util_hash_table_remove(bo->dev->bo_flink_names,
                                       (void*)(uintptr_t)bo->flink_name);
        }
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        /* Release CPU access. */
        if (bo->cpu_map_count > 0) {
                bo->cpu_map_count = 1;
                amdgpu_bo_cpu_unmap(bo);
        }

        amdgpu_bo_unmap(bo);
        amdgpu_close_kms_handle(bo->dev, bo->handle);
        pthread_mutex_destroy(&bo->cpu_access_mutex);
        free(bo);
}

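/* Allocate a buffer object in the requested heap (GTT and/or VRAM), map it
 * into the GPU virtual address space and return the handle together with its
 * MC address. */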
int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    struct amdgpu_bo_alloc_result *info)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        r = amdgpu_bo_map(bo, alloc_buffer->phys_alignment);
        if (r) {
                amdgpu_bo_free_internal(bo);
                return r;
        }

        info->buf_handle = bo;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}

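/* Attach UMD-private metadata and tiling information to a buffer object. */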
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

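/* Query a buffer object's creation parameters (size, alignment, heap, flags)
 * and metadata from the kernel. */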
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Validate the BO passed in */
        if (!bo->handle)
                return -EINVAL;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

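/* Remember the bo in the device's handle table so that later imports of the
 * same KMS handle return this amdgpu_bo instance. */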
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_handles,
                            (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

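/* Create a GEM flink name for the buffer. If the flink device differs from
 * the render node, the handle is first transferred to the flink device via a
 * dma-buf fd. */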
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_flink_names,
                            (void*)(uintptr_t)bo->flink_name,
                            bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return 0;
}

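/* Export a buffer object as a flink name, a KMS handle or a dma-buf fd. */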
int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
                amdgpu_add_handle_to_table(bo);
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                amdgpu_add_handle_to_table(bo);
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

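/* Import a buffer object from a flink name or dma-buf fd. If the underlying
 * buffer was imported before, the existing amdgpu_bo is returned with its
 * reference count bumped. */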
int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r) {
                        return r;
                }

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = util_hash_table_get(dev->bo_flink_names,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = util_hash_table_get(dev->bo_handles,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);

                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                output->virtual_mc_base_address =
                        bo->virtual_mc_base_address;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                        amdgpu_close_kms_handle(dev, shared_handle);
                }
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                util_hash_table_set(dev->bo_flink_names,
                                    (void*)(uintptr_t)bo->flink_name, bo);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        r = amdgpu_bo_map(bo, 1 << 20);
        if (r) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                amdgpu_bo_reference(&bo, NULL);
                return r;
        }

        util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        output->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        /* Just drop the reference. */
        amdgpu_bo_reference(&buf_handle, NULL);
        return 0;
}

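/* Map the buffer for CPU access. An existing mapping is reused and only the
 * map count is incremented. */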
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

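/* Drop one CPU mapping reference; the buffer is only unmapped once the map
 * count reaches zero. */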
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EBADMSG;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

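/* Report the recommended size alignment for local (VRAM) and remote (GTT)
 * allocations. */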
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

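/* Wait until the buffer is idle or the timeout expires; *busy reports whether
 * the buffer is still in use afterwards. */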
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

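/* Wrap an anonymous user memory range in a buffer object (userptr) and map it
 * into the GPU virtual address space. The range is expanded to page
 * boundaries; the returned MC address points at the original start address. */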
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu,
                                   uint64_t size,
                                   struct amdgpu_bo_alloc_result *info)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;
        uintptr_t cpu0;
        uint32_t ps, off;

        memset(&args, 0, sizeof(args));
        ps = getpagesize();

        cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
        off = (uintptr_t)cpu - cpu0;
        size = ROUND_UP(size + off, ps);

        args.addr = cpu0;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;
        /* Initialize the mutex as the other allocation paths do, so that
         * CPU mapping and freeing of this bo work on a valid mutex. */
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        r = amdgpu_bo_map(bo, 1 << 12);
        if (r) {
                amdgpu_bo_free_internal(bo);
                return r;
        }

        info->buf_handle = bo;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->virtual_mc_base_address += off;

        return r;
}

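/* Create a kernel BO list from the given resources and optional per-BO
 * priorities. */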
int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        /* Allocate the wrapper before the ioctl so that a failed allocation
         * cannot leak the kernel list handle. */
        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                free(list);
                return -ENOMEM;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r) {
                free(*result);
                return r;
        }

        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

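/* Destroy a kernel BO list and free the wrapper on success. */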
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

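/* Replace the contents of an existing kernel BO list. */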
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (list == NULL)
                return -ENOMEM;

        /* Zero the ioctl argument as the create path does. */
        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}