amdgpu/amdgpu_bo.c (android-x86/external-libdrm.git, commit: "amdgpu: cleanup VA IOCTL handling")
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

/* map the buffer to the GPU virtual address space */
static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
{
        amdgpu_device_handle dev = bo->dev;
        struct drm_amdgpu_gem_va va;
        int r;

        memset(&va, 0, sizeof(va));

        bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr,
                                                           bo->alloc_size,
                                                           alignment);

        if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
                return -ENOSPC;

        va.handle = bo->handle;
        va.operation = AMDGPU_VA_OP_MAP;
        va.flags = AMDGPU_VM_PAGE_READABLE |
                   AMDGPU_VM_PAGE_WRITEABLE |
                   AMDGPU_VM_PAGE_EXECUTABLE;
        va.va_address = bo->virtual_mc_base_address;
        va.offset_in_bo = 0;
        va.map_size = ALIGN(bo->alloc_size, getpagesize());

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r) {
                /* Release the VA range again and report the error.  The
                 * callers already free the buffer object on failure, so
                 * freeing it here as well would be a double free. */
                amdgpu_vamgr_free_va(dev->vamgr, bo->virtual_mc_base_address,
                                     bo->alloc_size);
                bo->virtual_mc_base_address = AMDGPU_INVALID_VA_ADDRESS;
                return r;
        }

        return 0;
}

/* unmap the buffer from the GPU virtual address space */
static void amdgpu_bo_unmap(amdgpu_bo_handle bo)
{
        amdgpu_device_handle dev = bo->dev;
        struct drm_amdgpu_gem_va va;
        int r;

        if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
                return;

        memset(&va, 0, sizeof(va));

        va.handle = bo->handle;
        va.operation = AMDGPU_VA_OP_UNMAP;
        va.flags = AMDGPU_VM_PAGE_READABLE |
                   AMDGPU_VM_PAGE_WRITEABLE |
                   AMDGPU_VM_PAGE_EXECUTABLE;
        va.va_address = bo->virtual_mc_base_address;
        va.offset_in_bo = 0;
        va.map_size = ALIGN(bo->alloc_size, getpagesize());

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r) {
                fprintf(stderr, "amdgpu: VA_OP_UNMAP failed with %d\n", r);
                return;
        }

        amdgpu_vamgr_free_va(bo->dev->vamgr, bo->virtual_mc_base_address,
                             bo->alloc_size);

        bo->virtual_mc_base_address = AMDGPU_INVALID_VA_ADDRESS;
}

void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
        /* Remove the buffer from the hash tables. */
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_remove(bo->dev->bo_handles,
                               (void*)(uintptr_t)bo->handle);
        if (bo->flink_name) {
                util_hash_table_remove(bo->dev->bo_flink_names,
                                       (void*)(uintptr_t)bo->flink_name);
        }
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        /* Release CPU access. */
        if (bo->cpu_map_count > 0) {
                bo->cpu_map_count = 1;
                amdgpu_bo_cpu_unmap(bo);
        }

        amdgpu_bo_unmap(bo);
        amdgpu_close_kms_handle(bo->dev, bo->handle);
        pthread_mutex_destroy(&bo->cpu_access_mutex);
        free(bo);
}

int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    struct amdgpu_bo_alloc_result *info)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        r = amdgpu_bo_map(bo, alloc_buffer->phys_alignment);
        if (r) {
                amdgpu_bo_free_internal(bo);
                return r;
        }

        info->buf_handle = bo;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}
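
/*
 * Illustrative usage sketch (not part of the library): allocating a small
 * GTT buffer with amdgpu_bo_alloc().  The device handle "dev" is assumed to
 * have been obtained elsewhere, e.g. via amdgpu_device_initialize() on an
 * opened DRM file descriptor.  Kept under "#if 0" so it is not compiled.
 */
#if 0
static int example_alloc_gtt_bo(amdgpu_device_handle dev,
                                amdgpu_bo_handle *out_bo,
                                uint64_t *out_gpu_va)
{
        struct amdgpu_bo_alloc_request request = {};
        struct amdgpu_bo_alloc_result result = {};
        int r;

        request.alloc_size = 64 * 1024;                 /* 64 KiB */
        request.phys_alignment = 4096;                  /* page aligned */
        request.preferred_heap = AMDGPU_GEM_DOMAIN_GTT; /* GTT or VRAM is required */
        request.flags = 0;

        r = amdgpu_bo_alloc(dev, &request, &result);
        if (r)
                return r;

        *out_bo = result.buf_handle;                    /* reference owned by the caller */
        *out_gpu_va = result.virtual_mc_base_address;   /* GPU VA mapped by amdgpu_bo_map() */
        return 0;
}
#endif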

int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}
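
/*
 * Illustrative sketch (not part of the library): attaching tiling metadata
 * to a buffer and reading it back with amdgpu_bo_query_info().  The
 * "tiling" value is a placeholder; its encoding is UMD specific.
 */
#if 0
static int example_set_and_query_metadata(amdgpu_bo_handle bo, uint64_t tiling)
{
        struct amdgpu_bo_metadata meta = {};
        struct amdgpu_bo_info info = {};
        int r;

        meta.tiling_info = tiling;
        meta.size_metadata = 0;         /* no UMD specific blob in this example */

        r = amdgpu_bo_set_metadata(bo, &meta);
        if (r)
                return r;

        r = amdgpu_bo_query_info(bo, &info);
        if (r)
                return r;

        /* info.alloc_size, info.preferred_heap and info.metadata.tiling_info
         * now reflect what the kernel stores for this buffer. */
        return 0;
}
#endif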

static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_handles,
                            (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_flink_names,
                            (void*)(uintptr_t)bo->flink_name,
                            bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return 0;
}

int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
                amdgpu_add_handle_to_table(bo);
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                amdgpu_add_handle_to_table(bo);
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}
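
/*
 * Illustrative sketch (not part of the library): exporting a buffer as a
 * dma-buf file descriptor for sharing with another process or API.  The
 * returned fd is owned by the caller and must be closed when it is no
 * longer needed.
 */
#if 0
static int example_export_dma_buf(amdgpu_bo_handle bo, int *out_fd)
{
        uint32_t shared_handle;
        int r;

        r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd,
                             &shared_handle);
        if (r)
                return r;

        *out_fd = (int)shared_handle;   /* the dma-buf fd, see the cast above */
        return 0;
}
#endif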

int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r) {
                        return r;
                }

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = util_hash_table_get(dev->bo_flink_names,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = util_hash_table_get(dev->bo_handles,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);

                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                output->virtual_mc_base_address =
                        bo->virtual_mc_base_address;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                        amdgpu_close_kms_handle(dev, shared_handle);
                }
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                util_hash_table_set(dev->bo_flink_names,
                                    (void*)(uintptr_t)bo->flink_name, bo);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        r = amdgpu_bo_map(bo, 1 << 20);
        if (r) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                amdgpu_bo_reference(&bo, NULL);
                return r;
        }

        util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        output->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}
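
/*
 * Illustrative sketch (not part of the library): importing a dma-buf fd
 * that was exported by another process.  On success the returned buffer
 * handle must eventually be released with amdgpu_bo_free().
 */
#if 0
static int example_import_dma_buf(amdgpu_device_handle dev, int dma_buf_fd,
                                  amdgpu_bo_handle *out_bo)
{
        struct amdgpu_bo_import_result result = {};
        int r;

        r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
                             (uint32_t)dma_buf_fd, &result);
        if (r)
                return r;

        *out_bo = result.buf_handle;
        /* result.alloc_size and result.virtual_mc_base_address describe the
         * imported buffer; importing the same fd again returns the same
         * amdgpu_bo instance with an elevated reference count. */
        return 0;
}
#endif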

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        /* Just drop the reference. */
        amdgpu_bo_reference(&buf_handle, NULL);
        return 0;
}

int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EBADMSG;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}
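
/*
 * Illustrative sketch (not part of the library): filling a buffer from the
 * CPU.  amdgpu_bo_cpu_map() and amdgpu_bo_cpu_unmap() are reference
 * counted, so nested map/unmap pairs on the same buffer are allowed.
 */
#if 0
static int example_fill_bo(amdgpu_bo_handle bo, uint64_t size, uint8_t value)
{
        void *cpu;
        int r;

        r = amdgpu_bo_cpu_map(bo, &cpu);
        if (r)
                return r;

        memset(cpu, value, size);       /* size must not exceed the allocation */

        return amdgpu_bo_cpu_unmap(bo);
}
#endif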

int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}
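
/*
 * Illustrative sketch (not part of the library): waiting up to one second
 * for all GPU accesses to a buffer to finish before reading it back on the
 * CPU.
 */
#if 0
static int example_wait_one_second(amdgpu_bo_handle bo)
{
        bool busy = false;
        int r;

        r = amdgpu_bo_wait_for_idle(bo, 1000000000ull /* 1 s in ns */, &busy);
        if (r)
                return r;

        return busy ? -EBUSY : 0;       /* still busy after the timeout */
}
#endif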

int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                    void *cpu,
                                    uint64_t size,
                                    struct amdgpu_bo_alloc_result *info)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;
        uintptr_t cpu0;
        uint32_t ps, off;

        memset(&args, 0, sizeof(args));
        ps = getpagesize();

        cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
        off = (uintptr_t)cpu - cpu0;
        size = ROUND_UP(size + off, ps);

        args.addr = cpu0;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                amdgpu_close_kms_handle(dev, args.handle);
                return -ENOMEM;
        }

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        r = amdgpu_bo_map(bo, 1 << 12);
        if (r) {
                amdgpu_bo_free_internal(bo);
                return r;
        }

        info->buf_handle = bo;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->virtual_mc_base_address += off;

        return r;
}
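
/*
 * Illustrative sketch (not part of the library): wrapping an anonymous CPU
 * allocation so the GPU can access it.  The returned GPU address already
 * points at the start of the caller's data, even if it is not page aligned,
 * because the code above adds the page offset back in.
 */
#if 0
static int example_wrap_user_memory(amdgpu_device_handle dev, uint64_t size,
                                     void **out_cpu, amdgpu_bo_handle *out_bo,
                                     uint64_t *out_gpu_va)
{
        struct amdgpu_bo_alloc_result result = {};
        void *data;
        int r;

        data = calloc(1, size);         /* must stay valid while the BO lives */
        if (!data)
                return -ENOMEM;

        r = amdgpu_create_bo_from_user_mem(dev, data, size, &result);
        if (r) {
                free(data);
                return r;
        }

        *out_cpu = data;
        *out_bo = result.buf_handle;
        *out_gpu_va = result.virtual_mc_base_address;
        return 0;
}
#endif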

int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        /* Allocate the result up front so a failed allocation cannot leak the
         * kernel-side list handle. */
        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                free(list);
                return -ENOMEM;
        }

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r) {
                free(*result);
                return r;
        }

        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}
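
/*
 * Illustrative sketch (not part of the library): building a buffer list for
 * command submission from two buffers with default priority, then tearing
 * it down again.  Real users keep the list alive for the duration of the
 * submission.
 */
#if 0
static int example_make_bo_list(amdgpu_device_handle dev,
                                amdgpu_bo_handle bo_a, amdgpu_bo_handle bo_b)
{
        amdgpu_bo_handle resources[] = { bo_a, bo_b };
        amdgpu_bo_list_handle list;
        int r;

        r = amdgpu_bo_list_create(dev, 2, resources, NULL /* default prios */,
                                  &list);
        if (r)
                return r;

        /* ... submit command buffers referencing "list" here ... */

        return amdgpu_bo_list_destroy(list);
}
#endif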

int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (list == NULL)
                return -ENOMEM;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}