/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

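/*
 * Buffer-object (BO) management for the amdgpu DRM interface: allocation,
 * metadata, import/export (GEM flink name, KMS handle, dma-buf fd), CPU
 * mapping, userptr BOs, BO lists and GPU virtual-address mapping.
 *
 * Illustrative usage sketch (not part of this file); it assumes a device
 * handle `dev` obtained elsewhere, e.g. with amdgpu_device_initialize():
 *
 *     struct amdgpu_bo_alloc_request req = {
 *             .alloc_size = 4096,
 *             .phys_alignment = 4096,
 *             .preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *     };
 *     amdgpu_bo_handle bo;
 *     void *cpu;
 *
 *     if (!amdgpu_bo_alloc(dev, &req, &bo)) {
 *             if (!amdgpu_bo_cpu_map(bo, &cpu)) {
 *                     memset(cpu, 0, req.alloc_size);
 *                     amdgpu_bo_cpu_unmap(bo);
 *             }
 *             amdgpu_bo_free(bo);
 *     }
 */
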
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

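/*
 * Allocate a buffer object in the requested GTT/VRAM heap, register it in the
 * device's handle table and return a reference-counted amdgpu_bo.
 * Returns 0 on success or a negative errno code on failure.
 */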
int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    amdgpu_bo_handle *buf_handle)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        if (r)
                amdgpu_bo_free(bo);
        else
                *buf_handle = bo;

        return r;
}

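/*
 * Attach UMD metadata and tiling info to a buffer object via the
 * GEM_METADATA ioctl. The opaque metadata must fit into the kernel's
 * fixed-size buffer, otherwise -EINVAL is returned.
 */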
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

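/*
 * Query the allocation parameters (size, alignment, heap, flags) and the
 * previously set metadata of a buffer object.
 */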
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Validate the BO passed in */
        if (!bo->handle)
                return -EINVAL;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

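/*
 * Create a GEM flink name for the buffer object and record it in the
 * bo_flink_names table. When the flink fd differs from the render fd, the
 * handle is first moved to the flink fd through a dma-buf round trip and the
 * temporary handle is closed again after the name has been created.
 */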
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return r;
}

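/*
 * Export a buffer object as a flink name, a KMS (GEM) handle or a dma-buf
 * file descriptor, depending on the requested handle type.
 */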
int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
                                          DRM_CLOEXEC | DRM_RDWR,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

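/*
 * Import a buffer object from a flink name or a dma-buf fd. If the
 * underlying GEM handle is already known, the existing amdgpu_bo is returned
 * with its reference count bumped; otherwise a new one is created and
 * registered in the handle tables.
 */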
int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct drm_gem_close close_arg = {};
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = handle_table_lookup(&dev->bo_handles, shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);
                pthread_mutex_unlock(&dev->bo_table_mutex);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                        amdgpu_close_kms_handle(dev, shared_handle);
                }
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        close_arg.handle = open_arg.handle;
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
                                         &close_arg);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);
                        drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                r = handle_table_insert(&dev->bo_flink_names, shared_handle,
                                        bo);
                if (r) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        amdgpu_bo_free(bo);
                        return r;
                }
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        handle_table_insert(&dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        return 0;
}

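/*
 * Drop a reference on the buffer object. When the last reference goes away,
 * remove it from the handle tables, undo any remaining CPU mapping and close
 * the GEM handle.
 */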
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        struct amdgpu_device *dev;
        struct amdgpu_bo *bo = buf_handle;

        assert(bo != NULL);
        dev = bo->dev;
        pthread_mutex_lock(&dev->bo_table_mutex);

        if (update_references(&bo->refcount, NULL)) {
                /* Remove the buffer from the hash tables. */
                handle_table_remove(&dev->bo_handles, bo->handle);

                if (bo->flink_name)
                        handle_table_remove(&dev->bo_flink_names,
                                            bo->flink_name);

                /* Release CPU access. */
                if (bo->cpu_map_count > 0) {
                        bo->cpu_map_count = 1;
                        amdgpu_bo_cpu_unmap(bo);
                }

                amdgpu_close_kms_handle(dev, bo->handle);
                pthread_mutex_destroy(&bo->cpu_access_mutex);
                free(bo);
        }

        pthread_mutex_unlock(&dev->bo_table_mutex);
        return 0;
}

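/*
 * Map the buffer object into CPU address space. The mapping is created
 * lazily on the first call; nested calls are counted and return the same
 * pointer.
 */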
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

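/*
 * Drop one CPU-mapping reference; the buffer is actually unmapped only when
 * the count reaches zero. Returns -EINVAL if the buffer is not mapped.
 */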
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EINVAL;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

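/*
 * Report the recommended buffer size alignment for local (VRAM) and remote
 * (GTT) allocations, taken from the kernel device info.
 */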
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

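/*
 * Wait up to timeout_ns for the buffer object to become idle. On success,
 * *busy is set from the kernel status and is nonzero if the buffer is still
 * in use by the GPU.
 */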
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

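/*
 * Look up the buffer object whose CPU mapping contains the given pointer.
 * On a hit, the BO is returned with an extra reference together with the
 * offset of the pointer inside the BO; otherwise *buf_handle is NULL.
 */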
int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
                                  void *cpu,
                                  uint64_t size,
                                  amdgpu_bo_handle *buf_handle,
                                  uint64_t *offset_in_bo)
{
        uint32_t i;
        struct amdgpu_bo *bo;

        if (cpu == NULL || size == 0)
                return -EINVAL;

        /*
         * Workaround for a buggy application which tries to import previously
         * exposed CPU pointers. If we find a real world use case we should
         * improve that by asking the kernel for the right handle.
         */
        pthread_mutex_lock(&dev->bo_table_mutex);
        for (i = 0; i < dev->bo_handles.max_key; i++) {
                bo = handle_table_lookup(&dev->bo_handles, i);
                if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
                        continue;
                if (cpu >= bo->cpu_ptr &&
                    cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
                        break;
        }

        if (i < dev->bo_handles.max_key) {
                atomic_inc(&bo->refcount);
                *buf_handle = bo;
                *offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
        } else {
                *buf_handle = NULL;
                *offset_in_bo = 0;
        }
        pthread_mutex_unlock(&dev->bo_table_mutex);

        return 0;
}

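/*
 * Create a userptr buffer object backed by anonymous user memory, so the GPU
 * can access an application-provided allocation directly.
 */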
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                    void *cpu,
                                    uint64_t size,
                                    amdgpu_bo_handle *buf_handle)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;

        args.addr = (uintptr_t)cpu;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
                AMDGPU_GEM_USERPTR_VALIDATE;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        if (r)
                amdgpu_bo_free(bo);
        else
                *buf_handle = bo;

        return r;
}

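/*
 * Create a kernel-side BO list from an array of buffer handles and optional
 * per-buffer priorities, for use in command submission.
 */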
int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                free(list);
                return -ENOMEM;
        }

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r) {
                free(*result);
                return r;
        }

        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

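/*
 * Destroy a kernel-side BO list and free the wrapper on success.
 */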
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

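/*
 * Replace the contents of an existing kernel-side BO list with a new set of
 * buffer handles and priorities.
 */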
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}

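/*
 * Convenience wrapper around amdgpu_bo_va_op_raw() that page-aligns the size
 * and always requests readable, writeable and executable mappings (the flags
 * argument is not forwarded).
 */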
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
                     uint64_t offset,
                     uint64_t size,
                     uint64_t addr,
                     uint64_t flags,
                     uint32_t ops)
{
        amdgpu_device_handle dev = bo->dev;

        size = ALIGN(size, getpagesize());

        return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
                                   AMDGPU_VM_PAGE_READABLE |
                                   AMDGPU_VM_PAGE_WRITEABLE |
                                   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

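/*
 * Map, unmap, replace or clear a GPU virtual-address range for a buffer
 * object through the GEM_VA ioctl.
 */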
int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
                        amdgpu_bo_handle bo,
                        uint64_t offset,
                        uint64_t size,
                        uint64_t addr,
                        uint64_t flags,
                        uint32_t ops)
{
        struct drm_amdgpu_gem_va va;
        int r;

        if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
            ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
                return -EINVAL;

        memset(&va, 0, sizeof(va));
        va.handle = bo ? bo->handle : 0;
        va.operation = ops;
        va.flags = flags;
        va.va_address = addr;
        va.offset_in_bo = offset;
        va.map_size = size;

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

        return r;
}