amdgpu: fix missing mutex unlock before return
amdgpu/amdgpu_bo.c (android-x86/external-libdrm.git)
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

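/* Close a GEM handle with the generic DRM_IOCTL_GEM_CLOSE ioctl. The return
 * value is deliberately ignored; the callers have no way to recover from a
 * failed close. */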
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

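/* Tear down a BO after the last reference is dropped: unregister it from the
 * device hash tables, force-release any CPU mapping the owner leaked (the
 * map count is reset to 1 so the unmap below really unmaps), and close the
 * KMS handle. */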
drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
        /* Remove the buffer from the hash tables. */
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_remove(bo->dev->bo_handles,
                               (void*)(uintptr_t)bo->handle);
        if (bo->flink_name) {
                util_hash_table_remove(bo->dev->bo_flink_names,
                                       (void*)(uintptr_t)bo->flink_name);
        }
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        /* Release CPU access. */
        if (bo->cpu_map_count > 0) {
                bo->cpu_map_count = 1;
                amdgpu_bo_cpu_unmap(bo);
        }

        amdgpu_close_kms_handle(bo->dev, bo->handle);
        pthread_mutex_destroy(&bo->cpu_access_mutex);
        free(bo);
}

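/* Allocate a buffer object through DRM_AMDGPU_GEM_CREATE. The request must
 * name at least one of the GTT/VRAM heaps; the kernel picks the actual
 * placement from the requested domains and domain flags.
 *
 * Usage sketch (illustrative only; error handling elided, the 4096-byte size
 * and alignment are arbitrary example values, and dev is assumed to come
 * from amdgpu_device_initialize()):
 *
 *     struct amdgpu_bo_alloc_request req = {
 *             .alloc_size = 4096,
 *             .phys_alignment = 4096,
 *             .preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *     };
 *     amdgpu_bo_handle buf;
 *     int err = amdgpu_bo_alloc(dev, &req, &buf);
 */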
int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    amdgpu_bo_handle *buf_handle)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        *buf_handle = bo;
        return 0;
}

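/* Attach tiling information and an opaque UMD metadata blob to a BO via
 * DRM_AMDGPU_GEM_METADATA. The blob must fit into args.data.data, hence the
 * size check below. */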
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

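/* Reconstruct an amdgpu_bo_info for a BO by combining two kernel queries:
 * GET_METADATA for the tiling/UMD blob and GEM_OP GET_GEM_CREATE_INFO for
 * the original allocation parameters. */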
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Validate the BO passed in */
        if (!bo->handle)
                return -EINVAL;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

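/* Record a BO in the device-wide handle table so that a later import of the
 * same KMS handle or dma-buf resolves to the existing amdgpu_bo instead of
 * creating a duplicate. */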
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_handles,
                            (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

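/* Create a global GEM flink name for the BO. When the device was opened
 * through a render node (flink_fd != fd), the handle is first moved to the
 * legacy node via a dma-buf round trip, because flink names can only be
 * created there; the temporary handle is closed again once the name exists. */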
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_flink_names,
                            (void*)(uintptr_t)bo->flink_name,
                            bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return 0;
}

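/* Export a BO as a flink name, KMS handle, or dma-buf fd. KMS and dma-buf
 * exports are registered in the handle table first so that a re-import on
 * the same device returns this amdgpu_bo. */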
int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
                amdgpu_add_handle_to_table(bo);
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                amdgpu_add_handle_to_table(bo);
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

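/* Import a flink name or dma-buf fd as an amdgpu_bo. bo_table_mutex is held
 * across the whole lookup-or-create sequence so that two threads importing
 * the same handle cannot race and create two amdgpu_bo instances; every
 * early return below must therefore drop the lock first, which is the
 * omission this commit's "missing mutex unlock" fix addresses. */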
int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = util_hash_table_get(dev->bo_flink_names,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = util_hash_table_get(dev->bo_handles,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);

                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                        amdgpu_close_kms_handle(dev, shared_handle);
                }
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                util_hash_table_set(dev->bo_flink_names,
                                    (void*)(uintptr_t)bo->flink_name, bo);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        return 0;
}

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        /* Just drop the reference. */
        amdgpu_bo_reference(&buf_handle, NULL);
        return 0;
}

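/* Map a BO into the CPU's address space. Mappings are reference-counted per
 * BO: the first call performs the DRM_AMDGPU_GEM_MMAP ioctl plus mmap(), and
 * later calls just return the cached pointer, so every successful map must
 * be balanced by an amdgpu_bo_cpu_unmap() call.
 *
 * Usage sketch (illustrative only; error handling elided, buf assumed to be
 * a 4096-byte BO from amdgpu_bo_alloc()):
 *
 *     void *cpu;
 *     if (amdgpu_bo_cpu_map(buf, &cpu) == 0) {
 *             memset(cpu, 0, 4096);       // write through the mapping
 *             amdgpu_bo_cpu_unmap(buf);   // balance the map
 *     }
 */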
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

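/* Drop one CPU-mapping reference; the actual munmap() only happens when the
 * count reaches zero. Returns -EINVAL if the BO is not currently mapped. */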
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EINVAL;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

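/* Report the device's size/alignment requirements: the PTE fragment size
 * for local (VRAM) allocations and the GART page size for remote (GTT)
 * ones. */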
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

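/* Block until the BO is idle or timeout_ns expires. On success, *busy tells
 * the caller whether the wait ended with the buffer still in use. */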
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

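/* Wrap an existing anonymous CPU allocation in a BO via
 * DRM_AMDGPU_GEM_USERPTR. The address and size should be page aligned, as
 * the kernel rejects unaligned userptr requests. */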
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                    void *cpu,
                                    uint64_t size,
                                    amdgpu_bo_handle *buf_handle)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;

        args.addr = (uintptr_t)cpu;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
                AMDGPU_GEM_USERPTR_VALIDATE;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                /* Don't leak the userptr handle created above. */
                amdgpu_close_kms_handle(dev, args.handle);
                return -ENOMEM;
        }

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        *buf_handle = bo;

        return r;
}

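/* Build a kernel BO list for command submission. The handle/priority pairs
 * are marshalled into a temporary drm_amdgpu_bo_list_entry array that the
 * kernel copies in during the ioctl, so the array can be freed right
 * afterwards. */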
int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                free(list);
                return -ENOMEM;
        }

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r) {
                free(*result);
                return r;
        }

        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

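/* Destroy a kernel BO list. The wrapper struct is freed only when the ioctl
 * succeeds, so the handle stays valid and the caller can retry on failure. */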
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

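/* Replace the contents of an existing BO list in place; the marshalling
 * mirrors amdgpu_bo_list_create() with AMDGPU_BO_LIST_OP_UPDATE. */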
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}

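/* Map or unmap a BO in the GPU virtual address space. This legacy entry
 * point ignores the caller's flags and always maps the page-aligned range
 * as readable, writeable, and executable; amdgpu_bo_va_op_raw() below gives
 * full control over the flags and operation. */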
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
                    uint64_t offset,
                    uint64_t size,
                    uint64_t addr,
                    uint64_t flags,
                    uint32_t ops)
{
        amdgpu_device_handle dev = bo->dev;

        size = ALIGN(size, getpagesize());

        return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
                                   AMDGPU_VM_PAGE_READABLE |
                                   AMDGPU_VM_PAGE_WRITEABLE |
                                   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
                        amdgpu_bo_handle bo,
                        uint64_t offset,
                        uint64_t size,
                        uint64_t addr,
                        uint64_t flags,
                        uint32_t ops)
{
        struct drm_amdgpu_gem_va va;
        int r;

        if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
            ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
                return -EINVAL;

        memset(&va, 0, sizeof(va));
        va.handle = bo ? bo->handle : 0;
        va.operation = ops;
        va.flags = flags;
        va.va_address = addr;
        va.offset_in_bo = offset;
        va.map_size = size;

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

        return r;
}