amdgpu: add amdgpu_bo_va_op for va map/unmap support v3
amdgpu/amdgpu_bo.c (android-x86/external-libdrm.git)
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>	/* needed directly for the assert() calls below */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
        /* Remove the buffer from the hash tables. */
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_remove(bo->dev->bo_handles,
                               (void*)(uintptr_t)bo->handle);
        if (bo->flink_name) {
                util_hash_table_remove(bo->dev->bo_flink_names,
                                       (void*)(uintptr_t)bo->flink_name);
        }
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        /* Release CPU access. */
        if (bo->cpu_map_count > 0) {
                bo->cpu_map_count = 1;
                amdgpu_bo_cpu_unmap(bo);
        }

        amdgpu_close_kms_handle(bo->dev, bo->handle);
        pthread_mutex_destroy(&bo->cpu_access_mutex);
        free(bo);
}

int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    amdgpu_bo_handle *buf_handle)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified. */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        *buf_handle = bo;
        return 0;
}

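/* Usage sketch (not part of this file): allocating a 64 KiB GTT buffer.
 * `dev` is assumed to be an initialized amdgpu_device_handle; all names
 * below are illustrative.
 *
 *     struct amdgpu_bo_alloc_request req = {};
 *     amdgpu_bo_handle buf;
 *     int r;
 *
 *     req.alloc_size = 64 * 1024;
 *     req.phys_alignment = 4096;
 *     req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 *     r = amdgpu_bo_alloc(dev, &req, &buf);
 *     if (r)
 *             return r;
 *     ...
 *     amdgpu_bo_free(buf);
 */
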
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Validate the BO passed in. */
        if (!bo->handle)
                return -EINVAL;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

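/* Usage sketch (illustrative, assumes a valid `bo`): store one dword of
 * UMD metadata and read it back.
 *
 *     struct amdgpu_bo_metadata md = {};
 *     struct amdgpu_bo_info info;
 *
 *     md.umd_metadata[0] = 0x12345678;
 *     md.size_metadata = sizeof(uint32_t);
 *     r = amdgpu_bo_set_metadata(bo, &md);
 *     if (!r)
 *             r = amdgpu_bo_query_info(bo, &info);
 */
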
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_handles,
                            (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd,
                                               &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }

        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_flink_names,
                            (void*)(uintptr_t)bo->flink_name,
                            bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return 0;
}

int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
                amdgpu_add_handle_to_table(bo);
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                amdgpu_add_handle_to_table(bo);
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

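/* Usage sketch (illustrative): exporting `bo` as a dma-buf fd for
 * sharing with another process or API. The fd becomes owned by the
 * caller and must be close()d when no longer needed.
 *
 *     uint32_t shared_fd;
 *     r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd,
 *                          &shared_fd);
 */
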
int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r)
                        return r;

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = util_hash_table_get(dev->bo_flink_names,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = util_hash_table_get(dev->bo_handles,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);

                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd)
                        amdgpu_close_kms_handle(dev, shared_handle);
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        struct drm_gem_close close_arg = {};

                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (!r) {
                                r = drmPrimeFDToHandle(dev->fd, dma_fd,
                                                       &bo->handle);
                                close(dma_fd);
                        }

                        /* The temporary handle on flink_fd is no longer
                         * needed; on success the new handle on dev->fd
                         * keeps the object alive. Close it so it doesn't
                         * leak. */
                        close_arg.handle = open_arg.handle;
                        drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
                                 &close_arg);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                util_hash_table_set(dev->bo_flink_names,
                                    (void*)(uintptr_t)bo->flink_name, bo);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        return 0;
}

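/* Usage sketch (illustrative): importing the dma-buf fd exported above
 * on a (possibly different) device handle `dev2`. Importing the same
 * buffer twice returns the same amdgpu_bo with a bumped refcount.
 *
 *     struct amdgpu_bo_import_result res;
 *     r = amdgpu_bo_import(dev2, amdgpu_bo_handle_type_dma_buf_fd,
 *                          shared_fd, &res);
 *     if (!r)
 *             ... use res.buf_handle, then amdgpu_bo_free(res.buf_handle);
 */
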
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        /* Just drop the reference. */
        amdgpu_bo_reference(&buf_handle, NULL);
        return 0;
}

int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EBADMSG;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

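/* Usage sketch (illustrative): CPU access. Maps nest, so each
 * amdgpu_bo_cpu_map() must be paired with an amdgpu_bo_cpu_unmap().
 *
 *     void *ptr;
 *     r = amdgpu_bo_cpu_map(buf, &ptr);
 *     if (r)
 *             return r;
 *     memset(ptr, 0, req.alloc_size);
 *     amdgpu_bo_cpu_unmap(buf);
 */
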
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                       struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

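/* Usage sketch (illustrative): wait up to one second for the GPU to
 * finish with the buffer.
 *
 *     bool busy;
 *     r = amdgpu_bo_wait_for_idle(buf, 1000000000ull, &busy);
 *     if (!r && busy)
 *             ... timed out, the buffer is still in use by the GPU
 */
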
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu,
                                   uint64_t size,
                                   amdgpu_bo_handle *buf_handle)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;
        uintptr_t cpu0;
        uint32_t ps, off;

        memset(&args, 0, sizeof(args));
        ps = getpagesize();

        cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
        off = (uintptr_t)cpu - cpu0;
        size = ROUND_UP(size + off, ps);

        args.addr = cpu0;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                /* Don't leak the userptr handle we just registered. */
                amdgpu_close_kms_handle(dev, args.handle);
                return -ENOMEM;
        }

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;

        *buf_handle = bo;

        return 0;
}

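/* Usage sketch (illustrative): wrapping anonymous CPU memory in a BO.
 * Only anonymous pages are accepted (AMDGPU_GEM_USERPTR_ANONONLY); the
 * function itself rounds the range out to page boundaries.
 *
 *     void *mem = malloc(1 << 20);
 *     amdgpu_bo_handle user_bo;
 *     r = amdgpu_create_bo_from_user_mem(dev, mem, 1 << 20, &user_bo);
 */
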
int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r)
                return r;

        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                /* Destroy the kernel-side list so its handle doesn't
                 * leak on malloc failure. */
                uint32_t list_handle = args.out.list_handle;

                memset(&args, 0, sizeof(args));
                args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
                args.in.list_handle = list_handle;
                drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                    &args, sizeof(args));
                return -ENOMEM;
        }
        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

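/* Usage sketch (illustrative): a two-entry BO list with default (zero)
 * priorities, as consumed by command submission.
 *
 *     amdgpu_bo_handle bos[2] = { buf_a, buf_b };
 *     amdgpu_bo_list_handle bo_list;
 *     r = amdgpu_bo_list_create(dev, 2, bos, NULL, &bo_list);
 *     ...
 *     amdgpu_bo_list_destroy(bo_list);
 */
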
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (list == NULL)
                return -ENOMEM;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}

int amdgpu_bo_va_op(amdgpu_bo_handle bo,
                    uint64_t offset,
                    uint64_t size,
                    uint64_t addr,
                    uint64_t flags,
                    uint32_t ops)
{
        amdgpu_device_handle dev = bo->dev;
        struct drm_amdgpu_gem_va va;
        int r;

        if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
                return -EINVAL;

        memset(&va, 0, sizeof(va));
        va.handle = bo->handle;
        va.operation = ops;
        /* Note: the "flags" argument is currently unused; every mapping
         * is created readable, writeable and executable. */
        va.flags = AMDGPU_VM_PAGE_READABLE |
                   AMDGPU_VM_PAGE_WRITEABLE |
                   AMDGPU_VM_PAGE_EXECUTABLE;
        va.va_address = addr;
        va.offset_in_bo = offset;
        va.map_size = ALIGN(size, getpagesize());

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

        return r;
}
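
/* Usage sketch (illustrative): mapping `bo` into the GPU virtual
 * address space and unmapping it again. `gpu_va` would come from a VA
 * allocator (e.g. amdgpu_va_range_alloc(), where available); since the
 * `flags` argument is currently ignored, 0 is passed.
 *
 *     r = amdgpu_bo_va_op(bo, 0, size, gpu_va, 0, AMDGPU_VA_OP_MAP);
 *     ...
 *     r = amdgpu_bo_va_op(bo, 0, size, gpu_va, 0, AMDGPU_VA_OP_UNMAP);
 */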