amdgpu/amdgpu_bo.c
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    amdgpu_bo_handle *buf_handle)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        if (r)
                amdgpu_bo_free(bo);
        else
                *buf_handle = bo;

        return r;
}
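
/*
 * Usage sketch (illustrative only, not part of this file's code):
 * allocating a 4 KiB, CPU-accessible VRAM buffer. Assumes "dev" was
 * obtained from amdgpu_device_initialize(); error handling abbreviated.
 *
 *      struct amdgpu_bo_alloc_request req = {0};
 *      amdgpu_bo_handle buf;
 *      int r;
 *
 *      req.alloc_size = 4096;
 *      req.phys_alignment = 4096;
 *      req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
 *      req.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *
 *      r = amdgpu_bo_alloc(dev, &req, &buf);
 *      if (r)
 *              return r;
 */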

int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}
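
/*
 * Usage sketch (illustrative only): attaching UMD metadata to a buffer.
 * The tiling_info and umd_metadata payloads are driver-specific; the
 * values below are placeholders. The payload size is bounded by the
 * sizeof(args.data.data) check above.
 *
 *      struct amdgpu_bo_metadata meta = {0};
 *      uint32_t payload = 0xdeadbeef;          // hypothetical UMD blob
 *      int r;
 *
 *      meta.tiling_info = 0;                   // placeholder tiling encoding
 *      meta.size_metadata = sizeof(payload);
 *      memcpy(meta.umd_metadata, &payload, sizeof(payload));
 *      r = amdgpu_bo_set_metadata(buf, &meta);
 */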

int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Validate the BO passed in */
        if (!bo->handle)
                return -EINVAL;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return r;
}

int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
                                          DRM_CLOEXEC | DRM_RDWR,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}
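
/*
 * Usage sketch (illustrative only): exporting a buffer as a dma-buf file
 * descriptor for sharing with another process or API. The returned fd is
 * owned by the caller and must eventually be close()d.
 *
 *      uint32_t shared;
 *      int r;
 *
 *      r = amdgpu_bo_export(buf, amdgpu_bo_handle_type_dma_buf_fd, &shared);
 *      if (!r)
 *              send_fd_to_peer((int)shared);   // hypothetical IPC helper
 */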

int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = handle_table_lookup(&dev->bo_handles, shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);
                pthread_mutex_unlock(&dev->bo_table_mutex);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                        amdgpu_close_kms_handle(dev, shared_handle);
                }
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                r = handle_table_insert(&dev->bo_flink_names, shared_handle,
                                        bo);
                if (r) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        amdgpu_bo_free(bo);
                        return r;
                }
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        handle_table_insert(&dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        return 0;
}
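
/*
 * Usage sketch (illustrative only): importing a dma-buf fd received from
 * another process. On success the result holds the amdgpu_bo handle and
 * the buffer size; the fd itself can be closed afterwards.
 *
 *      struct amdgpu_bo_import_result res;
 *      int r;
 *
 *      r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
 *                           (uint32_t)dma_buf_fd, &res);
 *      if (!r)
 *              use_buffer(res.buf_handle, res.alloc_size);  // hypothetical
 */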

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        struct amdgpu_device *dev;
        struct amdgpu_bo *bo = buf_handle;

        assert(bo != NULL);
        dev = bo->dev;
        pthread_mutex_lock(&dev->bo_table_mutex);

        if (update_references(&bo->refcount, NULL)) {
                /* Remove the buffer from the hash tables. */
                handle_table_remove(&dev->bo_handles, bo->handle);

                if (bo->flink_name)
                        handle_table_remove(&dev->bo_flink_names,
                                            bo->flink_name);

                /* Release CPU access. */
                if (bo->cpu_map_count > 0) {
                        bo->cpu_map_count = 1;
                        amdgpu_bo_cpu_unmap(bo);
                }

                amdgpu_close_kms_handle(dev, bo->handle);
                pthread_mutex_destroy(&bo->cpu_access_mutex);
                free(bo);
        }

        pthread_mutex_unlock(&dev->bo_table_mutex);
        return 0;
}

int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EINVAL;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}
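
/*
 * Usage sketch (illustrative only): mapping a buffer for CPU access,
 * writing to it, and unmapping. Map/unmap calls are reference counted,
 * so nested map/unmap pairs are cheap.
 *
 *      void *ptr;
 *      int r;
 *
 *      r = amdgpu_bo_cpu_map(buf, &ptr);
 *      if (r)
 *              return r;
 *      memset(ptr, 0, 4096);   // assumes a 4096-byte allocation
 *      amdgpu_bo_cpu_unmap(buf);
 */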

int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}
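
/*
 * Usage sketch (illustrative only): waiting up to one second for all GPU
 * work referencing the buffer to finish. "busy" is set if the timeout
 * expired while the buffer was still in use.
 *
 *      bool busy;
 *      int r;
 *
 *      r = amdgpu_bo_wait_for_idle(buf, 1000000000ull, &busy);
 *      if (!r && busy)
 *              fprintf(stderr, "buffer still busy after 1s\n");
 */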

int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu,
                                   uint64_t size,
                                   amdgpu_bo_handle *buf_handle)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;

        args.addr = (uintptr_t)cpu;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
                AMDGPU_GEM_USERPTR_VALIDATE;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;

        *buf_handle = bo;

        return r;
}
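
/*
 * Usage sketch (illustrative only): wrapping anonymous, page-aligned CPU
 * memory in a buffer object. The address and size should be page aligned,
 * and AMDGPU_GEM_USERPTR_ANONONLY rejects file-backed pages.
 *
 *      void *cpu;
 *      amdgpu_bo_handle buf;
 *      uint64_t size = 64 * 4096;
 *      int r;
 *
 *      if (posix_memalign(&cpu, getpagesize(), size))
 *              return -ENOMEM;
 *      r = amdgpu_create_bo_from_user_mem(dev, cpu, size, &buf);
 */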

int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                free(list);
                return -ENOMEM;
        }

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r) {
                free(*result);
                return r;
        }

        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}
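
/*
 * Usage sketch (illustrative only): building a BO list for command
 * submission from two previously allocated buffers ("buf_a" and "buf_b"
 * are hypothetical), with default priorities.
 *
 *      amdgpu_bo_handle bos[2] = { buf_a, buf_b };
 *      amdgpu_bo_list_handle list;
 *      int r;
 *
 *      r = amdgpu_bo_list_create(dev, 2, bos, NULL, &list);
 *      if (r)
 *              return r;
 *      // ... reference "list" in the command submission ...
 *      amdgpu_bo_list_destroy(list);
 */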

int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}

int amdgpu_bo_va_op(amdgpu_bo_handle bo,
                    uint64_t offset,
                    uint64_t size,
                    uint64_t addr,
                    uint64_t flags,
                    uint32_t ops)
{
        amdgpu_device_handle dev = bo->dev;

        size = ALIGN(size, getpagesize());

        return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
                                   AMDGPU_VM_PAGE_READABLE |
                                   AMDGPU_VM_PAGE_WRITEABLE |
                                   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}
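
/*
 * Usage sketch (illustrative only): mapping a buffer into the GPU virtual
 * address space. The VA range is typically reserved first with
 * amdgpu_va_range_alloc(); error handling abbreviated. Note that
 * amdgpu_bo_va_op() always maps with R/W/X page flags and ignores its
 * "flags" argument, forwarding everything else to amdgpu_bo_va_op_raw().
 *
 *      uint64_t va;
 *      amdgpu_va_handle va_handle;
 *      int r;
 *
 *      r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *                                4096, 4096, 0, &va, &va_handle, 0);
 *      if (r)
 *              return r;
 *      r = amdgpu_bo_va_op(buf, 0, 4096, va, 0, AMDGPU_VA_OP_MAP);
 */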

int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
                        amdgpu_bo_handle bo,
                        uint64_t offset,
                        uint64_t size,
                        uint64_t addr,
                        uint64_t flags,
                        uint32_t ops)
{
        struct drm_amdgpu_gem_va va;
        int r;

        if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
            ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
                return -EINVAL;

        memset(&va, 0, sizeof(va));
        va.handle = bo ? bo->handle : 0;
        va.operation = ops;
        va.flags = flags;
        va.va_address = addr;
        va.offset_in_bo = offset;
        va.map_size = size;

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

        return r;
}