[android-x86/external-libdrm.git] / amdgpu / amdgpu_bo.c
index d78bb9a..5bdb8fe 100644
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
+ *
  */
 
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
 #include <stdlib.h>
 #include <stdio.h>
+#include <stdint.h>
 #include <string.h>
 #include <errno.h>
 #include <fcntl.h>
 #include "xf86drm.h"
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
-#include "util_hash_table.h"
+#include "util_math.h"
 
-static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
-                                    uint32_t handle)
+static int amdgpu_close_kms_handle(int fd, uint32_t handle)
 {
        struct drm_gem_close args = {};
 
        args.handle = handle;
-       drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
-}
-
-void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
-{
-       /* Remove the buffer from the hash tables. */
-       pthread_mutex_lock(&bo->dev->bo_table_mutex);
-       util_hash_table_remove(bo->dev->bo_handles,
-                              (void*)(uintptr_t)bo->handle);
-       if (bo->flink_name) {
-               util_hash_table_remove(bo->dev->bo_flink_names,
-                                      (void*)(uintptr_t)bo->flink_name);
-       }
-       pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-
-       /* Release CPU access. */
-       if (bo->cpu_map_count > 0) {
-               bo->cpu_map_count = 1;
-               amdgpu_bo_cpu_unmap(bo);
-       }
-
-       amdgpu_close_kms_handle(bo->dev, bo->handle);
-       pthread_mutex_destroy(&bo->cpu_access_mutex);
-       amdgpu_vamgr_free_va(&bo->dev->vamgr, bo->virtual_mc_base_address, bo->alloc_size);
-       free(bo);
+       return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
 }
 
-int amdgpu_bo_alloc(amdgpu_device_handle dev,
-                   struct amdgpu_bo_alloc_request *alloc_buffer,
-                   struct amdgpu_bo_alloc_result *info)
+static int amdgpu_bo_create(amdgpu_device_handle dev,
+                           uint64_t size,
+                           uint32_t handle,
+                           amdgpu_bo_handle *buf_handle)
 {
        struct amdgpu_bo *bo;
-       union drm_amdgpu_gem_create args;
-       unsigned heap = alloc_buffer->preferred_heap;
-       int r = 0;
-
-       /* It's an error if the heap is not specified */
-       if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
-               return -EINVAL;
+       int r;
 
        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;
 
+       r = handle_table_insert(&dev->bo_handles, handle, bo);
+       if (r) {
+               free(bo);
+               return r;
+       }
+
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
-       bo->alloc_size = alloc_buffer->alloc_size;
+       bo->alloc_size = size;
+       bo->handle = handle;
+       pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+
+       *buf_handle = bo;
+       return 0;
+}
+
+drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
+                              struct amdgpu_bo_alloc_request *alloc_buffer,
+                              amdgpu_bo_handle *buf_handle)
+{
+       union drm_amdgpu_gem_create args;
+       int r;
 
        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;
 
        /* Set the placement. */
-       args.in.domains = heap & AMDGPU_GEM_DOMAIN_MASK;
-       args.in.domain_flags = alloc_buffer->flags & AMDGPU_GEM_CREATE_CPU_GTT_MASK;
+       args.in.domains = alloc_buffer->preferred_heap;
+       args.in.domain_flags = alloc_buffer->flags;
 
        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
-       if (r) {
-               free(bo);
-               return r;
-       }
-
-       bo->handle = args.out.handle;
-
-       pthread_mutex_init(&bo->cpu_access_mutex, NULL);
-
-       /* map the buffer to the GPU virtual address space */
-       {
-               union drm_amdgpu_gem_va va;
-
-               memset(&va, 0, sizeof(va));
-
-               bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
-                                                alloc_buffer->alloc_size,
-                                                alloc_buffer->phys_alignment);
-
-               if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
-                       amdgpu_bo_free_internal(bo);
-                       return -ENOSPC;
-               }
-
-               va.in.handle = bo->handle;
-               va.in.operation = AMDGPU_VA_OP_MAP;
-               va.in.flags =   AMDGPU_VM_PAGE_READABLE |
-                               AMDGPU_VM_PAGE_WRITEABLE |
-                               AMDGPU_VM_PAGE_EXECUTABLE;
-               va.in.va_address = bo->virtual_mc_base_address;
-
-               r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
-               if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
-                       amdgpu_bo_free_internal(bo);
-                       return r;
-               }
-               pthread_mutex_lock(&dev->bo_table_mutex);
+       if (r)
+               goto out;
 
-               util_hash_table_set(dev->bo_vas,
-                                   (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
-               pthread_mutex_unlock(&dev->bo_table_mutex);
+       pthread_mutex_lock(&dev->bo_table_mutex);
+       r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
+                            buf_handle);
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+       if (r) {
+               amdgpu_close_kms_handle(dev->fd, args.out.handle);
        }
 
-       info->buf_handle = bo;
-       info->virtual_mc_base_address = bo->virtual_mc_base_address;
-       return 0;
+out:
+       return r;
 }
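
With this change amdgpu_bo_alloc() only creates the GEM object; it no longer picks a GPU virtual address, so the caller reserves and maps one explicitly (amdgpu_bo_va_op() appears further down in this diff). A minimal sketch of the new allocation flow, assuming libdrm's amdgpu.h/amdgpu_drm.h headers, the public amdgpu_va_range_alloc()/amdgpu_va_range_free() helpers, and an already-initialized device handle `dev`; later sketches assume the same headers:

```c
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Allocate a 1 MiB VRAM buffer and map it into the GPU address space.
 * The VA is no longer chosen during allocation; the caller reserves a
 * range and maps the BO there explicitly. */
static int alloc_and_map(amdgpu_device_handle dev, amdgpu_bo_handle *bo,
                         amdgpu_va_handle *va_handle, uint64_t *gpu_va)
{
        struct amdgpu_bo_alloc_request req = {
                .alloc_size = 1024 * 1024,
                .phys_alignment = 4096,
                .preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
        };
        int r;

        r = amdgpu_bo_alloc(dev, &req, bo);
        if (r)
                return r;

        /* Reserve a GPU VA range, then map the BO into it. */
        r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                                  req.alloc_size, req.phys_alignment, 0,
                                  gpu_va, va_handle, 0);
        if (r)
                goto err_free_bo;

        r = amdgpu_bo_va_op(*bo, 0, req.alloc_size, *gpu_va, 0,
                            AMDGPU_VA_OP_MAP);
        if (r)
                goto err_free_va;
        return 0;

err_free_va:
        amdgpu_va_range_free(*va_handle);
err_free_bo:
        amdgpu_bo_free(*bo);
        return r;
}
```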
 
-int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
-                          struct amdgpu_bo_metadata *info)
+drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
+                                     struct amdgpu_bo_metadata *info)
 {
        struct drm_amdgpu_gem_metadata args = {};
 
@@ -177,14 +131,18 @@ int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                                   &args, sizeof(args));
 }
 
-int amdgpu_bo_query_info(amdgpu_bo_handle bo,
-                        struct amdgpu_bo_info *info)
+drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
+                                   struct amdgpu_bo_info *info)
 {
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;
 
+       /* Validate the BO passed in */
+       if (!bo->handle)
+               return -EINVAL;
+
        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
@@ -211,7 +169,6 @@ int amdgpu_bo_query_info(amdgpu_bo_handle bo,
        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
-       info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
@@ -225,14 +182,6 @@ int amdgpu_bo_query_info(amdgpu_bo_handle bo,
        return 0;
 }
 
-static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
-{
-       pthread_mutex_lock(&bo->dev->bo_table_mutex);
-       util_hash_table_set(bo->dev->bo_handles,
-                           (void*)(uintptr_t)bo->handle, bo);
-       pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-}
-
 static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
 {
        struct drm_gem_flink flink;
@@ -266,24 +215,19 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
 
        bo->flink_name = flink.name;
 
-       if (bo->dev->flink_fd != bo->dev->fd) {
-               struct drm_gem_close args = {};
-               args.handle = handle;
-               drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
-       }
+       if (bo->dev->flink_fd != bo->dev->fd)
+               amdgpu_close_kms_handle(bo->dev->flink_fd, handle);
 
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
-       util_hash_table_set(bo->dev->bo_flink_names,
-                           (void*)(uintptr_t)bo->flink_name,
-                           bo);
+       r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
 
-       return 0;
+       return r;
 }
 
-int amdgpu_bo_export(amdgpu_bo_handle bo,
-                    enum amdgpu_bo_handle_type type,
-                    uint32_t *shared_handle)
+drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
+                               enum amdgpu_bo_handle_type type,
+                               uint32_t *shared_handle)
 {
        int r;
 
@@ -297,50 +241,49 @@ int amdgpu_bo_export(amdgpu_bo_handle bo,
                return 0;
 
        case amdgpu_bo_handle_type_kms:
-               r = amdgpu_bo_export_flink(bo);
-               if (r)
-                       return r;
-
-               amdgpu_add_handle_to_table(bo);
+       case amdgpu_bo_handle_type_kms_noimport:
                *shared_handle = bo->handle;
                return 0;
 
        case amdgpu_bo_handle_type_dma_buf_fd:
-               amdgpu_add_handle_to_table(bo);
-               return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
-                                      (int*)shared_handle);
+               return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
+                                         DRM_CLOEXEC | DRM_RDWR,
+                                         (int*)shared_handle);
        }
        return -EINVAL;
 }
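
Two behavioural notes in this hunk: KMS exports now simply return bo->handle without registering a flink name, and dma-buf exports pass DRM_RDWR in addition to DRM_CLOEXEC so the resulting fd can be mapped writable. A sketch of the dma-buf path:

```c
/* Export a BO as a dma-buf fd for sharing across processes/devices.
 * The prototype returns the fd through a uint32_t out-parameter, so
 * it is cast back to an int. */
static int export_dma_buf(amdgpu_bo_handle bo, int *dma_fd)
{
        uint32_t shared;
        int r;

        r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &shared);
        if (r)
                return r;

        *dma_fd = (int)shared;  /* opened with DRM_CLOEXEC | DRM_RDWR */
        return 0;
}
```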
 
-int amdgpu_bo_import(amdgpu_device_handle dev,
-                    enum amdgpu_bo_handle_type type,
-                    uint32_t shared_handle,
+drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
+                               enum amdgpu_bo_handle_type type,
+                               uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
 {
        struct drm_gem_open open_arg = {};
-       union drm_amdgpu_gem_va va;
        struct amdgpu_bo *bo = NULL;
-       int r;
+       uint32_t handle = 0, flink_name = 0;
+       uint64_t alloc_size = 0;
+       int r = 0;
        int dma_fd;
        uint64_t dma_buf_size = 0;
 
+       /* We must maintain a list of pairs <handle, bo>, so that we always
+        * return the same amdgpu_bo instance for the same handle. */
+       pthread_mutex_lock(&dev->bo_table_mutex);
+
        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-               uint32_t handle;
                off_t size;
 
                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
-               if (r) {
-                       return r;
-               }
+               if (r)
+                       goto unlock;
 
                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
-                       amdgpu_close_kms_handle(dev, handle);
-                       return -errno;
+                       r = -errno;
+                       goto free_bo_handle;
                }
                lseek(shared_handle, 0, SEEK_SET);
 
@@ -348,145 +291,146 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
                shared_handle = handle;
        }
 
-       /* We must maintain a list of pairs <handle, bo>, so that we always
-        * return the same amdgpu_bo instance for the same handle. */
-       pthread_mutex_lock(&dev->bo_table_mutex);
-
        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
-               bo = util_hash_table_get(dev->bo_flink_names,
-                                        (void*)(uintptr_t)shared_handle);
+               bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
                break;
 
        case amdgpu_bo_handle_type_dma_buf_fd:
-               bo = util_hash_table_get(dev->bo_handles,
-                                        (void*)(uintptr_t)shared_handle);
+               bo = handle_table_lookup(&dev->bo_handles, shared_handle);
                break;
 
        case amdgpu_bo_handle_type_kms:
+       case amdgpu_bo_handle_type_kms_noimport:
               /* Importing a KMS handle is not allowed. */
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               return -EPERM;
+               r = -EPERM;
+               goto unlock;
 
        default:
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               return -EINVAL;
+               r = -EINVAL;
+               goto unlock;
        }
 
        if (bo) {
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-
                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);
+               pthread_mutex_unlock(&dev->bo_table_mutex);
 
                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
-               output->virtual_mc_base_address =
-                       bo->virtual_mc_base_address;
                return 0;
        }
 
-       bo = calloc(1, sizeof(struct amdgpu_bo));
-       if (!bo) {
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-                       amdgpu_close_kms_handle(dev, shared_handle);
-               }
-               return -ENOMEM;
-       }
-
        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
-               if (r) {
-                       free(bo);
-                       pthread_mutex_unlock(&dev->bo_table_mutex);
-                       return r;
-               }
+               if (r)
+                       goto unlock;
 
-               bo->handle = open_arg.handle;
+               flink_name = shared_handle;
+               handle = open_arg.handle;
+               alloc_size = open_arg.size;
                if (dev->flink_fd != dev->fd) {
-                       r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
-                       if (r) {
-                               free(bo);
-                               pthread_mutex_unlock(&dev->bo_table_mutex);
-                               return r;
-                       }
-                       r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
-
+                       r = drmPrimeHandleToFD(dev->flink_fd, handle,
+                                              DRM_CLOEXEC, &dma_fd);
+                       if (r)
+                               goto free_bo_handle;
+                       r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
                        close(dma_fd);
-
-                       if (r) {
-                               free(bo);
-                               pthread_mutex_unlock(&dev->bo_table_mutex);
-                               return r;
-                       }
+                       if (r)
+                               goto free_bo_handle;
+                       r = amdgpu_close_kms_handle(dev->flink_fd,
+                                                   open_arg.handle);
+                       if (r)
+                               goto free_bo_handle;
                }
-               bo->flink_name = shared_handle;
-               bo->alloc_size = open_arg.size;
-               util_hash_table_set(dev->bo_flink_names,
-                                   (void*)(uintptr_t)bo->flink_name, bo);
+               open_arg.handle = 0;
                break;
 
        case amdgpu_bo_handle_type_dma_buf_fd:
-               bo->handle = shared_handle;
-               bo->alloc_size = dma_buf_size;
+               handle = shared_handle;
+               alloc_size = dma_buf_size;
                break;
 
        case amdgpu_bo_handle_type_kms:
+       case amdgpu_bo_handle_type_kms_noimport:
                assert(0); /* unreachable */
        }
 
        /* Initialize it. */
-       atomic_set(&bo->refcount, 1);
-       bo->dev = dev;
-       pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+       r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
+       if (r)
+               goto free_bo_handle;
 
-       bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, bo->alloc_size, 1 << 20);
+       if (flink_name) {
+               bo->flink_name = flink_name;
+               r = handle_table_insert(&dev->bo_flink_names, flink_name,
+                                       bo);
+               if (r)
+                       goto free_bo_handle;
 
-       if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               amdgpu_bo_reference(&bo, NULL);
-               return -ENOSPC;
        }
 
-       memset(&va, 0, sizeof(va));
-       va.in.handle = bo->handle;
-       va.in.operation = AMDGPU_VA_OP_MAP;
-       va.in.va_address = bo->virtual_mc_base_address;
-       va.in.flags =   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
-                       AMDGPU_VM_PAGE_EXECUTABLE;
+       output->buf_handle = bo;
+       output->alloc_size = bo->alloc_size;
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+       return 0;
 
-       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
-       if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               amdgpu_vamgr_free_va(&dev->vamgr, bo->virtual_mc_base_address, bo->alloc_size);
-               amdgpu_bo_reference(&bo, NULL);
-               return r;
+free_bo_handle:
+       if (flink_name && open_arg.handle)
+               amdgpu_close_kms_handle(dev->flink_fd, open_arg.handle);
+
+       if (bo)
+               amdgpu_bo_free(bo);
+       else
+               amdgpu_close_kms_handle(dev->fd, handle);
+unlock:
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+       return r;
+}
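
The import path now holds bo_table_mutex across the whole lookup-or-create sequence, so concurrent imports of the same handle cannot race and always resolve to the same amdgpu_bo with a bumped refcount. A usage sketch, with the fd assumed to come from an exporter such as the sketch above:

```c
/* Import a dma-buf fd. Importing the same buffer twice returns the
 * same amdgpu_bo with its refcount incremented, so every import must
 * be balanced by an amdgpu_bo_free(). */
static int import_dma_buf(amdgpu_device_handle dev, int dma_fd,
                          amdgpu_bo_handle *bo, uint64_t *size)
{
        struct amdgpu_bo_import_result result;
        int r;

        r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
                             (uint32_t)dma_fd, &result);
        if (r)
                return r;

        *bo = result.buf_handle;
        *size = result.alloc_size;
        return 0;
}
```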
+
+drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
+{
+       struct amdgpu_device *dev;
+       struct amdgpu_bo *bo = buf_handle;
+
+       assert(bo != NULL);
+       dev = bo->dev;
+       pthread_mutex_lock(&dev->bo_table_mutex);
+
+       if (update_references(&bo->refcount, NULL)) {
+               /* Remove the buffer from the hash tables. */
+               handle_table_remove(&dev->bo_handles, bo->handle);
+
+               if (bo->flink_name)
+                       handle_table_remove(&dev->bo_flink_names,
+                                           bo->flink_name);
+
+               /* Release CPU access. */
+               if (bo->cpu_map_count > 0) {
+                       bo->cpu_map_count = 1;
+                       amdgpu_bo_cpu_unmap(bo);
+               }
+
+               amdgpu_close_kms_handle(dev->fd, bo->handle);
+               pthread_mutex_destroy(&bo->cpu_access_mutex);
+               free(bo);
        }
 
-       util_hash_table_set(dev->bo_vas,
-                           (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
-       util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);
 
-       output->buf_handle = bo;
-       output->alloc_size = bo->alloc_size;
-       output->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
 }
 
-int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
+drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
 {
-       /* Just drop the reference. */
-       amdgpu_bo_reference(&buf_handle, NULL);
-       return 0;
+       atomic_inc(&bo->refcount);
 }
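
amdgpu_bo_free() now performs the real teardown once the last reference drops, and the new amdgpu_bo_inc_ref() lets a second owner take a reference explicitly. A sketch; consumer_take_bo() is a hypothetical callee, not part of the API:

```c
extern void consumer_take_bo(amdgpu_bo_handle bo);  /* hypothetical */

/* Hand a BO to a second owner. Each owner holds its own reference and
 * drops it with amdgpu_bo_free(); the BO is destroyed only when the
 * last reference goes away. */
static void share_with_consumer(amdgpu_bo_handle bo)
{
        amdgpu_bo_inc_ref(bo);
        consumer_take_bo(bo);   /* assumed to call amdgpu_bo_free() later */
}
```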
 
-int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
+drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
 {
        union drm_amdgpu_gem_mmap args;
        void *ptr;
@@ -534,7 +478,7 @@ int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
        return 0;
 }
 
-int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
+drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
 {
        int r;
 
@@ -544,7 +488,7 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
-               return -EBADMSG;
+               return -EINVAL;
        }
 
        bo->cpu_map_count--;
@@ -560,7 +504,7 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
        return r;
 }
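
Unbalanced unmaps now return the conventional -EINVAL instead of -EBADMSG. Since cpu_map_count is tracked per BO, every successful map must be paired with an unmap; a small fill helper as a sketch, assuming <string.h>:

```c
#include <string.h>

/* Zero-fill a BO through a temporary CPU mapping. */
static int zero_fill(amdgpu_bo_handle bo, uint64_t size)
{
        void *cpu;
        int r;

        r = amdgpu_bo_cpu_map(bo, &cpu);
        if (r)
                return r;

        memset(cpu, 0, size);
        return amdgpu_bo_cpu_unmap(bo);
}
```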
 
-int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
+drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                struct amdgpu_buffer_size_alignments *info)
 {
        info->size_local = dev->dev_info.pte_fragment_size;
@@ -568,8 +512,8 @@ int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
        return 0;
 }
 
-int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
-                           uint64_t timeout_ns,
+drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
+                                      uint64_t timeout_ns,
                            bool *busy)
 {
        union drm_amdgpu_gem_wait_idle args;
@@ -591,82 +535,137 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
        }
 }
 
-int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
-                                   void *cpu,
-                                   uint64_t size,
-                                   struct amdgpu_bo_alloc_result *info)
+drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+                                            void *cpu,
+                                            uint64_t size,
+                                            amdgpu_bo_handle *buf_handle,
+                                            uint64_t *offset_in_bo)
 {
-       int r;
        struct amdgpu_bo *bo;
-       struct drm_amdgpu_gem_userptr args;
-       union drm_amdgpu_gem_va va;
-       uintptr_t cpu0;
-       uint32_t ps, off;
+       uint32_t i;
+       int r = 0;
 
-       memset(&args, 0, sizeof(args));
-       ps = getpagesize();
+       if (cpu == NULL || size == 0)
+               return -EINVAL;
 
-       cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
-       off = (uintptr_t)cpu - cpu0;
-       size = ROUND_UP(size + off, ps);
+       /*
+        * Workaround for a buggy application which tries to import previously
+        * exposed CPU pointers. If we find a real world use case we should
+        * improve that by asking the kernel for the right handle.
+        */
+       pthread_mutex_lock(&dev->bo_table_mutex);
+       for (i = 0; i < dev->bo_handles.max_key; i++) {
+               bo = handle_table_lookup(&dev->bo_handles, i);
+               if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
+                       continue;
+               if (cpu >= bo->cpu_ptr &&
+                   cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
+                       break;
+       }
 
-       args.addr = cpu0;
-       args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
+       if (i < dev->bo_handles.max_key) {
+               atomic_inc(&bo->refcount);
+               *buf_handle = bo;
+               *offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
+       } else {
+               *buf_handle = NULL;
+               *offset_in_bo = 0;
+               r = -ENXIO;
+       }
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+
+       return r;
+}
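
The lookup walks the handle table under bo_table_mutex and, on a hit, returns the BO with an extra reference that the caller must drop. A sketch:

```c
/* Recover the BO backing a CPU pointer previously returned by
 * amdgpu_bo_cpu_map(). On success the refcount was bumped, so the
 * reference must be dropped again when done. */
static int lookup_mapping(amdgpu_device_handle dev, void *ptr, uint64_t len)
{
        amdgpu_bo_handle bo;
        uint64_t offset;
        int r;

        r = amdgpu_find_bo_by_cpu_mapping(dev, ptr, len, &bo, &offset);
        if (r)
                return r;               /* -ENXIO: no BO covers the range */

        /* ... ptr sits at byte `offset` inside bo ... */
        return amdgpu_bo_free(bo);      /* drop the reference taken above */
}
```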
+
+drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
+                                             void *cpu,
+                                             uint64_t size,
+                                             amdgpu_bo_handle *buf_handle)
+{
+       int r;
+       struct drm_amdgpu_gem_userptr args;
+
+       args.addr = (uintptr_t)cpu;
+       args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
+               AMDGPU_GEM_USERPTR_VALIDATE;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
-               return r;
+               goto out;
 
-       bo = calloc(1, sizeof(struct amdgpu_bo));
-       if (!bo)
-               return -ENOMEM;
+       pthread_mutex_lock(&dev->bo_table_mutex);
+       r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+       if (r) {
+               amdgpu_close_kms_handle(dev->fd, args.handle);
+       }
 
-       atomic_set(&bo->refcount, 1);
-       bo->dev = dev;
-       bo->alloc_size = size;
-       bo->handle = args.handle;
-       bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, size, 4 * 1024);
+out:
+       return r;
+}
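
The old internal ROUND_DOWN/ROUND_UP of the pointer and size is gone, so page alignment is now the caller's responsibility, and the added AMDGPU_GEM_USERPTR_VALIDATE flag asks the kernel to validate the range. A sketch using POSIX aligned_alloc() and sysconf(), assuming <stdlib.h>, <unistd.h> and <errno.h>:

```c
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

/* Wrap a freshly allocated, page-aligned region in a userptr BO. */
static int wrap_user_memory(amdgpu_device_handle dev, uint64_t size,
                            void **cpu, amdgpu_bo_handle *bo)
{
        uint64_t ps = (uint64_t)sysconf(_SC_PAGESIZE);
        int r;

        size = (size + ps - 1) & ~(ps - 1);     /* round up to full pages */
        *cpu = aligned_alloc(ps, size);
        if (!*cpu)
                return -ENOMEM;

        r = amdgpu_create_bo_from_user_mem(dev, *cpu, size, bo);
        if (r)
                free(*cpu);
        return r;
}
```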
 
-       if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
-               amdgpu_bo_free_internal(bo);
-               return -ENOSPC;
-       }
+drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
+                                        uint32_t number_of_buffers,
+                                        struct drm_amdgpu_bo_list_entry *buffers,
+                                        uint32_t *result)
+{
+       union drm_amdgpu_bo_list args;
+       int r;
 
-       memset(&va, 0, sizeof(va));
-       va.in.handle = bo->handle;
-       va.in.operation = AMDGPU_VA_OP_MAP;
-       va.in.flags =   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
-                       AMDGPU_VM_PAGE_EXECUTABLE;
-       va.in.va_address = bo->virtual_mc_base_address;
-       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
-       if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
-               amdgpu_bo_free_internal(bo);
-               return r;
-       }
-       pthread_mutex_lock(&dev->bo_table_mutex);
-       util_hash_table_set(dev->bo_vas,
-                           (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
-       pthread_mutex_unlock(&dev->bo_table_mutex);
-       info->buf_handle = bo;
-       info->virtual_mc_base_address = bo->virtual_mc_base_address;
-       info->virtual_mc_base_address += off;
+       memset(&args, 0, sizeof(args));
+       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
+       args.in.bo_number = number_of_buffers;
+       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
 
+       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+                               &args, sizeof(args));
+       if (!r)
+               *result = args.out.list_handle;
        return r;
 }
 
-int amdgpu_bo_list_create(amdgpu_device_handle dev,
-                         uint32_t number_of_resources,
-                         amdgpu_bo_handle *resources,
-                         uint8_t *resource_prios,
-                         amdgpu_bo_list_handle *result)
+drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
+                                         uint32_t bo_list)
+{
+       union drm_amdgpu_bo_list args;
+
+       memset(&args, 0, sizeof(args));
+       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
+       args.in.list_handle = bo_list;
+
+       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+                                  &args, sizeof(args));
+}
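
The new *_raw entry points deal in plain uint32_t kernel list handles instead of malloc'd amdgpu_bo_list wrappers, which suits callers that track handles themselves. A sketch that builds a one-entry list, obtaining the raw KMS handle through amdgpu_bo_export() as changed above:

```c
/* Build a one-entry raw BO list for command submission. */
static int make_raw_list(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                         uint32_t *list)
{
        struct drm_amdgpu_bo_list_entry entry;
        uint32_t kms_handle;
        int r;

        r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms, &kms_handle);
        if (r)
                return r;

        entry.bo_handle = kms_handle;
        entry.bo_priority = 0;
        return amdgpu_bo_list_create_raw(dev, 1, &entry, list);
        /* cleanup later: amdgpu_bo_list_destroy_raw(dev, *list) */
}
```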
+
+drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
+                                    uint32_t number_of_resources,
+                                    amdgpu_bo_handle *resources,
+                                    uint8_t *resource_prios,
+                                    amdgpu_bo_list_handle *result)
 {
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;
 
-       list = alloca(sizeof(struct drm_amdgpu_bo_list_entry) * number_of_resources);
+       if (!number_of_resources)
+               return -EINVAL;
+
+       /* overflow check for multiplication */
+       if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+               return -EINVAL;
+
+       list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+       if (!list)
+               return -ENOMEM;
+
+       *result = malloc(sizeof(struct amdgpu_bo_list));
+       if (!*result) {
+               free(list);
+               return -ENOMEM;
+       }
 
        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
@@ -684,16 +683,18 @@ int amdgpu_bo_list_create(amdgpu_device_handle dev,
 
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
-       if (r)
+       free(list);
+       if (r) {
+               free(*result);
                return r;
+       }
 
-       *result = calloc(1, sizeof(struct amdgpu_bo_list));
        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
 }
 
-int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
+drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
 {
        union drm_amdgpu_bo_list args;
        int r;
@@ -710,3 +711,89 @@ int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
 
        return r;
 }
+
+drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
+                                    uint32_t number_of_resources,
+                                    amdgpu_bo_handle *resources,
+                                    uint8_t *resource_prios)
+{
+       struct drm_amdgpu_bo_list_entry *list;
+       union drm_amdgpu_bo_list args;
+       unsigned i;
+       int r;
+
+       if (!number_of_resources)
+               return -EINVAL;
+
+       /* overflow check for multiplication */
+       if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+               return -EINVAL;
+
+       list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+       if (!list)
+               return -ENOMEM;
+
+       args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
+       args.in.list_handle = handle->handle;
+       args.in.bo_number = number_of_resources;
+       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+       args.in.bo_info_ptr = (uintptr_t)list;
+
+       for (i = 0; i < number_of_resources; i++) {
+               list[i].bo_handle = resources[i]->handle;
+               if (resource_prios)
+                       list[i].bo_priority = resource_prios[i];
+               else
+                       list[i].bo_priority = 0;
+       }
+
+       r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
+                               &args, sizeof(args));
+       free(list);
+       return r;
+}
+
+drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+                              uint64_t offset,
+                              uint64_t size,
+                              uint64_t addr,
+                              uint64_t flags,
+                              uint32_t ops)
+{
+       amdgpu_device_handle dev = bo->dev;
+
+       size = ALIGN(size, getpagesize());
+
+       return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
+                                  AMDGPU_VM_PAGE_READABLE |
+                                  AMDGPU_VM_PAGE_WRITEABLE |
+                                  AMDGPU_VM_PAGE_EXECUTABLE, ops);
+}
+
+drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
+                                  amdgpu_bo_handle bo,
+                                  uint64_t offset,
+                                  uint64_t size,
+                                  uint64_t addr,
+                                  uint64_t flags,
+                                  uint32_t ops)
+{
+       struct drm_amdgpu_gem_va va;
+       int r;
+
+       if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
+           ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
+               return -EINVAL;
+
+       memset(&va, 0, sizeof(va));
+       va.handle = bo ? bo->handle : 0;
+       va.operation = ops;
+       va.flags = flags;
+       va.va_address = addr;
+       va.offset_in_bo = offset;
+       va.map_size = size;
+
+       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
+
+       return r;
+}
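
Note the split: amdgpu_bo_va_op() page-aligns the size and hard-codes R/W/X permissions (its flags argument is unused), while amdgpu_bo_va_op_raw() passes flags and operation straight through to the ioctl. A sketch mapping a single page read/write only, assuming a VA obtained from amdgpu_va_range_alloc() as in the first sketch:

```c
#include <unistd.h>

/* Map one page of a BO at an explicit VA with explicit permissions,
 * something the convenience wrapper cannot express. */
static int map_one_page_rw(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                           uint64_t offset, uint64_t gpu_va)
{
        uint64_t page = (uint64_t)getpagesize();

        return amdgpu_bo_va_op_raw(dev, bo, offset, page, gpu_va,
                                   AMDGPU_VM_PAGE_READABLE |
                                   AMDGPU_VM_PAGE_WRITEABLE,
                                   AMDGPU_VA_OP_MAP);
}
```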