amdgpu: add error return value for finding bo by cpu mapping (v2)
[android-x86/external-libdrm.git] amdgpu/amdgpu_bo.c
index 1a5a401..a2fc525 100644
  *
  */
 
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdint.h>
@@ -41,7 +37,6 @@
 #include "xf86drm.h"
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
-#include "util_hash_table.h"
 #include "util_math.h"
 
 static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
@@ -53,34 +48,31 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
 }
 
-drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
+static int amdgpu_bo_create(amdgpu_device_handle dev,
+                           uint64_t size,
+                           uint32_t handle,
+                           amdgpu_bo_handle *buf_handle)
 {
-       /* Remove the buffer from the hash tables. */
-       pthread_mutex_lock(&bo->dev->bo_table_mutex);
-       util_hash_table_remove(bo->dev->bo_handles,
-                              (void*)(uintptr_t)bo->handle);
-       if (bo->flink_name) {
-               util_hash_table_remove(bo->dev->bo_flink_names,
-                                      (void*)(uintptr_t)bo->flink_name);
-       }
-       pthread_mutex_unlock(&bo->dev->bo_table_mutex);
+       struct amdgpu_bo *bo;
 
-       /* Release CPU access. */
-       if (bo->cpu_map_count > 0) {
-               bo->cpu_map_count = 1;
-               amdgpu_bo_cpu_unmap(bo);
-       }
+       bo = calloc(1, sizeof(struct amdgpu_bo));
+       if (!bo)
+               return -ENOMEM;
+
+       atomic_set(&bo->refcount, 1);
+       bo->dev = dev;
+       bo->alloc_size = size;
+       bo->handle = handle;
+       pthread_mutex_init(&bo->cpu_access_mutex, NULL);
 
-       amdgpu_close_kms_handle(bo->dev, bo->handle);
-       pthread_mutex_destroy(&bo->cpu_access_mutex);
-       free(bo);
+       *buf_handle = bo;
+       return 0;
 }
 
 int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    amdgpu_bo_handle *buf_handle)
 {
-       struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;
@@ -89,14 +81,6 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;
 
-       bo = calloc(1, sizeof(struct amdgpu_bo));
-       if (!bo)
-               return -ENOMEM;
-
-       atomic_set(&bo->refcount, 1);
-       bo->dev = dev;
-       bo->alloc_size = alloc_buffer->alloc_size;
-
        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;
@@ -108,17 +92,24 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
+       if (r)
+               goto out;
+
+       r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
+                            buf_handle);
        if (r) {
-               free(bo);
-               return r;
+               amdgpu_close_kms_handle(dev, args.out.handle);
+               goto out;
        }
 
-       bo->handle = args.out.handle;
-
-       pthread_mutex_init(&bo->cpu_access_mutex, NULL);
-
-       *buf_handle = bo;
-       return 0;
+       pthread_mutex_lock(&dev->bo_table_mutex);
+       r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+                               *buf_handle);
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+       if (r)
+               amdgpu_bo_free(*buf_handle);
+out:
+       return r;
 }
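
For context, a minimal usage sketch of the reworked allocation path; the
device handle is assumed to come from amdgpu_device_initialize(), and the
size, alignment and heap choice are illustrative only:

static int example_alloc(amdgpu_device_handle dev, amdgpu_bo_handle *out)
{
	/* 1 MiB in GTT; the values here are hypothetical. */
	struct amdgpu_bo_alloc_request req = {
		.alloc_size = 1024 * 1024,
		.phys_alignment = 4096,
		.preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
	};

	/* On success the new BO is also registered in dev->bo_handles. */
	return amdgpu_bo_alloc(dev, &req, out);
}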
 
 int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
@@ -195,14 +186,6 @@ int amdgpu_bo_query_info(amdgpu_bo_handle bo,
        return 0;
 }
 
-static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
-{
-       pthread_mutex_lock(&bo->dev->bo_table_mutex);
-       util_hash_table_set(bo->dev->bo_handles,
-                           (void*)(uintptr_t)bo->handle, bo);
-       pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-}
-
 static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
 {
        struct drm_gem_flink flink;
@@ -243,12 +226,10 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
        }
 
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
-       util_hash_table_set(bo->dev->bo_flink_names,
-                           (void*)(uintptr_t)bo->flink_name,
-                           bo);
+       r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
 
-       return 0;
+       return r;
 }
 
 int amdgpu_bo_export(amdgpu_bo_handle bo,
@@ -267,14 +248,14 @@ int amdgpu_bo_export(amdgpu_bo_handle bo,
                return 0;
 
        case amdgpu_bo_handle_type_kms:
-               amdgpu_add_handle_to_table(bo);
+       case amdgpu_bo_handle_type_kms_noimport:
                *shared_handle = bo->handle;
                return 0;
 
        case amdgpu_bo_handle_type_dma_buf_fd:
-               amdgpu_add_handle_to_table(bo);
-               return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
-                                      (int*)shared_handle);
+               return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
+                                         DRM_CLOEXEC | DRM_RDWR,
+                                         (int*)shared_handle);
        }
        return -EINVAL;
 }
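
Two behavioral notes fall out of this hunk: KMS and flink exports no longer
insert the handle into the lookup table here (that now happens when the BO
is created), and dma-buf exports pass DRM_RDWR so the importing side may map
the buffer writable. A usage sketch, assuming `bo` is a valid handle:

static int example_export_dmabuf(amdgpu_bo_handle bo, int *fd_out)
{
	uint32_t fd;
	int r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &fd);

	if (r)
		return r;
	/* With DRM_RDWR set above, the receiver can mmap() this fd
	 * read-write. */
	*fd_out = (int)fd;
	return 0;
}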
@@ -285,8 +266,11 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
                     struct amdgpu_bo_import_result *output)
 {
        struct drm_gem_open open_arg = {};
+       struct drm_gem_close close_arg = {};
        struct amdgpu_bo *bo = NULL;
-       int r;
+       uint32_t handle = 0, flink_name = 0;
+       uint64_t alloc_size = 0;
+       int r = 0;
        int dma_fd;
        uint64_t dma_buf_size = 0;
 
@@ -296,21 +280,18 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 
        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-               uint32_t handle;
                off_t size;
 
                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
-               if (r) {
-                       return r;
-               }
+               if (r)
+                       goto unlock;
 
                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
-                       pthread_mutex_unlock(&dev->bo_table_mutex);
-                       amdgpu_close_kms_handle(dev, handle);
-                       return -errno;
+                       r = -errno;
+                       goto free_bo_handle;
                }
                lseek(shared_handle, 0, SEEK_SET);
 
@@ -321,106 +302,139 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
-               bo = util_hash_table_get(dev->bo_flink_names,
-                                        (void*)(uintptr_t)shared_handle);
+               bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
                break;
 
        case amdgpu_bo_handle_type_dma_buf_fd:
-               bo = util_hash_table_get(dev->bo_handles,
-                                        (void*)(uintptr_t)shared_handle);
+               bo = handle_table_lookup(&dev->bo_handles, shared_handle);
                break;
 
        case amdgpu_bo_handle_type_kms:
+       case amdgpu_bo_handle_type_kms_noimport:
                /* Importing a KMS handle is not allowed. */
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               return -EPERM;
+               r = -EPERM;
+               goto unlock;
 
        default:
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               return -EINVAL;
+               r = -EINVAL;
+               goto unlock;
        }
 
        if (bo) {
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-
                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);
+               pthread_mutex_unlock(&dev->bo_table_mutex);
 
                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                return 0;
        }
 
-       bo = calloc(1, sizeof(struct amdgpu_bo));
-       if (!bo) {
-               pthread_mutex_unlock(&dev->bo_table_mutex);
-               if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-                       amdgpu_close_kms_handle(dev, shared_handle);
-               }
-               return -ENOMEM;
-       }
-
        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
-               if (r) {
-                       free(bo);
-                       pthread_mutex_unlock(&dev->bo_table_mutex);
-                       return r;
-               }
+               if (r)
+                       goto unlock;
 
-               bo->handle = open_arg.handle;
+               flink_name = shared_handle;
+               handle = open_arg.handle;
+               alloc_size = open_arg.size;
                if (dev->flink_fd != dev->fd) {
-                       r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
-                       if (r) {
-                               free(bo);
-                               pthread_mutex_unlock(&dev->bo_table_mutex);
-                               return r;
-                       }
-                       r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
-
+                       r = drmPrimeHandleToFD(dev->flink_fd, handle,
+                                              DRM_CLOEXEC, &dma_fd);
+                       if (r)
+                               goto free_bo_handle;
+                       r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
                        close(dma_fd);
-
-                       if (r) {
-                               free(bo);
-                               pthread_mutex_unlock(&dev->bo_table_mutex);
-                               return r;
-                       }
+                       if (r)
+                               goto free_bo_handle;
+                       close_arg.handle = open_arg.handle;
+                       r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
+                                    &close_arg);
+                       if (r)
+                               goto free_bo_handle;
                }
-               bo->flink_name = shared_handle;
-               bo->alloc_size = open_arg.size;
-               util_hash_table_set(dev->bo_flink_names,
-                                   (void*)(uintptr_t)bo->flink_name, bo);
                break;
 
        case amdgpu_bo_handle_type_dma_buf_fd:
-               bo->handle = shared_handle;
-               bo->alloc_size = dma_buf_size;
+               handle = shared_handle;
+               alloc_size = dma_buf_size;
                break;
 
        case amdgpu_bo_handle_type_kms:
+       case amdgpu_bo_handle_type_kms_noimport:
                assert(0); /* unreachable */
        }
 
        /* Initialize it. */
-       atomic_set(&bo->refcount, 1);
-       bo->dev = dev;
-       pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+       r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
+       if (r)
+               goto free_bo_handle;
 
-       util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
-       pthread_mutex_unlock(&dev->bo_table_mutex);
+       r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
+       if (r)
+               goto free_bo_handle;
+       if (flink_name) {
+               bo->flink_name = flink_name;
+               r = handle_table_insert(&dev->bo_flink_names, flink_name,
+                                       bo);
+               if (r)
+                       goto remove_handle;
+       }
 
        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
+       pthread_mutex_unlock(&dev->bo_table_mutex);
        return 0;
+
+remove_handle:
+       handle_table_remove(&dev->bo_handles, bo->handle);
+free_bo_handle:
+       if (flink_name && !close_arg.handle && open_arg.handle) {
+               close_arg.handle = open_arg.handle;
+               drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
+       }
+       if (bo)
+               amdgpu_bo_free(bo);
+       else
+               amdgpu_close_kms_handle(dev, handle);
+unlock:
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+       return r;
 }
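
A sketch of the import path from the caller's side; dma_buf_fd is assumed to
be a descriptor received from another process or API:

static int example_import_dmabuf(amdgpu_device_handle dev, int dma_buf_fd,
				 amdgpu_bo_handle *out, uint64_t *size)
{
	struct amdgpu_bo_import_result res;
	int r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
				 dma_buf_fd, &res);

	if (r)
		return r;
	/* Every successful import takes a reference; balance it with
	 * amdgpu_bo_free() when done. */
	*out = res.buf_handle;
	*size = res.alloc_size;
	return 0;
}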
 
 int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
 {
-       /* Just drop the reference. */
-       amdgpu_bo_reference(&buf_handle, NULL);
+       struct amdgpu_device *dev;
+       struct amdgpu_bo *bo = buf_handle;
+
+       assert(bo != NULL);
+       dev = bo->dev;
+       pthread_mutex_lock(&dev->bo_table_mutex);
+
+       if (update_references(&bo->refcount, NULL)) {
+               /* Remove the buffer from the hash tables. */
+               handle_table_remove(&dev->bo_handles, bo->handle);
+
+               if (bo->flink_name)
+                       handle_table_remove(&dev->bo_flink_names,
+                                           bo->flink_name);
+
+               /* Release CPU access. */
+               if (bo->cpu_map_count > 0) {
+                       bo->cpu_map_count = 1;
+                       amdgpu_bo_cpu_unmap(bo);
+               }
+
+               amdgpu_close_kms_handle(dev, bo->handle);
+               pthread_mutex_destroy(&bo->cpu_access_mutex);
+               free(bo);
+       }
+
+       pthread_mutex_unlock(&dev->bo_table_mutex);
        return 0;
 }
 
@@ -529,43 +543,78 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
        }
 }
 
+int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+                                 void *cpu,
+                                 uint64_t size,
+                                 amdgpu_bo_handle *buf_handle,
+                                 uint64_t *offset_in_bo)
+{
+       struct amdgpu_bo *bo;
+       uint32_t i;
+       int r = 0;
+
+       if (cpu == NULL || size == 0)
+               return -EINVAL;
+
+       /*
+        * Workaround for buggy applications that try to import previously
+        * exposed CPU pointers. If we find a real-world use case we should
+        * improve this by asking the kernel for the right handle.
+        */
+       pthread_mutex_lock(&dev->bo_table_mutex);
+       for (i = 0; i < dev->bo_handles.max_key; i++) {
+               bo = handle_table_lookup(&dev->bo_handles, i);
+               if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
+                       continue;
+               if (cpu >= bo->cpu_ptr &&
+                   cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
+                       break;
+       }
+
+       if (i < dev->bo_handles.max_key) {
+               atomic_inc(&bo->refcount);
+               *buf_handle = bo;
+               *offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
+       } else {
+               *buf_handle = NULL;
+               *offset_in_bo = 0;
+               r = -ENXIO;
+       }
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+
+       return r;
+}
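
This is the function the commit title refers to: a lookup miss is now
reported as -ENXIO instead of silently handing back a NULL handle. A sketch,
assuming `mapped` is a CPU-mappable BO of at least one page; the interior
offset of 128 and length of 64 are illustrative:

static int example_lookup(amdgpu_device_handle dev, amdgpu_bo_handle mapped)
{
	void *cpu;
	amdgpu_bo_handle found;
	uint64_t offset;
	int r;

	r = amdgpu_bo_cpu_map(mapped, &cpu);
	if (r)
		return r;

	r = amdgpu_find_bo_by_cpu_mapping(dev, (char *)cpu + 128, 64,
					  &found, &offset);
	/* A miss is now -ENXIO; on success the lookup took a reference,
	 * which must be dropped again. */
	if (!r)
		amdgpu_bo_free(found);

	amdgpu_bo_cpu_unmap(mapped);
	return r;
}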
+
 int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                    void *cpu,
                                    uint64_t size,
                                    amdgpu_bo_handle *buf_handle)
 {
        int r;
-       struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;
-       uintptr_t cpu0;
-       uint32_t ps, off;
 
-       memset(&args, 0, sizeof(args));
-       ps = getpagesize();
-
-       cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
-       off = (uintptr_t)cpu - cpu0;
-       size = ROUND_UP(size + off, ps);
-
-       args.addr = cpu0;
-       args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
+       args.addr = (uintptr_t)cpu;
+       args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
+               AMDGPU_GEM_USERPTR_VALIDATE;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
-               return r;
-
-       bo = calloc(1, sizeof(struct amdgpu_bo));
-       if (!bo)
-               return -ENOMEM;
+               goto out;
 
-       atomic_set(&bo->refcount, 1);
-       bo->dev = dev;
-       bo->alloc_size = size;
-       bo->handle = args.handle;
-
-       *buf_handle = bo;
+       r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
+       if (r) {
+               amdgpu_close_kms_handle(dev, args.handle);
+               goto out;
+       }
 
+       pthread_mutex_lock(&dev->bo_table_mutex);
+       r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+                               *buf_handle);
+       pthread_mutex_unlock(&dev->bo_table_mutex);
+       if (r)
+               amdgpu_bo_free(*buf_handle);
+out:
        return r;
 }
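
Note that the rounding that used to happen here is gone: the address and
size now reach the kernel unmodified, together with the new VALIDATE flag,
so the caller must supply page-aligned values. A sketch using anonymous,
page-aligned memory (required both by ANONONLY and by the kernel's
alignment check):

static int example_userptr(amdgpu_device_handle dev, amdgpu_bo_handle *out)
{
	size_t ps = getpagesize();
	void *cpu;

	/* Four pages of anonymous memory, aligned to a page boundary. */
	if (posix_memalign(&cpu, ps, 4 * ps))
		return -ENOMEM;

	return amdgpu_create_bo_from_user_mem(dev, cpu, 4 * ps, out);
}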
 
@@ -660,7 +709,7 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                return -EINVAL;
 
        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
-       if (list == NULL)
+       if (!list)
                return -ENOMEM;
 
        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
@@ -691,21 +740,37 @@ int amdgpu_bo_va_op(amdgpu_bo_handle bo,
                     uint32_t ops)
 {
        amdgpu_device_handle dev = bo->dev;
+
+       size = ALIGN(size, getpagesize());
+
+       return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
+                                  AMDGPU_VM_PAGE_READABLE |
+                                  AMDGPU_VM_PAGE_WRITEABLE |
+                                  AMDGPU_VM_PAGE_EXECUTABLE, ops);
+}
+
+int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
+                       amdgpu_bo_handle bo,
+                       uint64_t offset,
+                       uint64_t size,
+                       uint64_t addr,
+                       uint64_t flags,
+                       uint32_t ops)
+{
        struct drm_amdgpu_gem_va va;
        int r;
 
-       if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
+       if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
+           ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
                return -EINVAL;
 
        memset(&va, 0, sizeof(va));
-       va.handle = bo->handle;
+       va.handle = bo ? bo->handle : 0;
        va.operation = ops;
-       va.flags = AMDGPU_VM_PAGE_READABLE |
-                  AMDGPU_VM_PAGE_WRITEABLE |
-                  AMDGPU_VM_PAGE_EXECUTABLE;
+       va.flags = flags;
        va.va_address = addr;
        va.offset_in_bo = offset;
-       va.map_size = ALIGN(size, getpagesize());
+       va.map_size = size;
 
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
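
The raw variant above hands the VM page flags and the newer REPLACE/CLEAR
operations straight through to the kernel, and unlike amdgpu_bo_va_op() it
does not page-align the size for the caller. A sketch of a read/write
mapping; va_addr is assumed to come from amdgpu_va_range_alloc() and size
to be page-aligned already:

static int example_map(amdgpu_device_handle dev, amdgpu_bo_handle bo,
		       uint64_t size, uint64_t va_addr)
{
	return amdgpu_bo_va_op_raw(dev, bo, 0, size, va_addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE,
				   AMDGPU_VA_OP_MAP);
}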