* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
#include <stdlib.h>
#include <stdio.h>
+#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
-#include "util_hash_table.h"
+#include "util_math.h"
/* Close a GEM handle on the device's DRM fd via DRM_IOCTL_GEM_CLOSE.
 * NOTE(review): this diff hunk is truncated — the declaration and
 * zero-init of `args` (struct drm_gem_close) are elided from this view;
 * confirm against the full file before editing. */
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
uint32_t handle)
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}
-void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
+static int amdgpu_bo_create(amdgpu_device_handle dev,
+ uint64_t size,
+ uint32_t handle,
+ amdgpu_bo_handle *buf_handle)
{
- /* Remove the buffer from the hash tables. */
- pthread_mutex_lock(&bo->dev->bo_table_mutex);
- util_hash_table_remove(bo->dev->bo_handles,
- (void*)(uintptr_t)bo->handle);
- if (bo->flink_name) {
- util_hash_table_remove(bo->dev->bo_flink_names,
- (void*)(uintptr_t)bo->flink_name);
- }
- pthread_mutex_unlock(&bo->dev->bo_table_mutex);
+ struct amdgpu_bo *bo;
- /* Release CPU access. */
- if (bo->cpu_map_count > 0) {
- bo->cpu_map_count = 1;
- amdgpu_bo_cpu_unmap(bo);
- }
+ bo = calloc(1, sizeof(struct amdgpu_bo));
+ if (!bo)
+ return -ENOMEM;
+
+ atomic_set(&bo->refcount, 1);
+ bo->dev = dev;
+ bo->alloc_size = size;
+ bo->handle = handle;
+ pthread_mutex_init(&bo->cpu_access_mutex, NULL);
- amdgpu_close_kms_handle(bo->dev, bo->handle);
- pthread_mutex_destroy(&bo->cpu_access_mutex);
- amdgpu_vamgr_free_va(&bo->dev->vamgr, bo->virtual_mc_base_address, bo->alloc_size);
- free(bo);
+ *buf_handle = bo;
+ return 0;
}
int amdgpu_bo_alloc(amdgpu_device_handle dev,
struct amdgpu_bo_alloc_request *alloc_buffer,
- struct amdgpu_bo_alloc_result *info)
+ amdgpu_bo_handle *buf_handle)
{
- struct amdgpu_bo *bo;
union drm_amdgpu_gem_create args;
unsigned heap = alloc_buffer->preferred_heap;
int r = 0;
if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
return -EINVAL;
- bo = calloc(1, sizeof(struct amdgpu_bo));
- if (!bo)
- return -ENOMEM;
-
- atomic_set(&bo->refcount, 1);
- bo->dev = dev;
- bo->alloc_size = alloc_buffer->alloc_size;
-
memset(&args, 0, sizeof(args));
args.in.bo_size = alloc_buffer->alloc_size;
args.in.alignment = alloc_buffer->phys_alignment;
/* Set the placement. */
- args.in.domains = heap & AMDGPU_GEM_DOMAIN_MASK;
- args.in.domain_flags = alloc_buffer->flags & AMDGPU_GEM_CREATE_CPU_GTT_MASK;
+ args.in.domains = heap;
+ args.in.domain_flags = alloc_buffer->flags;
/* Allocate the buffer with the preferred heap. */
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
&args, sizeof(args));
- if (r) {
- free(bo);
- return r;
- }
-
- bo->handle = args.out.handle;
-
- pthread_mutex_init(&bo->cpu_access_mutex, NULL);
-
- /* map the buffer to the GPU virtual address space */
- {
- union drm_amdgpu_gem_va va;
-
- memset(&va, 0, sizeof(va));
-
- bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
- alloc_buffer->alloc_size,
- alloc_buffer->phys_alignment);
-
- if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
- amdgpu_bo_free_internal(bo);
- return -ENOSPC;
- }
-
- va.in.handle = bo->handle;
- va.in.operation = AMDGPU_VA_OP_MAP;
- va.in.flags = AMDGPU_VM_PAGE_READABLE |
- AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE;
- va.in.va_address = bo->virtual_mc_base_address;
-
- r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
- if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
- amdgpu_bo_free_internal(bo);
- return r;
- }
- pthread_mutex_lock(&dev->bo_table_mutex);
+ if (r)
+ goto out;
- util_hash_table_set(dev->bo_vas,
- (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
- pthread_mutex_unlock(&dev->bo_table_mutex);
+ r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
+ buf_handle);
+ if (r) {
+ amdgpu_close_kms_handle(dev, args.out.handle);
+ goto out;
}
- info->buf_handle = bo;
- info->virtual_mc_base_address = bo->virtual_mc_base_address;
- return 0;
+ pthread_mutex_lock(&dev->bo_table_mutex);
+ r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+ *buf_handle);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ if (r)
+ amdgpu_bo_free(*buf_handle);
+out:
+ return r;
}
/* NOTE(review): truncated diff hunk — it shows the head of
 * amdgpu_bo_set_metadata (BO-handle validation added) and the tail of
 * a separate query/info function; the bodies in between are elided. */
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
struct drm_amdgpu_gem_op gem_op = {};
int r;
+	/* Validate the BO passed in */
+	if (!bo->handle)
+		return -EINVAL;
+
/* Query metadata. */
metadata.handle = bo->handle;
metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
/* Tail of the info-query function: copy kernel-reported properties out.
 * The virtual_mc_base_address field is removed by this patch because
 * VA management moved out of BO allocation. */
memset(info, 0, sizeof(*info));
info->alloc_size = bo_info.bo_size;
info->phys_alignment = bo_info.alignment;
-	info->virtual_mc_base_address = bo->virtual_mc_base_address;
info->preferred_heap = bo_info.domains;
info->alloc_flags = bo_info.domain_flags;
info->metadata.flags = metadata.data.flags;
return 0;
}
/* NOTE(review): truncated hunk — the helper amdgpu_add_handle_to_table
 * is deleted by this patch, and only the head and tail of
 * amdgpu_bo_export_flink are visible; its middle is elided. */
-static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
-{
-	pthread_mutex_lock(&bo->dev->bo_table_mutex);
-	util_hash_table_set(bo->dev->bo_handles,
-			    (void*)(uintptr_t)bo->handle, bo);
-	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-}
-
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
struct drm_gem_flink flink;
}
pthread_mutex_lock(&bo->dev->bo_table_mutex);
/* The flink-name table now uses handle_table_insert, which can fail;
 * propagate its result instead of returning 0 unconditionally. */
-	util_hash_table_set(bo->dev->bo_flink_names,
-			    (void*)(uintptr_t)bo->flink_name,
-			    bo);
+	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-	return 0;
+	return r;
}
/* NOTE(review): truncated hunk of amdgpu_bo_export — the switch head
 * and the flink-name case are elided above the visible lines. */
int amdgpu_bo_export(amdgpu_bo_handle bo,
return 0;
/* KMS export no longer flinks or registers the handle; both kms types
 * just hand back the raw GEM handle. */
case amdgpu_bo_handle_type_kms:
-		r = amdgpu_bo_export_flink(bo);
-		if (r)
-			return r;
-
-		amdgpu_add_handle_to_table(bo);
+	case amdgpu_bo_handle_type_kms_noimport:
*shared_handle = bo->handle;
return 0;
/* dma-buf export now requests a read-write fd (DRM_RDWR added). */
case amdgpu_bo_handle_type_dma_buf_fd:
-		amdgpu_add_handle_to_table(bo);
-		return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
-					  (int*)shared_handle);
+		return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
+					  DRM_CLOEXEC | DRM_RDWR,
+					  (int*)shared_handle);
}
return -EINVAL;
}
/* NOTE(review): amdgpu_bo_import — the first signature line(s) are
 * elided above this hunk, and the diff may hide unchanged context
 * (e.g. the `dma_buf_size = size;` assignment after the lseek pair —
 * verify against the full file). The patch's key change: the whole
 * import path now runs under bo_table_mutex with goto-based cleanup
 * (unlock / free_bo_handle / remove_handle), and VA mapping is no
 * longer performed at import time. */
struct amdgpu_bo_import_result *output)
{
struct drm_gem_open open_arg = {};
-	union drm_amdgpu_gem_va va;
+	struct drm_gem_close close_arg = {};
struct amdgpu_bo *bo = NULL;
-	int r;
+	uint32_t handle = 0, flink_name = 0;
+	uint64_t alloc_size = 0;
+	int r = 0;
int dma_fd;
uint64_t dma_buf_size = 0;
+	/* We must maintain a list of pairs <handle, bo>, so that we always
+	 * return the same amdgpu_bo instance for the same handle. */
+	pthread_mutex_lock(&dev->bo_table_mutex);
+
/* Convert a DMA buf handle to a KMS handle now. */
if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-		uint32_t handle;
off_t size;
/* Get a KMS handle. */
r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
-		if (r) {
-			return r;
-		}
+		if (r)
+			goto unlock;
/* Query the buffer size. */
size = lseek(shared_handle, 0, SEEK_END);
if (size == (off_t)-1) {
-			amdgpu_close_kms_handle(dev, handle);
-			return -errno;
+			r = -errno;
+			goto free_bo_handle;
}
lseek(shared_handle, 0, SEEK_SET);
shared_handle = handle;
}
-	/* We must maintain a list of pairs <handle, bo>, so that we always
-	 * return the same amdgpu_bo instance for the same handle. */
-	pthread_mutex_lock(&dev->bo_table_mutex);
-
/* If we have already created a buffer with this handle, find it. */
switch (type) {
case amdgpu_bo_handle_type_gem_flink_name:
-		bo = util_hash_table_get(dev->bo_flink_names,
-					 (void*)(uintptr_t)shared_handle);
+		bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
break;
case amdgpu_bo_handle_type_dma_buf_fd:
-		bo = util_hash_table_get(dev->bo_handles,
-					 (void*)(uintptr_t)shared_handle);
+		bo = handle_table_lookup(&dev->bo_handles, shared_handle);
break;
case amdgpu_bo_handle_type_kms:
+	case amdgpu_bo_handle_type_kms_noimport:
/* Importing a KMS handle in not allowed. */
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		return -EPERM;
+		r = -EPERM;
+		goto unlock;
default:
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		return -EINVAL;
+		r = -EINVAL;
+		goto unlock;
}
if (bo) {
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-
/* The buffer already exists, just bump the refcount. */
atomic_inc(&bo->refcount);
+		pthread_mutex_unlock(&dev->bo_table_mutex);
output->buf_handle = bo;
output->alloc_size = bo->alloc_size;
-		output->virtual_mc_base_address =
-			bo->virtual_mc_base_address;
return 0;
}
-	bo = calloc(1, sizeof(struct amdgpu_bo));
-	if (!bo) {
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-			amdgpu_close_kms_handle(dev, shared_handle);
-		}
-		return -ENOMEM;
-	}
-
/* Open the handle. */
switch (type) {
case amdgpu_bo_handle_type_gem_flink_name:
open_arg.name = shared_handle;
r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
-		if (r) {
-			free(bo);
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			return r;
-		}
+		if (r)
+			goto unlock;
-		bo->handle = open_arg.handle;
+		flink_name = shared_handle;
+		handle = open_arg.handle;
+		alloc_size = open_arg.size;
if (dev->flink_fd != dev->fd) {
-			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
-			if (r) {
-				free(bo);
-				pthread_mutex_unlock(&dev->bo_table_mutex);
-				return r;
-			}
-			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
-
+			r = drmPrimeHandleToFD(dev->flink_fd, handle,
+					       DRM_CLOEXEC, &dma_fd);
+			if (r)
+				goto free_bo_handle;
+			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
close(dma_fd);
-
-			if (r) {
-				free(bo);
-				pthread_mutex_unlock(&dev->bo_table_mutex);
-				return r;
-			}
/* The flink_fd's handle was only a stepping stone to a handle on
 * dev->fd; close it so it does not leak. */
+			close_arg.handle = open_arg.handle;
+			r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
+				     &close_arg);
+			if (r)
+				goto free_bo_handle;
}
-		bo->flink_name = shared_handle;
-		bo->alloc_size = open_arg.size;
-		util_hash_table_set(dev->bo_flink_names,
-				    (void*)(uintptr_t)bo->flink_name, bo);
break;
case amdgpu_bo_handle_type_dma_buf_fd:
-		bo->handle = shared_handle;
-		bo->alloc_size = dma_buf_size;
+		handle = shared_handle;
+		alloc_size = dma_buf_size;
break;
case amdgpu_bo_handle_type_kms:
+	case amdgpu_bo_handle_type_kms_noimport:
assert(0); /* unreachable */
}
/* Initialize it. */
-	atomic_set(&bo->refcount, 1);
-	bo->dev = dev;
-	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
-
-	bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, bo->alloc_size, 1 << 20);
-
-	if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		amdgpu_bo_reference(&bo, NULL);
-		return -ENOSPC;
-	}
+	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
+	if (r)
+		goto free_bo_handle;
-	memset(&va, 0, sizeof(va));
-	va.in.handle = bo->handle;
-	va.in.operation = AMDGPU_VA_OP_MAP;
-	va.in.va_address = bo->virtual_mc_base_address;
-	va.in.flags =	AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
-			AMDGPU_VM_PAGE_EXECUTABLE;
+	r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
+	if (r)
+		goto free_bo_handle;
+	if (flink_name) {
+		bo->flink_name = flink_name;
+		r = handle_table_insert(&dev->bo_flink_names, flink_name,
+					bo);
+		if (r)
+			goto remove_handle;
-	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
-	if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		amdgpu_vamgr_free_va(&dev->vamgr, bo->virtual_mc_base_address, bo->alloc_size);
-		amdgpu_bo_reference(&bo, NULL);
-		return r;
}
-	util_hash_table_set(dev->bo_vas,
-			    (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
-	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
-	pthread_mutex_unlock(&dev->bo_table_mutex);
-
output->buf_handle = bo;
output->alloc_size = bo->alloc_size;
-	output->virtual_mc_base_address = bo->virtual_mc_base_address;
+	pthread_mutex_unlock(&dev->bo_table_mutex);
return 0;
+
/* Error unwind: undo table inserts, close the flink-side handle if it
 * was never closed on the success path, and release whichever of
 * bo / raw handle we currently own. */
+remove_handle:
+	handle_table_remove(&dev->bo_handles, bo->handle);
+free_bo_handle:
+	if (flink_name && !close_arg.handle && open_arg.handle) {
+		close_arg.handle = open_arg.handle;
+		drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
+	}
+	if (bo)
+		amdgpu_bo_free(bo);
+	else
+		amdgpu_close_kms_handle(dev, handle);
+unlock:
+	pthread_mutex_unlock(&dev->bo_table_mutex);
+	return r;
}
/**
 * Drop a reference to a buffer object.
 *
 * When the last reference is released the bo is removed from the
 * device's handle and flink-name tables, any leftover CPU mapping is
 * torn down, the KMS handle is closed and the storage is freed.
 * The whole teardown runs under bo_table_mutex so a concurrent import
 * cannot resurrect a half-destroyed bo.
 *
 * \return 0 always
 */
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_bo *bo = buf_handle;
	struct amdgpu_device *dev;

	assert(bo != NULL);
	dev = bo->dev;

	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Last reference gone: unpublish the bo. */
		handle_table_remove(&dev->bo_handles, bo->handle);
		if (bo->flink_name)
			handle_table_remove(&dev->bo_flink_names,
					    bo->flink_name);

		/* Force-release any outstanding CPU mapping. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		amdgpu_close_kms_handle(dev, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);

	return 0;
}
/* NOTE(review): tail fragment of the CPU-unmap path — the function
 * head is elided. The patch changes the "not mapped" error from the
 * unusual -EBADMSG to the conventional -EINVAL. */
if (bo->cpu_map_count == 0) {
/* not mapped */
pthread_mutex_unlock(&bo->cpu_access_mutex);
-		return -EBADMSG;
+		return -EINVAL;
}
bo->cpu_map_count--;
}
}
+int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+ void *cpu,
+ uint64_t size,
+ amdgpu_bo_handle *buf_handle,
+ uint64_t *offset_in_bo)
+{
+ struct amdgpu_bo *bo;
+ uint32_t i;
+ int r = 0;
+
+ if (cpu == NULL || size == 0)
+ return -EINVAL;
+
+ /*
+ * Workaround for a buggy application which tries to import previously
+ * exposed CPU pointers. If we find a real world use case we should
+ * improve that by asking the kernel for the right handle.
+ */
+ pthread_mutex_lock(&dev->bo_table_mutex);
+ for (i = 0; i < dev->bo_handles.max_key; i++) {
+ bo = handle_table_lookup(&dev->bo_handles, i);
+ if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
+ continue;
+ if (cpu >= bo->cpu_ptr &&
+ cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
+ break;
+ }
+
+ if (i < dev->bo_handles.max_key) {
+ atomic_inc(&bo->refcount);
+ *buf_handle = bo;
+ *offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
+ } else {
+ *buf_handle = NULL;
+ *offset_in_bo = 0;
+ r = -ENXIO;
+ }
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+
+ return r;
+}
+
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
- struct amdgpu_bo_alloc_result *info)
+ amdgpu_bo_handle *buf_handle)
{
int r;
- struct amdgpu_bo *bo;
struct drm_amdgpu_gem_userptr args;
- union drm_amdgpu_gem_va va;
- uintptr_t cpu0;
- uint32_t ps, off;
- memset(&args, 0, sizeof(args));
- ps = getpagesize();
-
- cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
- off = (uintptr_t)cpu - cpu0;
- size = ROUND_UP(size + off, ps);
-
- args.addr = cpu0;
- args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
+ args.addr = (uintptr_t)cpu;
+ args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
+ AMDGPU_GEM_USERPTR_VALIDATE;
args.size = size;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
&args, sizeof(args));
if (r)
- return r;
-
- bo = calloc(1, sizeof(struct amdgpu_bo));
- if (!bo)
- return -ENOMEM;
-
- atomic_set(&bo->refcount, 1);
- bo->dev = dev;
- bo->alloc_size = size;
- bo->handle = args.handle;
- bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, size, 4 * 1024);
+ goto out;
- if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
- amdgpu_bo_free_internal(bo);
- return -ENOSPC;
+ r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
+ if (r) {
+ amdgpu_close_kms_handle(dev, args.handle);
+ goto out;
}
- memset(&va, 0, sizeof(va));
- va.in.handle = bo->handle;
- va.in.operation = AMDGPU_VA_OP_MAP;
- va.in.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE;
- va.in.va_address = bo->virtual_mc_base_address;
- r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
- if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
- amdgpu_bo_free_internal(bo);
- return r;
- }
pthread_mutex_lock(&dev->bo_table_mutex);
- util_hash_table_set(dev->bo_vas,
- (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
+ r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+ *buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
- info->buf_handle = bo;
- info->virtual_mc_base_address = bo->virtual_mc_base_address;
- info->virtual_mc_base_address += off;
-
+ if (r)
+ amdgpu_bo_free(*buf_handle);
+out:
return r;
}
/* NOTE(review): fragment of the bo-list-create path — the function
 * header and the loop filling `list` are elided. The patch adds a
 * zero-count check and an explicit u32 overflow guard before the
 * multiplication, switches calloc -> malloc (entries are fully
 * written before use — presumably in the elided loop; verify), and
 * allocates *result up front so the ioctl error path stays simple. */
unsigned i;
int r;
-	list = calloc(number_of_resources, sizeof(struct drm_amdgpu_bo_list_entry));
+	if (!number_of_resources)
+		return -EINVAL;
+
+	/* overflow check for multiplication */
+	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+		return -EINVAL;
-	if (list == NULL)
+	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+	if (!list)
return -ENOMEM;
+	*result = malloc(sizeof(struct amdgpu_bo_list));
+	if (!*result) {
+		free(list);
+		return -ENOMEM;
+	}
+
memset(&args, 0, sizeof(args));
args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
args.in.bo_number = number_of_resources;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
&args, sizeof(args));
/* list is consumed by the ioctl; free it unconditionally, and free
 * *result only on ioctl failure. */
+	free(list);
if (r) {
+		free(*result);
+		return r;
+	}
-	if (r)
-		goto out;
-	*result = calloc(1, sizeof(struct amdgpu_bo_list));
(*result)->dev = dev;
(*result)->handle = args.out.list_handle;
-out:
-	free(list);
-	return r;
+	return 0;
}
/* NOTE(review): header line of amdgpu_bo_list_destroy (body elided)
 * followed by a fragment of the bo-list-update path. The patch adds
 * the same zero-count and u32 overflow guards as the create path and
 * switches calloc -> malloc. It also deletes the memset of `args`;
 * fields not assigned below would keep garbage — presumably every
 * input field is set in the elided lines; verify against the full
 * file. */
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
unsigned i;
int r;
-	list = calloc(number_of_resources, sizeof(struct drm_amdgpu_bo_list_entry));
-	if (list == NULL)
+	if (!number_of_resources)
+		return -EINVAL;
+
+	/* overflow check for multiplication */
+	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+		return -EINVAL;
+
+	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+	if (!list)
return -ENOMEM;
-	memset(&args, 0, sizeof(args));
args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
args.in.list_handle = handle->handle;
args.in.bo_number = number_of_resources;
free(list);
return r;
}
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops)
+{
+ amdgpu_device_handle dev = bo->dev;
+
+ size = ALIGN(size, getpagesize());
+
+ return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
+ AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE, ops);
+}
+
+int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
+ amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops)
+{
+ struct drm_amdgpu_gem_va va;
+ int r;
+
+ if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
+ ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
+ return -EINVAL;
+
+ memset(&va, 0, sizeof(va));
+ va.handle = bo ? bo->handle : 0;
+ va.operation = ops;
+ va.flags = flags;
+ va.va_address = addr;
+ va.offset_in_bo = offset;
+ va.map_size = size;
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
+
+ return r;
+}