#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
+#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
-#include "intel_aub.h"
#include "string.h"
#include "i915_drm.h"
+#include "uthash.h"
#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif
-#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
+#define memclear(s) memset(&s, 0, sizeof(s))
#define DBG(...) do { \
	if (bufmgr_gem->bufmgr.debug) \
		fprintf(stderr, __VA_ARGS__); \
} while (0)
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define MAX2(A, B) ((A) > (B) ? (A) : (B))
+
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
+
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((__u32)(n))
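+
+/*
+ * Editor's sketch (not part of this patch): how the two helpers above
+ * combine when printing a 64-bit GTT offset as two 32-bit halves, as
+ * the DBG() relocation dumps below do.
+ */
+#if 0	/* illustrative only */
+	uint64_t offset = 0x100002000ull;
+	DBG("offset 0x%08x %08x\n",
+	    upper_32_bits(offset),	/* 0x00000001 */
+	    lower_32_bits(offset));	/* 0x00002000 */
+#endif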
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
typedef struct _drm_intel_bufmgr_gem {
drm_intel_bufmgr bufmgr;
+ atomic_t refcount;
+
int fd;
int max_relocs;
int num_buckets;
time_t time;
- drmMMListHead named;
+ drmMMListHead managers;
+
+ drm_intel_bo_gem *name_table;
+ drm_intel_bo_gem *handle_table;
+
drmMMListHead vma_cache;
int vma_count, vma_open, vma_max;
unsigned int has_vebox : 1;
bool fenced_relocs;
- char *aub_filename;
- FILE *aub_file;
- uint32_t aub_offset;
+ struct {
+ void *ptr;
+ uint32_t handle;
+ } userptr_active;
+
} drm_intel_bufmgr_gem;
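+
+/*
+ * Editor's note: handle_table and name_table above are uthash tables
+ * keyed on gem_handle and global_name respectively. They replace the
+ * old linear "named" list, so flink- and prime-imported buffers are
+ * deduplicated with O(1) HASH_FIND lookups instead of a list walk.
+ */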
#define DRM_INTEL_RELOC_FENCE (1<<0)
/**
	 * Kernel-assigned global name for this object
+ *
+ * List contains both flink named and prime fd'd objects
*/
unsigned int global_name;
- drmMMListHead name_list;
+
+ UT_hash_handle handle_hh;
+ UT_hash_handle name_hh;
	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	drm_intel_reloc_target *reloc_target_info;
/** Number of entries in relocs */
int reloc_count;
+ /** Array of BOs that are referenced by this buffer and will be softpinned */
+ drm_intel_bo **softpin_target;
+ /** Number softpinned BOs that are referenced by this buffer */
+ int softpin_target_count;
+ /** Maximum amount of softpinned BOs that are referenced by this buffer */
+ int softpin_target_size;
+
/** Mapped address for the buffer, saved across map/unmap cycles */
void *mem_virtual;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
void *gtt_virtual;
+ /** WC CPU address for the buffer, saved across map/unmap cycles */
+ void *wc_virtual;
+ /**
+ * Virtual address of the buffer allocated by user, used for userptr
+ * objects only.
+ */
+ void *user_virtual;
int map_count;
drmMMListHead vma_list;
bool reusable;
/**
+ * Boolean of whether the GPU is definitely not accessing the buffer.
+ *
+ * This is only valid when reusable, since non-reusable
+ * buffers are those that have been shared with other
+ * processes, so we don't know their state.
+ */
+ bool idle;
+
+ /**
+ * Boolean of whether this buffer was allocated with userptr
+ */
+ bool is_userptr;
+
+ /**
+ * Boolean of whether this buffer can be placed in the full 48-bit
+ * address range on gen8+.
+ *
+	 * By default, buffers will be kept in a 32-bit range, unless this
+ * flag is explicitly set.
+ */
+ bool use_48b_address_range;
+
+ /**
+ * Whether this buffer is softpinned at offset specified by the user
+ */
+ bool is_softpin;
+
+ /**
* Size in bytes of this buffer and its relocation descendents.
*
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;
- /** Flags that we may need to do the SW_FINSIH ioctl on unmap. */
+ /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
bool mapped_cpu_write;
-
- uint32_t aub_offset;
-
- drm_intel_aub_annotation *aub_annotations;
- unsigned aub_annotation_count;
};
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
static void drm_intel_gem_bo_free(drm_intel_bo *bo);
+static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
+{
+ return (drm_intel_bo_gem *)bo;
+}
+
static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
uint32_t *tiling_mode)
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- if (bo_gem->relocs == NULL) {
- DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
+ DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
bo_gem->name);
continue;
}
drm_intel_bo_gem *target_gem =
(drm_intel_bo_gem *) target_bo;
- DBG("%2d: %d (%s)@0x%08llx -> "
- "%d (%s)@0x%08lx + 0x%08x\n",
+ DBG("%2d: %d %s(%s)@0x%08x %08x -> "
+ "%d (%s)@0x%08x %08x + 0x%08x\n",
i,
- bo_gem->gem_handle, bo_gem->name,
- (unsigned long long)bo_gem->relocs[j].offset,
+ bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
+ bo_gem->name,
+ upper_32_bits(bo_gem->relocs[j].offset),
+ lower_32_bits(bo_gem->relocs[j].offset),
target_gem->gem_handle,
target_gem->name,
- target_bo->offset,
+ upper_32_bits(target_bo->offset64),
+ lower_32_bits(target_bo->offset64),
bo_gem->relocs[j].delta);
}
+
+ for (j = 0; j < bo_gem->softpin_target_count; j++) {
+ drm_intel_bo *target_bo = bo_gem->softpin_target[j];
+ drm_intel_bo_gem *target_gem =
+ (drm_intel_bo_gem *) target_bo;
+ DBG("%2d: %d %s(%s) -> "
+ "%d *(%s)@0x%08x %08x\n",
+ i,
+ bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
+ bo_gem->name,
+ target_gem->gem_handle,
+ target_gem->name,
+ upper_32_bits(target_bo->offset64),
+ lower_32_bits(target_bo->offset64));
+ }
}
}
bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
- bufmgr_gem->exec_objects[index].alignment = 0;
+ bufmgr_gem->exec_objects[index].alignment = bo->align;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
bufmgr_gem->exec_count++;
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int index;
+ int flags = 0;
+
+ if (need_fence)
+ flags |= EXEC_OBJECT_NEEDS_FENCE;
+ if (bo_gem->use_48b_address_range)
+ flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ if (bo_gem->is_softpin)
+ flags |= EXEC_OBJECT_PINNED;
if (bo_gem->validate_index != -1) {
- if (need_fence)
- bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
- EXEC_OBJECT_NEEDS_FENCE;
+ bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
return;
}
bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
- bufmgr_gem->exec2_objects[index].alignment = 0;
- bufmgr_gem->exec2_objects[index].offset = 0;
+ bufmgr_gem->exec2_objects[index].alignment = bo->align;
+ bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
+ bo->offset64 : 0;
bufmgr_gem->exec_bos[index] = bo;
- bufmgr_gem->exec2_objects[index].flags = 0;
+ bufmgr_gem->exec2_objects[index].flags = flags;
bufmgr_gem->exec2_objects[index].rsvd1 = 0;
bufmgr_gem->exec2_objects[index].rsvd2 = 0;
- if (need_fence) {
- bufmgr_gem->exec2_objects[index].flags |=
- EXEC_OBJECT_NEEDS_FENCE;
- }
bufmgr_gem->exec_count++;
}
static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
- drm_intel_bo_gem *bo_gem)
+ drm_intel_bo_gem *bo_gem,
+ unsigned int alignment)
{
- int size;
+ unsigned int size;
	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far-less flexible in terms of tiling,
	 * and require tiled buffer to be size aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
- int min_size;
+ unsigned int min_size;
if (bufmgr_gem->has_relaxed_fencing) {
if (bufmgr_gem->gen == 3)
min_size = size;
/* Account for worst-case alignment. */
- size = 2 * min_size;
+ alignment = MAX2(alignment, min_size);
}
- bo_gem->reloc_tree_size = size;
+ bo_gem->reloc_tree_size = size + alignment;
}
static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
int ret;
- VG_CLEAR(busy);
+ if (bo_gem->reusable && bo_gem->idle)
+ return false;
+
+ memclear(busy);
busy.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
-
+ if (ret == 0) {
+ bo_gem->idle = !busy.busy;
+ return busy.busy;
+ } else {
+ return false;
+ }
-	return (ret == 0 && busy.busy);
}
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
struct drm_i915_gem_madvise madv;
- VG_CLEAR(madv);
+ memclear(madv);
madv.handle = bo_gem->gem_handle;
madv.madv = state;
madv.retained = 1;
unsigned long size,
unsigned long flags,
uint32_t tiling_mode,
- unsigned long stride)
+ unsigned long stride,
+ unsigned int alignment)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
bucket->head.prev, head);
DRMLISTDEL(&bo_gem->head);
alloc_from_cache = true;
+ bo_gem->bo.align = alignment;
} else {
+ assert(alignment == 0);
/* For non-render-target BOs (where we're probably
* going to map it first thing in order to fill it
* with data), check if the last BO in the cache is
}
}
}
- pthread_mutex_unlock(&bufmgr_gem->lock);
if (!alloc_from_cache) {
struct drm_i915_gem_create create;
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
- return NULL;
+ goto err;
+
+ /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
+ list (vma_list), so better set the list head here */
+ DRMINITLISTHEAD(&bo_gem->vma_list);
bo_gem->bo.size = bo_size;
- VG_CLEAR(create);
+ memclear(create);
create.size = bo_size;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_CREATE,
&create);
- bo_gem->gem_handle = create.handle;
- bo_gem->bo.handle = bo_gem->gem_handle;
if (ret != 0) {
free(bo_gem);
- return NULL;
+ goto err;
}
+
+ bo_gem->gem_handle = create.handle;
+ bo_gem->bo.handle = bo_gem->gem_handle;
bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->bo.align = alignment;
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
tiling_mode,
- stride)) {
- drm_intel_gem_bo_free(&bo_gem->bo);
- return NULL;
- }
+ stride))
+ goto err_free;
- DRMINITLISTHEAD(&bo_gem->name_list);
- DRMINITLISTHEAD(&bo_gem->vma_list);
+ HASH_ADD(handle_hh, bufmgr_gem->handle_table,
+ gem_handle, sizeof(bo_gem->gem_handle),
+ bo_gem);
}
bo_gem->name = name;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = true;
- bo_gem->aub_annotations = NULL;
- bo_gem->aub_annotation_count = 0;
+ bo_gem->use_48b_address_range = false;
- drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
DBG("bo_create: buf %d (%s) %ldb\n",
bo_gem->gem_handle, bo_gem->name, size);
return &bo_gem->bo;
+
+err_free:
+ drm_intel_gem_bo_free(&bo_gem->bo);
+err:
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
}
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
BO_ALLOC_FOR_RENDER,
- I915_TILING_NONE, 0);
+ I915_TILING_NONE, 0,
+ alignment);
}
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
- I915_TILING_NONE, 0);
+ I915_TILING_NONE, 0, 0);
}
static drm_intel_bo *
stride = 0;
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
- tiling, stride);
+ tiling, stride, 0);
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ int ret;
+ struct drm_i915_gem_userptr userptr;
+
+ /* Tiling with userptr surfaces is not supported
+	 * on all hardware so refuse it for the time being.
+ */
+ if (tiling_mode != I915_TILING_NONE)
+ return NULL;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ atomic_set(&bo_gem->refcount, 1);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+
+ bo_gem->bo.size = size;
+
+ memclear(userptr);
+ userptr.user_ptr = (__u64)((unsigned long)addr);
+ userptr.user_size = size;
+ userptr.flags = flags;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_USERPTR,
+ &userptr);
+ if (ret != 0) {
+ DBG("bo_create_userptr: "
+ "ioctl failed with user ptr %p size 0x%lx, "
+ "user flags 0x%lx\n", addr, size, flags);
+ free(bo_gem);
+ return NULL;
+ }
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ bo_gem->gem_handle = userptr.handle;
+ bo_gem->bo.handle = bo_gem->gem_handle;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->is_userptr = true;
+ bo_gem->bo.virtual = addr;
+ /* Save the address provided by user */
+ bo_gem->user_virtual = addr;
+ bo_gem->tiling_mode = I915_TILING_NONE;
+ bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo_gem->stride = 0;
+
+ HASH_ADD(handle_hh, bufmgr_gem->handle_table,
+ gem_handle, sizeof(bo_gem->gem_handle),
+ bo_gem);
+
+ bo_gem->name = name;
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = false;
+ bo_gem->use_48b_address_range = false;
+
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ DBG("bo_create_userptr: "
+ "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
+ addr, bo_gem->gem_handle, bo_gem->name,
+ size, stride, tiling_mode);
+
+ return &bo_gem->bo;
+}
+
+static bool
+has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int ret;
+ void *ptr;
+ long pgsz;
+ struct drm_i915_gem_userptr userptr;
+
+ pgsz = sysconf(_SC_PAGESIZE);
+ assert(pgsz > 0);
+
+ ret = posix_memalign(&ptr, pgsz, pgsz);
+ if (ret) {
+ DBG("Failed to get a page (%ld) for userptr detection!\n",
+ pgsz);
+ return false;
+ }
+
+ memclear(userptr);
+ userptr.user_ptr = (__u64)(unsigned long)ptr;
+ userptr.user_size = pgsz;
+
+retry:
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+ if (ret) {
+ if (errno == ENODEV && userptr.flags == 0) {
+ userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
+ goto retry;
+ }
+ free(ptr);
+ return false;
+ }
+
+ /* We don't release the userptr bo here as we want to keep the
+ * kernel mm tracking alive for our lifetime. The first time we
+	 * create a userptr object the kernel has to install a mmu_notifier
+ * which is a heavyweight operation (e.g. it requires taking all
+ * mm_locks and stop_machine()).
+ */
+
+ bufmgr_gem->userptr_active.ptr = ptr;
+ bufmgr_gem->userptr_active.handle = userptr.handle;
+
+ return true;
+}
+
+static drm_intel_bo *
+check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
+ if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
+ bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
+ else
+ bufmgr->bo_alloc_userptr = NULL;
+
+ return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
+ tiling_mode, stride, size, flags);
}
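+
+/*
+ * Editor's sketch (assumption, not part of this patch): a minimal
+ * caller of the userptr path above. The address and size passed to
+ * drm_intel_bo_alloc_userptr() must be page-aligned for the
+ * DRM_IOCTL_I915_GEM_USERPTR ioctl to succeed.
+ */
+#if 0	/* illustrative only */
+	long pgsz = sysconf(_SC_PAGESIZE);
+	void *ptr;
+	drm_intel_bo *bo = NULL;
+
+	if (posix_memalign(&ptr, pgsz, 4 * pgsz) == 0)
+		bo = drm_intel_bo_alloc_userptr(bufmgr, "example", ptr,
+						I915_TILING_NONE, 0,
+						4 * pgsz, 0);
+#endif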
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
struct drm_gem_open open_arg;
struct drm_i915_gem_get_tiling get_tiling;
- drmMMListHead *list;
-	/* At the moment most applications only have a few named bo.
-	 * For instance, in a DRI client only the render buffers passed
-	 * between X and the client are named. And since X returns the
-	 * alternating names for the front/back buffer a linear search
-	 * provides a sufficiently fast match.
-	 */
- for (list = bufmgr_gem->named.next;
- list != &bufmgr_gem->named;
- list = list->next) {
- bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
- if (bo_gem->global_name == handle) {
- drm_intel_gem_bo_reference(&bo_gem->bo);
- return &bo_gem->bo;
- }
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ HASH_FIND(name_hh, bufmgr_gem->name_table,
+ &handle, sizeof(handle), bo_gem);
+ if (bo_gem) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ goto out;
}
- bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
- return NULL;
-
- VG_CLEAR(open_arg);
+ memclear(open_arg);
open_arg.name = handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_GEM_OPEN,
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
- free(bo_gem);
- return NULL;
+ bo_gem = NULL;
+ goto out;
}
+ /* Now see if someone has used a prime handle to get this
+ * object from the kernel before by looking through the list
+ * again for a matching gem_handle
+ */
+ HASH_FIND(handle_hh, bufmgr_gem->handle_table,
+ &open_arg.handle, sizeof(open_arg.handle), bo_gem);
+ if (bo_gem) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ goto out;
+ }
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ goto out;
+
+ atomic_set(&bo_gem->refcount, 1);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
+ bo_gem->bo.offset64 = 0;
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = name;
- atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->gem_handle = open_arg.handle;
bo_gem->bo.handle = open_arg.handle;
bo_gem->global_name = handle;
bo_gem->reusable = false;
+ bo_gem->use_48b_address_range = false;
+
+ HASH_ADD(handle_hh, bufmgr_gem->handle_table,
+ gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
+ HASH_ADD(name_hh, bufmgr_gem->name_table,
+ global_name, sizeof(bo_gem->global_name), bo_gem);
- VG_CLEAR(get_tiling);
+ memclear(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_GET_TILING,
&get_tiling);
- if (ret != 0) {
- drm_intel_gem_bo_unreference(&bo_gem->bo);
- return NULL;
- }
+ if (ret != 0)
+ goto err_unref;
+
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
- drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
-
- DRMINITLISTHEAD(&bo_gem->vma_list);
- DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+out:
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return &bo_gem->bo;
+
+err_unref:
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
}
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
if (bo_gem->mem_virtual) {
VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
- munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ bufmgr_gem->vma_count--;
+ }
+ if (bo_gem->wc_virtual) {
+ VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
+ drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
bufmgr_gem->vma_count--;
}
if (bo_gem->gtt_virtual) {
- munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
bufmgr_gem->vma_count--;
}
+ if (bo_gem->global_name)
+ HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
+ HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
+
/* Close this object */
- VG_CLEAR(close);
+ memclear(close);
close.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
- free(bo_gem->aub_annotations);
free(bo);
}
if (bo_gem->mem_virtual)
VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
+ if (bo_gem->wc_virtual)
+ VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
+
if (bo_gem->gtt_virtual)
VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
DRMLISTDELINIT(&bo_gem->vma_list);
if (bo_gem->mem_virtual) {
- munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
bo_gem->mem_virtual = NULL;
bufmgr_gem->vma_count--;
}
+ if (bo_gem->wc_virtual) {
+ drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
+ bo_gem->wc_virtual = NULL;
+ bufmgr_gem->vma_count--;
+ }
if (bo_gem->gtt_virtual) {
- munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
bo_gem->gtt_virtual = NULL;
bufmgr_gem->vma_count--;
}
DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
if (bo_gem->mem_virtual)
bufmgr_gem->vma_count++;
+ if (bo_gem->wc_virtual)
+ bufmgr_gem->vma_count++;
if (bo_gem->gtt_virtual)
bufmgr_gem->vma_count++;
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
DRMLISTDEL(&bo_gem->vma_list);
if (bo_gem->mem_virtual)
bufmgr_gem->vma_count--;
+ if (bo_gem->wc_virtual)
+ bufmgr_gem->vma_count--;
if (bo_gem->gtt_virtual)
bufmgr_gem->vma_count--;
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
time);
}
}
+ for (i = 0; i < bo_gem->softpin_target_count; i++)
+ drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
+ time);
bo_gem->reloc_count = 0;
bo_gem->used_as_reloc_target = false;
+ bo_gem->softpin_target_count = 0;
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
free(bo_gem->relocs);
bo_gem->relocs = NULL;
}
+ if (bo_gem->softpin_target) {
+ free(bo_gem->softpin_target);
+ bo_gem->softpin_target = NULL;
+ bo_gem->softpin_target_size = 0;
+ }
/* Clear any left-over mappings */
if (bo_gem->map_count) {
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
}
- DRMLISTDEL(&bo_gem->name_list);
-
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0);
- if (atomic_dec_and_test(&bo_gem->refcount)) {
+
+ if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
pthread_mutex_lock(&bufmgr_gem->lock);
- drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
- drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+
+ if (atomic_dec_and_test(&bo_gem->refcount)) {
+ drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+ drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+ }
+
pthread_mutex_unlock(&bufmgr_gem->lock);
}
}
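+
+/*
+ * Editor's note on the pattern above: atomic_add_unless(&refcount, -1, 1)
+ * decrements and returns false while the count is above one, so the
+ * common unreference path never takes the bufmgr lock. Only a potential
+ * final reference falls through to the locked atomic_dec_and_test(),
+ * which serialises teardown against concurrent lookups (flink/prime
+ * import) that might revive the buffer.
+ */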
struct drm_i915_gem_set_domain set_domain;
int ret;
+ if (bo_gem->is_userptr) {
+ /* Return the same user ptr */
+ bo->virtual = bo_gem->user_virtual;
+ return 0;
+ }
+
pthread_mutex_lock(&bufmgr_gem->lock);
	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	DBG("bo_map: %d (%s), map_count=%d\n",
	    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
- VG_CLEAR(mmap_arg);
+ memclear(mmap_arg);
mmap_arg.handle = bo_gem->gem_handle;
- mmap_arg.offset = 0;
mmap_arg.size = bo->size;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_MMAP,
		       &mmap_arg);
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
bo->virtual = bo_gem->mem_virtual;
- VG_CLEAR(set_domain);
+ memclear(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
if (write_enable)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
if (bo_gem->map_count++ == 0)
drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
- VG_CLEAR(mmap_arg);
+ memclear(mmap_arg);
mmap_arg.handle = bo_gem->gem_handle;
/* Get the fake offset back... */
}
/* and mmap it */
- bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
- MAP_SHARED, bufmgr_gem->fd,
- mmap_arg.offset);
+ bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
if (bo_gem->gtt_virtual == MAP_FAILED) {
bo_gem->gtt_virtual = NULL;
ret = -errno;
return 0;
}
-int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+int
+drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
* tell it when we're about to use things if we had done
* rendering and it still happens to be bound to the GTT.
*/
- VG_CLEAR(set_domain);
+ memclear(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = I915_GEM_DOMAIN_GTT;
* undefined).
*/
-int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
+int
+drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+#ifdef HAVE_VALGRIND
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+#endif
int ret;
/* If the CPU cache isn't coherent with the GTT, then use a
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret = 0;
if (bo == NULL)
return 0;
+ if (bo_gem->is_userptr)
+ return 0;
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->map_count <= 0) {
* Unlike GTT set domains, this only does work if the
* buffer should be scanout-related.
*/
- VG_CLEAR(sw_finish);
+ memclear(sw_finish);
sw_finish.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_SW_FINISH,
			       &sw_finish);
	}
-	/* We need to unmap after every innovation as we cannot track
+	/* We need to unmap after every invocation as we cannot track
- * an open vma for every bo as that will exhaasut the system
+ * an open vma for every bo as that will exhaust the system
* limits and cause later failures.
*/
if (--bo_gem->map_count == 0) {
return ret;
}
-int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+int
+drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_unmap(bo);
}
struct drm_i915_gem_pwrite pwrite;
int ret;
- VG_CLEAR(pwrite);
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
+ memclear(pwrite);
pwrite.handle = bo_gem->gem_handle;
pwrite.offset = offset;
pwrite.size = size;
struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
int ret;
- VG_CLEAR(get_pipe_from_crtc_id);
+ memclear(get_pipe_from_crtc_id);
get_pipe_from_crtc_id.crtc_id = crtc_id;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
struct drm_i915_gem_pread pread;
int ret;
- VG_CLEAR(pread);
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
+ memclear(pread);
pread.handle = bo_gem->gem_handle;
pread.offset = offset;
pread.size = size;
* not guarantee that the buffer is re-issued via another thread, or an flinked
* handle. Userspace must make sure this race does not occur if such precision
* is important.
+ *
+ * Note that some kernels have broken the infinite wait for negative values
+ * promise; upgrade to the latest stable kernels if this is the case.
*/
-int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+int
+drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
}
}
+ memclear(wait);
wait.bo_handle = bo_gem->gem_handle;
wait.timeout_ns = timeout_ns;
- wait.flags = 0;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
if (ret == -1)
return -errno;
struct drm_i915_gem_set_domain set_domain;
int ret;
- VG_CLEAR(set_domain);
+ memclear(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
- int i;
+ struct drm_gem_close close_bo;
+ int i, ret;
free(bufmgr_gem->exec2_objects);
free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
- free(bufmgr_gem->aub_filename);
pthread_mutex_destroy(&bufmgr_gem->lock);
}
}
+ /* Release userptr bo kept hanging around for optimisation. */
+ if (bufmgr_gem->userptr_active.ptr) {
+ memclear(close_bo);
+ close_bo.handle = bufmgr_gem->userptr_active.handle;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
+ free(bufmgr_gem->userptr_active.ptr);
+ if (ret)
+ fprintf(stderr,
+ "Failed to release test userptr object! (%d) "
+ "i915 kernel driver may not be sane!\n", errno);
+ }
+
free(bufmgr);
}
assert(offset <= bo->size - 4);
assert((write_domain & (write_domain - 1)) == 0);
+ /* An object needing a fence is a tiled buffer, so it won't have
+ * relocs to other buffers.
+ */
+ if (need_fence) {
+ assert(target_bo_gem->reloc_count == 0);
+ target_bo_gem->reloc_tree_fences = 1;
+ }
+
/* Make sure that we're not adding a reloc to something whose size has
* already been accounted for.
*/
if (target_bo_gem != bo_gem) {
target_bo_gem->used_as_reloc_target = true;
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+ bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
}
- /* An object needing a fence is a tiled buffer, so it won't have
- * relocs to other buffers.
- */
- if (need_fence)
- target_bo_gem->reloc_tree_fences = 1;
- bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
-
- bo_gem->relocs[bo_gem->reloc_count].offset = offset;
- bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
- bo_gem->relocs[bo_gem->reloc_count].target_handle =
- target_bo_gem->gem_handle;
- bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
- bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
- bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
	if (fenced_command)
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
	else
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
+ bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+ bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
+ bo_gem->relocs[bo_gem->reloc_count].target_handle =
+ target_bo_gem->gem_handle;
+ bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+ bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+ bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
bo_gem->reloc_count++;
return 0;
}
-static int
-drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
- drm_intel_bo *target_bo, uint32_t target_offset,
- uint32_t read_domains, uint32_t write_domain)
+static void
+drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-
- return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
- read_domains, write_domain,
- !bufmgr_gem->fenced_relocs);
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ bo_gem->use_48b_address_range = enable;
}
static int
-drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
- drm_intel_bo *target_bo,
- uint32_t target_offset,
- uint32_t read_domains, uint32_t write_domain)
-{
- return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
- read_domains, write_domain, true);
-}
-
-int
-drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
+drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+ if (bo_gem->has_error)
+ return -ENOMEM;
+
+ if (target_bo_gem->has_error) {
+ bo_gem->has_error = true;
+ return -ENOMEM;
+ }
+
+ if (!target_bo_gem->is_softpin)
+ return -EINVAL;
+ if (target_bo_gem == bo_gem)
+ return -EINVAL;
+
+ if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
+ int new_size = bo_gem->softpin_target_size * 2;
+ if (new_size == 0)
+ new_size = bufmgr_gem->max_relocs;
+
+ bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
+ sizeof(drm_intel_bo *));
+ if (!bo_gem->softpin_target)
+ return -ENOMEM;
+
+ bo_gem->softpin_target_size = new_size;
+ }
+ bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
+ drm_intel_gem_bo_reference(target_bo);
+ bo_gem->softpin_target_count++;
+
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
+
+ if (target_bo_gem->is_softpin)
+ return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
+ else
+ return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
+ read_domains, write_domain,
+ !bufmgr_gem->fenced_relocs);
+}
+
+static int
+drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
+ read_domains, write_domain, true);
+}
+
+int
+drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ return bo_gem->reloc_count;
+}
- return bo_gem->reloc_count;
-}
-
/**
* Removes existing relocation entries in the BO after "start".
 *
* Any further drm_intel_bufmgr_check_aperture_space() queries
* involving this buffer in the tree are undefined after this call.
+ *
+ * This also removes all softpinned targets being referenced by the BO.
*/
void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
assert(bo_gem->reloc_count >= start);
+
/* Unreference the cleared target buffers */
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
for (i = start; i < bo_gem->reloc_count; i++) {
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
if (&target_bo_gem->bo != bo) {
}
}
bo_gem->reloc_count = start;
+
+ for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
+ drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
+ }
+ bo_gem->softpin_target_count = 0;
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
}
static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int i;
- if (bo_gem->relocs == NULL)
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
return;
for (i = 0; i < bo_gem->reloc_count; i++) {
/* Add the target to the validate list */
drm_intel_add_validate_buffer2(target_bo, need_fence);
}
+
+ for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->softpin_target[i];
+
+ if (target_bo == bo)
+ continue;
+
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ drm_intel_gem_bo_process_reloc2(target_bo);
+ drm_intel_add_validate_buffer2(target_bo, false);
+ }
}
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* Update the buffer offset */
- if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
- DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
- bo_gem->gem_handle, bo_gem->name, bo->offset,
- (unsigned long long)bufmgr_gem->exec_objects[i].
- offset);
+ if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
+ DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
+ bo_gem->gem_handle, bo_gem->name,
+ upper_32_bits(bo->offset64),
+ lower_32_bits(bo->offset64),
+ upper_32_bits(bufmgr_gem->exec_objects[i].offset),
+ lower_32_bits(bufmgr_gem->exec_objects[i].offset));
+ bo->offset64 = bufmgr_gem->exec_objects[i].offset;
bo->offset = bufmgr_gem->exec_objects[i].offset;
}
}
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
/* Update the buffer offset */
- if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
- DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
- bo_gem->gem_handle, bo_gem->name, bo->offset,
- (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
+ if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
+ /* If we're seeing softpinned object here it means that the kernel
+ * has relocated our object... Indicating a programming error
+ */
+ assert(!bo_gem->is_softpin);
+ DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
+ bo_gem->gem_handle, bo_gem->name,
+ upper_32_bits(bo->offset64),
+ lower_32_bits(bo->offset64),
+ upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
+ lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
+ bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
bo->offset = bufmgr_gem->exec2_objects[i].offset;
}
}
}
-static void
-aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
-{
- fwrite(&data, 1, 4, bufmgr_gem->aub_file);
-}
-
-static void
-aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
-{
- fwrite(data, 1, size, bufmgr_gem->aub_file);
-}
-
-static void
-aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
-{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- uint32_t *data;
- unsigned int i;
-
- data = malloc(bo->size);
- drm_intel_bo_get_subdata(bo, offset, size, data);
-
- /* Easy mode: write out bo with no relocations */
- if (!bo_gem->reloc_count) {
- aub_out_data(bufmgr_gem, data, size);
- free(data);
- return;
- }
-
- /* Otherwise, handle the relocations while writing. */
- for (i = 0; i < size / 4; i++) {
- int r;
- for (r = 0; r < bo_gem->reloc_count; r++) {
- struct drm_i915_gem_relocation_entry *reloc;
- drm_intel_reloc_target *info;
-
- reloc = &bo_gem->relocs[r];
- info = &bo_gem->reloc_target_info[r];
-
- if (reloc->offset == offset + i * 4) {
- drm_intel_bo_gem *target_gem;
- uint32_t val;
-
- target_gem = (drm_intel_bo_gem *)info->bo;
-
- val = reloc->delta;
- val += target_gem->aub_offset;
-
- aub_out(bufmgr_gem, val);
- data[i] = val;
- break;
- }
- }
- if (r == bo_gem->reloc_count) {
- /* no relocation, just the data */
- aub_out(bufmgr_gem, data[i]);
- }
- }
-
- free(data);
-}
-
-static void
-aub_bo_get_address(drm_intel_bo *bo)
-{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-
- /* Give the object a graphics address in the AUB file. We
- * don't just use the GEM object address because we do AUB
- * dumping before execution -- we want to successfully log
- * when the hardware might hang, and we might even want to aub
- * capture for a driver trying to execute on a different
- * generation of hardware by disabling the actual kernel exec
- * call.
- */
- bo_gem->aub_offset = bufmgr_gem->aub_offset;
- bufmgr_gem->aub_offset += bo->size;
- /* XXX: Handle aperture overflow. */
- assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
-}
-
-static void
-aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
- uint32_t offset, uint32_t size)
-{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-
- aub_out(bufmgr_gem,
- CMD_AUB_TRACE_HEADER_BLOCK |
- ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
- aub_out(bufmgr_gem,
- AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
- aub_out(bufmgr_gem, subtype);
- aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
- aub_out(bufmgr_gem, size);
- if (bufmgr_gem->gen >= 8)
- aub_out(bufmgr_gem, 0);
- aub_write_bo_data(bo, offset, size);
-}
-
-/**
- * Break up large objects into multiple writes. Otherwise a 128kb VBO
- * would overflow the 16 bits of size field in the packet header and
- * everything goes badly after that.
- */
-static void
-aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
- uint32_t offset, uint32_t size)
-{
- uint32_t block_size;
- uint32_t sub_offset;
-
- for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
- block_size = size - sub_offset;
-
- if (block_size > 8 * 4096)
- block_size = 8 * 4096;
-
- aub_write_trace_block(bo, type, subtype, offset + sub_offset,
- block_size);
- }
-}
-
-static void
-aub_write_bo(drm_intel_bo *bo)
-{
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- uint32_t offset = 0;
- unsigned i;
-
- aub_bo_get_address(bo);
-
- /* Write out each annotated section separately. */
- for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
- drm_intel_aub_annotation *annotation =
- &bo_gem->aub_annotations[i];
- uint32_t ending_offset = annotation->ending_offset;
- if (ending_offset > bo->size)
- ending_offset = bo->size;
- if (ending_offset > offset) {
- aub_write_large_trace_block(bo, annotation->type,
- annotation->subtype,
- offset,
- ending_offset - offset);
- offset = ending_offset;
- }
- }
-
- /* Write out any remaining unannotated data */
- if (offset < bo->size) {
- aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
- offset, bo->size - offset);
- }
-}
-
-/*
- * Make a ringbuffer on fly and dump it
- */
-static void
-aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
- uint32_t batch_buffer, int ring_flag)
-{
- uint32_t ringbuffer[4096];
- int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
- int ring_count = 0;
-
- if (ring_flag == I915_EXEC_BSD)
- ring = AUB_TRACE_TYPE_RING_PRB1;
- else if (ring_flag == I915_EXEC_BLT)
- ring = AUB_TRACE_TYPE_RING_PRB2;
-
- /* Make a ring buffer to execute our batchbuffer. */
- memset(ringbuffer, 0, sizeof(ringbuffer));
- ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
- ringbuffer[ring_count++] = batch_buffer;
-
- /* Write out the ring. This appears to trigger execution of
- * the ring in the simulator.
- */
- aub_out(bufmgr_gem,
- CMD_AUB_TRACE_HEADER_BLOCK |
- ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
- aub_out(bufmgr_gem,
- AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
- aub_out(bufmgr_gem, 0); /* general/surface subtype */
- aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
- aub_out(bufmgr_gem, ring_count * 4);
- if (bufmgr_gem->gen >= 8)
- aub_out(bufmgr_gem, 0);
-
- /* FIXME: Need some flush operations here? */
- aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
-
- /* Update offset pointer */
- bufmgr_gem->aub_offset += 4096;
-}
-
void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
int pitch, int offset)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
- uint32_t cpp;
-
- switch (format) {
- case AUB_DUMP_BMP_FORMAT_8BIT:
- cpp = 1;
- break;
- case AUB_DUMP_BMP_FORMAT_ARGB_4444:
- cpp = 2;
- break;
- case AUB_DUMP_BMP_FORMAT_ARGB_0888:
- case AUB_DUMP_BMP_FORMAT_ARGB_8888:
- cpp = 4;
- break;
- default:
- printf("Unknown AUB dump format %d\n", format);
- return;
- }
-
- if (!bufmgr_gem->aub_file)
- return;
-
- aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
- aub_out(bufmgr_gem, (y1 << 16) | x1);
- aub_out(bufmgr_gem,
- (format << 24) |
- (cpp << 19) |
- pitch / 4);
- aub_out(bufmgr_gem, (height << 16) | width);
- aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
- aub_out(bufmgr_gem,
- ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
- ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
-}
-
-static void
-aub_exec(drm_intel_bo *bo, int ring_flag, int used)
-{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- int i;
- bool batch_buffer_needs_annotations;
-
- if (!bufmgr_gem->aub_file)
- return;
-
- /* If batch buffer is not annotated, annotate it the best we
- * can.
- */
- batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
- if (batch_buffer_needs_annotations) {
- drm_intel_aub_annotation annotations[2] = {
- { AUB_TRACE_TYPE_BATCH, 0, used },
- { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
- };
- drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
- }
-
- /* Write out all buffers to AUB memory */
- for (i = 0; i < bufmgr_gem->exec_count; i++) {
- aub_write_bo(bufmgr_gem->exec_bos[i]);
- }
-
- /* Remove any annotations we added */
- if (batch_buffer_needs_annotations)
- drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
-
- /* Dump ring buffer */
- aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
-
- fflush(bufmgr_gem->aub_file);
-
- /*
- * One frame has been dumped. So reset the aub_offset for the next frame.
- *
- * FIXME: Can we do this?
- */
- bufmgr_gem->aub_offset = 0x10000;
}
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_execbuffer execbuf;
int ret, i;
- if (bo_gem->has_error)
+ if (to_bo_gem(bo)->has_error)
return -ENOMEM;
pthread_mutex_lock(&bufmgr_gem->lock);
*/
drm_intel_add_validate_buffer(bo);
- VG_CLEAR(execbuf);
+ memclear(execbuf);
execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
execbuf.buffer_count = bufmgr_gem->exec_count;
execbuf.batch_start_offset = 0;
drm_intel_gem_dump_validation_list(bufmgr_gem);
for (i = 0; i < bufmgr_gem->exec_count; i++) {
- drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
+
+ bo_gem->idle = false;
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
int ret = 0;
int i;
+ if (to_bo_gem(bo)->has_error)
+ return -ENOMEM;
+
switch (flags & 0x7) {
default:
return -EINVAL;
*/
drm_intel_add_validate_buffer2(bo, 0);
- VG_CLEAR(execbuf);
+ memclear(execbuf);
execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
execbuf.buffer_count = bufmgr_gem->exec_count;
execbuf.batch_start_offset = 0;
i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
execbuf.rsvd2 = 0;
- aub_exec(bo, flags, used);
-
if (bufmgr_gem->no_exec)
goto skip_execution;
drm_intel_gem_dump_validation_list(bufmgr_gem);
for (i = 0; i < bufmgr_gem->exec_count; i++) {
- drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
+
+ bo_gem->idle = false;
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
struct drm_i915_gem_pin pin;
int ret;
- VG_CLEAR(pin);
+ memclear(pin);
pin.handle = bo_gem->gem_handle;
pin.alignment = alignment;
if (ret != 0)
return -errno;
+ bo->offset64 = pin.offset;
bo->offset = pin.offset;
return 0;
}
struct drm_i915_gem_unpin unpin;
int ret;
- VG_CLEAR(unpin);
+ memclear(unpin);
unpin.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
+ /* Tiling with userptr surfaces is not supported
+	 * on all hardware so refuse it for the time being.
+ */
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
/* Linear buffers have no stride. By ensuring that we only ever use
* stride 0 with linear buffers, we simplify our code.
*/
ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
if (ret == 0)
- drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
*tiling_mode = bo_gem->tiling_mode;
return ret;
return 0;
}
+static int
+drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ bo_gem->is_softpin = true;
+ bo->offset64 = offset;
+ bo->offset = offset;
+ return 0;
+}
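+
+/*
+ * Editor's sketch (assumption, not part of this patch): the softpin
+ * flow enabled above, using the public wrappers from intel_bufmgr.h.
+ * Once a bo is pinned to a fixed GPU address, emit_reloc() records it
+ * as a softpin target instead of a relocation.
+ */
+#if 0	/* illustrative only */
+	drm_intel_bo_use_48b_address_range(target, 1);
+	drm_intel_bo_set_softpin_offset(target, 0x100000000ull);
+	drm_intel_bo_emit_reloc(batch, 0, target, 0,
+				I915_GEM_DOMAIN_RENDER, 0);
+#endif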
+
drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
drm_intel_bo_gem *bo_gem;
struct drm_i915_gem_get_tiling get_tiling;
+ pthread_mutex_lock(&bufmgr_gem->lock);
ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
if (ret) {
- fprintf(stderr,"ret is %d %d\n", ret, errno);
+ DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
}
+ /*
+ * See if the kernel has already returned this buffer to us. Just as
+ * for named buffers, we must not create two bo's pointing at the same
+ * kernel object
+ */
+ HASH_FIND(handle_hh, bufmgr_gem->handle_table,
+ &handle, sizeof(handle), bo_gem);
+ if (bo_gem) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ goto out;
+ }
+
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
- return NULL;
+ goto out;
+
+ atomic_set(&bo_gem->refcount, 1);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
	/* Determine size of bo.  The fd-to-handle ioctl really should
	 * return the size, but it doesn't.  If we have kernel 3.12 or
	 * later, we can lseek on the prime fd to get the size.  Older
	 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guessed) size. */
	ret = lseek(prime_fd, 0, SEEK_END);
	if (ret != -1)
		bo_gem->bo.size = ret;
	else
		bo_gem->bo.size = size;

	bo_gem->bo.bufmgr = bufmgr;
bo_gem->gem_handle = handle;
-
- atomic_set(&bo_gem->refcount, 1);
+ HASH_ADD(handle_hh, bufmgr_gem->handle_table,
+ gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
bo_gem->name = "prime";
bo_gem->validate_index = -1;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = false;
+ bo_gem->use_48b_address_range = false;
- DRMINITLISTHEAD(&bo_gem->name_list);
- DRMINITLISTHEAD(&bo_gem->vma_list);
-
- VG_CLEAR(get_tiling);
+ memclear(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
- ret = drmIoctl(bufmgr_gem->fd,
- DRM_IOCTL_I915_GEM_GET_TILING,
- &get_tiling);
- if (ret != 0) {
- drm_intel_gem_bo_unreference(&bo_gem->bo);
- return NULL;
- }
+ if (drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_GET_TILING,
+ &get_tiling))
+ goto err;
+
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
- drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
+out:
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return &bo_gem->bo;
+
+err:
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
}
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- int ret;
if (!bo_gem->global_name) {
struct drm_gem_flink flink;
- VG_CLEAR(flink);
+ memclear(flink);
flink.handle = bo_gem->gem_handle;
-
- ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
- if (ret != 0)
+ if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
return -errno;
- bo_gem->global_name = flink.name;
- bo_gem->reusable = false;
-
- DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ if (!bo_gem->global_name) {
+		bo_gem->global_name = flink.name;
+		bo_gem->reusable = false;
+
+		HASH_ADD(name_hh, bufmgr_gem->name_table,
+			 global_name, sizeof(bo_gem->global_name),
+			 bo_gem);
+ }
+ pthread_mutex_unlock(&bufmgr_gem->lock);
}
*name = bo_gem->global_name;
return 1;
}
+	for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ if (bo_gem->softpin_target[i] == target_bo)
+ return 1;
+ if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
+ return 1;
+ }
+
return 0;
}
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
+static int
+parse_devid_override(const char *devid_override)
+{
+ static const struct {
+ const char *name;
+ int pci_id;
+ } name_map[] = {
+ { "brw", PCI_CHIP_I965_GM },
+ { "g4x", PCI_CHIP_GM45_GM },
+ { "ilk", PCI_CHIP_ILD_G },
+ { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
+ { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
+ { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
+ { "byt", PCI_CHIP_VALLEYVIEW_3 },
+ { "bdw", 0x1620 | BDW_ULX },
+ { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
+ { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(name_map); i++) {
+ if (!strcmp(name_map[i].name, devid_override))
+ return name_map[i].pci_id;
+ }
+
+ return strtod(devid_override, NULL);
+}
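+
+/*
+ * Editor's note (illustrative): with the table above either form works,
+ * e.g. INTEL_DEVID_OVERRIDE=skl or INTEL_DEVID_OVERRIDE=0x1912. Any
+ * override also sets no_exec (see below), so batches are never submitted.
+ */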
+
/**
* Get the PCI ID for the device. This can be overridden by setting the
* INTEL_DEVID_OVERRIDE environment variable to the desired ID.
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
char *devid_override;
- int devid;
+ int devid = 0;
int ret;
drm_i915_getparam_t gp;
devid_override = getenv("INTEL_DEVID_OVERRIDE");
if (devid_override) {
bufmgr_gem->no_exec = true;
- return strtod(devid_override, NULL);
+ return parse_devid_override(devid_override);
}
}
- VG_CLEAR(devid);
- VG_CLEAR(gp);
+ memclear(gp);
gp.param = I915_PARAM_CHIPSET_ID;
gp.value = &devid;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-
- free(bufmgr_gem->aub_filename);
- if (filename)
- bufmgr_gem->aub_filename = strdup(filename);
}
/**
 * Sets up AUB dumping.
 *
 * This is a trace file format that can be used with the simulator.
 * Packets are emitted in a format somewhat like GPU command packets.
 * You can set up a GTT and upload your objects into the referenced
 * space, then send off batchbuffers and get BMPs out the other end.
 */
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
- int entry = 0x200003;
- int i;
- int gtt_size = 0x10000;
- const char *filename;
-
- if (!enable) {
- if (bufmgr_gem->aub_file) {
- fclose(bufmgr_gem->aub_file);
- bufmgr_gem->aub_file = NULL;
- }
- return;
- }
-
- if (geteuid() != getuid())
- return;
-
- if (bufmgr_gem->aub_filename)
- filename = bufmgr_gem->aub_filename;
- else
- filename = "intel.aub";
- bufmgr_gem->aub_file = fopen(filename, "w+");
- if (!bufmgr_gem->aub_file)
- return;
-
- /* Start allocating objects from just after the GTT. */
- bufmgr_gem->aub_offset = gtt_size;
-
- /* Start with a (required) version packet. */
- aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
- aub_out(bufmgr_gem,
- (4 << AUB_HEADER_MAJOR_SHIFT) |
- (0 << AUB_HEADER_MINOR_SHIFT));
- for (i = 0; i < 8; i++) {
- aub_out(bufmgr_gem, 0); /* app name */
- }
- aub_out(bufmgr_gem, 0); /* timestamp */
- aub_out(bufmgr_gem, 0); /* timestamp */
- aub_out(bufmgr_gem, 0); /* comment len */
-
- /* Set up the GTT. The max we can handle is 256M */
- aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
- aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
- aub_out(bufmgr_gem, 0); /* subtype */
- aub_out(bufmgr_gem, 0); /* offset */
- aub_out(bufmgr_gem, gtt_size); /* size */
- if (bufmgr_gem->gen >= 8)
- aub_out(bufmgr_gem, 0);
- for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
- aub_out(bufmgr_gem, entry);
- }
+ fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
+ "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
+ "then run (for example)\n\n"
+ "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
+ "See the intel_aubdump man page for more details.\n");
}
drm_intel_context *
drm_intel_context *context = NULL;
int ret;
- VG_CLEAR(create);
+ context = calloc(1, sizeof(*context));
+ if (!context)
+ return NULL;
+
+ memclear(create);
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
strerror(errno));
+ free(context);
return NULL;
}
- context = calloc(1, sizeof(*context));
context->ctx_id = create.ctx_id;
context->bufmgr = bufmgr;
return context;
}
+int
+drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
+{
+ if (ctx == NULL)
+ return -EINVAL;
+
+ *ctx_id = ctx->ctx_id;
+
+ return 0;
+}
+
void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
if (ctx == NULL)
return;
- VG_CLEAR(destroy);
+ memclear(destroy);
bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
destroy.ctx_id = ctx->ctx_id;
}
int
+drm_intel_get_reset_stats(drm_intel_context *ctx,
+ uint32_t *reset_count,
+ uint32_t *active,
+ uint32_t *pending)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_reset_stats stats;
+ int ret;
+
+ if (ctx == NULL)
+ return -EINVAL;
+
+ memclear(stats);
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
+ stats.ctx_id = ctx->ctx_id;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GET_RESET_STATS,
+ &stats);
+ if (ret == 0) {
+ if (reset_count != NULL)
+ *reset_count = stats.reset_count;
+
+ if (active != NULL)
+ *active = stats.batch_active;
+
+ if (pending != NULL)
+ *pending = stats.batch_pending;
+ }
+
+ return ret;
+}
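+
+/*
+ * Editor's sketch (assumption, not part of this patch): polling the
+ * helper above to detect whether this context was affected by a GPU
+ * hang.
+ */
+#if 0	/* illustrative only */
+	uint32_t resets, active, pending;
+
+	if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
+	    (active || pending)) {
+		/* our batches were lost to a reset; rebuild GPU state */
+	}
+#endif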
+
+int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
struct drm_i915_reg_read reg_read;
int ret;
- VG_CLEAR(reg_read);
+ memclear(reg_read);
reg_read.offset = offset;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
return ret;
}
+int
+drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
+{
+ drm_i915_getparam_t gp;
+ int ret;
+
+ memclear(gp);
+ gp.value = (int*)subslice_total;
+ gp.param = I915_PARAM_SUBSLICE_TOTAL;
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret)
+ return -errno;
+
+ return 0;
+}
+
+int
+drm_intel_get_eu_total(int fd, unsigned int *eu_total)
+{
+ drm_i915_getparam_t gp;
+ int ret;
+
+ memclear(gp);
+ gp.value = (int*)eu_total;
+ gp.param = I915_PARAM_EU_TOTAL;
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret)
+ return -errno;
+
+ return 0;
+}
+
+int
+drm_intel_get_pooled_eu(int fd)
+{
+ drm_i915_getparam_t gp;
+ int ret = -1;
+
+ memclear(gp);
+ gp.param = I915_PARAM_HAS_POOLED_EU;
+ gp.value = &ret;
+ if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+ return -errno;
+
+ return ret;
+}
+
+int
+drm_intel_get_min_eu_in_pool(int fd)
+{
+ drm_i915_getparam_t gp;
+ int ret = -1;
+
+ memclear(gp);
+ gp.param = I915_PARAM_MIN_EU_IN_POOL;
+ gp.value = &ret;
+ if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+ return -errno;
+
+ return ret;
+}
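
/*
 * Usage sketch (illustrative, not part of the patch): probe the pooled
 * EU configuration. Both helpers return the queried value (>= 0) on
 * success and a negative errno when the parameter is unknown to the
 * kernel.
 */
#include <stdio.h>
#include "intel_bufmgr.h"

static void example_pooled_eu(int fd)
{
	int pooled = drm_intel_get_pooled_eu(fd);

	if (pooled > 0)
		printf("pooled EU enabled, min %d EUs in pool\n",
		       drm_intel_get_min_eu_in_pool(fd));
}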
/**
* Annotate the given bo for use in aub dumping.
drm_intel_aub_annotation *annotations,
unsigned count)
{
+}
+
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+
+ DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+ if (bufmgr_gem->fd == fd) {
+ atomic_inc(&bufmgr_gem->refcount);
+ return bufmgr_gem;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+ DRMLISTDEL(&bufmgr_gem->managers);
+ drm_intel_bufmgr_gem_destroy(bufmgr);
+ }
+
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+ }
+}
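
/*
 * Note on the pattern above: with libdrm's xf86atomic.h semantics,
 * atomic_add_unless() performs the decrement lock-free unless the
 * counter currently equals 1, and returns true only in that
 * last-reference case. Ordinary unreferences therefore never touch
 * bufmgr_list_mutex; only a potentially final one takes the lock and
 * re-checks with atomic_dec_and_test(), which closes the race against
 * drm_intel_bufmgr_gem_find() (called under the same lock) taking a
 * new reference in between.
 */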
+
+void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- unsigned size = sizeof(*annotations) * count;
- drm_intel_aub_annotation *new_annotations =
- count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
- if (new_annotations == NULL) {
- free(bo_gem->aub_annotations);
- bo_gem->aub_annotations = NULL;
- bo_gem->aub_annotation_count = 0;
- return;
+
+ if (bo_gem->gtt_virtual)
+ return bo_gem->gtt_virtual;
+
+ if (bo_gem->is_userptr)
+ return NULL;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ if (bo_gem->gtt_virtual == NULL) {
+ struct drm_i915_gem_mmap_gtt mmap_arg;
+ void *ptr;
+
+ DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
+ bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+ memclear(mmap_arg);
+ mmap_arg.handle = bo_gem->gem_handle;
+
+ /* Get the fake offset back... */
+ ptr = MAP_FAILED;
+ if (drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP_GTT,
+ &mmap_arg) == 0) {
+ /* and mmap it */
+ ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
+ }
+ if (ptr == MAP_FAILED) {
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ ptr = NULL;
+ }
+
+ bo_gem->gtt_virtual = ptr;
}
- memcpy(new_annotations, annotations, size);
- bo_gem->aub_annotations = new_annotations;
- bo_gem->aub_annotation_count = count;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return bo_gem->gtt_virtual;
+}
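
/*
 * Note on the helper above (and its __cpu/__wc siblings below): the
 * unlocked early return relies on gtt_virtual being written at most
 * once and staying valid for the lifetime of the bo. There is no unmap
 * counterpart in this API family, so the mapping remains cached until
 * the buffer is freed or its vma is purged from the cache.
 */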
+
+void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ if (bo_gem->mem_virtual)
+ return bo_gem->mem_virtual;
+
+ if (bo_gem->is_userptr) {
+ /* Return the same user ptr */
+ return bo_gem->user_virtual;
+ }
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ if (!bo_gem->mem_virtual) {
+ struct drm_i915_gem_mmap mmap_arg;
+
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+ DBG("bo_map: %d (%s), map_count=%d\n",
+ bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+ memclear(mmap_arg);
+ mmap_arg.handle = bo_gem->gem_handle;
+ mmap_arg.size = bo->size;
+ if (drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP,
+ &mmap_arg)) {
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ bo_gem->name, strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ } else {
+ VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
+ bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+ }
+ }
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return bo_gem->mem_virtual;
+}
+
+void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ if (bo_gem->wc_virtual)
+ return bo_gem->wc_virtual;
+
+ if (bo_gem->is_userptr)
+ return NULL;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ if (!bo_gem->wc_virtual) {
+ struct drm_i915_gem_mmap mmap_arg;
+
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+ DBG("bo_map: %d (%s), map_count=%d\n",
+ bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+ memclear(mmap_arg);
+ mmap_arg.handle = bo_gem->gem_handle;
+ mmap_arg.size = bo->size;
+ mmap_arg.flags = I915_MMAP_WC;
+ if (drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP,
+ &mmap_arg)) {
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ bo_gem->name, strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ } else {
+ VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
+ bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+ }
+ }
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return bo_gem->wc_virtual;
}
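
/*
 * Usage sketch (illustrative, not part of the patch): the map__cpu/
 * __gtt/__wc helpers return a cached mapping and do no domain tracking;
 * ordering CPU access against the GPU is the caller's job. A write-only
 * upload might prefer WC and fall back to GTT where the kernel lacks
 * I915_MMAP_WC.
 */
#include <string.h>
#include "intel_bufmgr.h"

static int example_upload(drm_intel_bo *bo, const void *data, size_t len)
{
	void *ptr = drm_intel_gem_bo_map__wc(bo);

	if (ptr == NULL)
		ptr = drm_intel_gem_bo_map__gtt(bo);
	if (ptr == NULL)
		return -1;

	memcpy(ptr, data, len);	/* mapping stays cached on the bo */
	return 0;
}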
/**
int ret, tmp;
bool exec2 = false;
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+ if (bufmgr_gem)
+ goto exit;
+
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
if (bufmgr_gem == NULL)
- return NULL;
+ goto exit;
bufmgr_gem->fd = fd;
+ atomic_set(&bufmgr_gem->refcount, 1);
if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
free(bufmgr_gem);
- return NULL;
+ bufmgr_gem = NULL;
+ goto exit;
}
+ memclear(aperture);
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_GET_APERTURE,
&aperture);
bufmgr_gem->gen = 7;
else if (IS_GEN8(bufmgr_gem->pci_device))
bufmgr_gem->gen = 8;
+ else if (IS_GEN9(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 9;
else {
free(bufmgr_gem);
- return NULL;
+ bufmgr_gem = NULL;
+ goto exit;
}
if (IS_GEN3(bufmgr_gem->pci_device) &&
bufmgr_gem->gtt_size > 256*1024*1024) {
/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
* be used for tiled blits. To simplify the accounting, just
- * substract the unmappable part (fixed to 256MB on all known
+ * subtract the unmappable part (fixed to 256MB on all known
* gen3 devices) if the kernel advertises it. */
bufmgr_gem->gtt_size -= 256*1024*1024;
}
- VG_CLEAR(gp);
+ memclear(gp);
gp.value = &tmp;
gp.param = I915_PARAM_HAS_EXECBUF2;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_relaxed_fencing = ret == 0;
+ bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
+
gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_wait_timeout = ret == 0;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);
+ gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret == 0 && *gp.value > 0)
+ bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
+
if (bufmgr_gem->gen < 4) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
}
}
+ if (bufmgr_gem->gen >= 8) {
+ gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret == 0 && *gp.value == 3)
+ bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
+ }
+
/* Let's go with one relocation per every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
- bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
bufmgr_gem->bufmgr.debug = 0;
bufmgr_gem->bufmgr.check_aperture_space =
drm_intel_gem_check_aperture_space;
drm_intel_gem_get_pipe_from_crtc_id;
bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
- DRMINITLISTHEAD(&bufmgr_gem->named);
init_cache_buckets(bufmgr_gem);
DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
bufmgr_gem->vma_max = -1; /* unlimited by default */
- return &bufmgr_gem->bufmgr;
+ DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+
+ return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
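
/*
 * Usage sketch (illustrative, not part of the patch): with the manager
 * list in place, two inits on the same fd share one manager; each init
 * is balanced by a destroy, which now only drops a reference until the
 * last user is gone.
 */
#include <assert.h>
#include "intel_bufmgr.h"

static void example_shared_manager(int fd)
{
	drm_intel_bufmgr *a = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr *b = drm_intel_bufmgr_gem_init(fd, 4096);

	assert(a != NULL && a == b);	/* same fd, same manager */

	drm_intel_bufmgr_destroy(b);	/* drops one reference */
	drm_intel_bufmgr_destroy(a);	/* last reference: real teardown */
}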