return ret;
}
-static void *amdgpu_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+static void *amdgpu_bo_map(struct bo *bo, struct mapping *mapping, size_t plane, uint32_t map_flags)
{
int ret;
union drm_amdgpu_gem_mmap gem_map;
return MAP_FAILED;
}
- data->length = bo->total_size;
+ mapping->vma->length = bo->total_size;
return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.out.addr_ptr);
if (map_flags) {
if (lock_data_[0]) {
drv_bo_invalidate(bo_, lock_data_[0]);
- vaddr = lock_data_[0]->addr;
+ vaddr = lock_data_[0]->vma->addr;
} else {
vaddr = drv_bo_map(bo_, 0, 0, drv_bo_get_width(bo_), drv_bo_get_height(bo_),
map_flags, &lock_data_[0], 0);
int32_t lockcount_;
uint32_t num_planes_;
- struct map_info *lock_data_[DRV_MAX_PLANES];
+ struct mapping *lock_data_[DRV_MAX_PLANES];
};
#endif
pthread_mutex_unlock(&drv->driver_lock);
if (total == 0) {
- assert(drv_map_info_destroy(bo) == 0);
+ assert(drv_mapping_destroy(bo) == 0);
bo->drv->backend->bo_destroy(bo);
}
}
void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t map_flags, struct map_info **map_data, size_t plane)
+ uint32_t map_flags, struct mapping **map_data, size_t plane)
{
void *ptr;
uint8_t *addr;
size_t offset;
- struct map_info *data;
+ struct mapping *mapping;
assert(width > 0);
assert(height > 0);
pthread_mutex_lock(&bo->drv->driver_lock);
if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) {
- data = (struct map_info *)ptr;
+ mapping = (struct mapping *)ptr;
/* TODO(gsingh): support mapping same buffer with different flags. */
- assert(data->map_flags == map_flags);
- data->refcount++;
+ assert(mapping->vma->map_flags == map_flags);
+ mapping->vma->refcount++;
goto success;
}
- data = calloc(1, sizeof(*data));
- addr = bo->drv->backend->bo_map(bo, data, plane, map_flags);
+ mapping = calloc(1, sizeof(*mapping));
+ mapping->vma = calloc(1, sizeof(*mapping->vma));
+ addr = bo->drv->backend->bo_map(bo, mapping, plane, map_flags);
if (addr == MAP_FAILED) {
*map_data = NULL;
- free(data);
+ free(mapping->vma);
+ free(mapping);
pthread_mutex_unlock(&bo->drv->driver_lock);
return MAP_FAILED;
}
- data->refcount = 1;
- data->addr = addr;
- data->handle = bo->handles[plane].u32;
- data->map_flags = map_flags;
- drmHashInsert(bo->drv->map_table, bo->handles[plane].u32, (void *)data);
+ mapping->vma->refcount = 1;
+ mapping->vma->addr = addr;
+ mapping->vma->handle = bo->handles[plane].u32;
+ mapping->vma->map_flags = map_flags;
+ drmHashInsert(bo->drv->map_table, bo->handles[plane].u32, (void *)mapping);
success:
- drv_bo_invalidate(bo, data);
- *map_data = data;
+ drv_bo_invalidate(bo, mapping);
+ *map_data = mapping;
offset = drv_bo_get_plane_stride(bo, plane) * y;
offset += drv_stride_from_format(bo->format, x, plane);
- addr = (uint8_t *)data->addr;
+ addr = (uint8_t *)mapping->vma->addr;
addr += drv_bo_get_plane_offset(bo, plane) + offset;
pthread_mutex_unlock(&bo->drv->driver_lock);
return (void *)addr;
}
-int drv_bo_unmap(struct bo *bo, struct map_info *data)
+int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
- int ret = drv_bo_flush(bo, data);
+ int ret = drv_bo_flush(bo, mapping);
if (ret)
return ret;
pthread_mutex_lock(&bo->drv->driver_lock);
- if (!--data->refcount) {
- ret = bo->drv->backend->bo_unmap(bo, data);
- drmHashDelete(bo->drv->map_table, data->handle);
- free(data);
+ if (!--mapping->vma->refcount) {
+ ret = bo->drv->backend->bo_unmap(bo, mapping);
+ drmHashDelete(bo->drv->map_table, mapping->vma->handle);
+ free(mapping->vma);
+ free(mapping);
}
pthread_mutex_unlock(&bo->drv->driver_lock);
return ret;
}
-int drv_bo_invalidate(struct bo *bo, struct map_info *data)
+int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret = 0;
- assert(data);
- assert(data->refcount > 0);
+
+ assert(mapping);
+ assert(mapping->vma);
+ assert(mapping->vma->refcount > 0);
if (bo->drv->backend->bo_invalidate)
- ret = bo->drv->backend->bo_invalidate(bo, data);
+ ret = bo->drv->backend->bo_invalidate(bo, mapping);
return ret;
}
-int drv_bo_flush(struct bo *bo, struct map_info *data)
+int drv_bo_flush(struct bo *bo, struct mapping *mapping)
{
int ret = 0;
- assert(data);
- assert(data->refcount > 0);
+
+ assert(mapping);
+ assert(mapping->vma);
+ assert(mapping->vma->refcount > 0);
assert(!(bo->use_flags & BO_USE_PROTECTED));
if (bo->drv->backend->bo_flush)
- ret = bo->drv->backend->bo_flush(bo, data);
+ ret = bo->drv->backend->bo_flush(bo, mapping);
return ret;
}
uint64_t use_flags;
};
/*
 * A kernel-visible virtual memory area for one mapped GEM handle, shared
 * (via refcount) by every mapping of that handle.
 * NOTE(review): map_flags and refcount are referenced throughout the mapping
 * code (drv_bo_map/drv_bo_unmap), so they belong here; confirm field order
 * against the original map_info layout.
 */
struct vma {
	void *addr;
	size_t length;
	uint32_t handle;
	uint32_t map_flags;
	uint32_t refcount;
	void *priv;
};

/* A caller-held handle onto a (possibly shared) VMA. */
struct mapping {
	struct vma *vma;
};
+
struct driver *drv_create(int fd);

void drv_destroy(struct driver *drv);

struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data);

void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
		 uint32_t map_flags, struct mapping **map_data, size_t plane);

int drv_bo_unmap(struct bo *bo, struct mapping *mapping);

int drv_bo_invalidate(struct bo *bo, struct mapping *mapping);

int drv_bo_flush(struct bo *bo, struct mapping *mapping);

uint32_t drv_bo_get_width(struct bo *bo);
uint32_t format, const uint64_t *modifiers, uint32_t count);
int (*bo_destroy)(struct bo *bo);
int (*bo_import)(struct bo *bo, struct drv_import_fd_data *data);
- void *(*bo_map)(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags);
- int (*bo_unmap)(struct bo *bo, struct map_info *data);
- int (*bo_invalidate)(struct bo *bo, struct map_info *data);
- int (*bo_flush)(struct bo *bo, struct map_info *data);
+ void *(*bo_map)(struct bo *bo, struct mapping *mapping, size_t plane, uint32_t map_flags);
+ int (*bo_unmap)(struct bo *bo, struct mapping *mapping);
+ int (*bo_invalidate)(struct bo *bo, struct mapping *mapping);
+ int (*bo_flush)(struct bo *bo, struct mapping *mapping);
uint32_t (*resolve_format)(uint32_t format, uint64_t use_flags);
};
*stride = gbm_bo_get_plane_stride(bo, plane);
map_flags = (transfer_flags & GBM_BO_TRANSFER_READ) ? BO_MAP_READ : BO_MAP_NONE;
map_flags |= (transfer_flags & GBM_BO_TRANSFER_WRITE) ? BO_MAP_WRITE : BO_MAP_NONE;
- return drv_bo_map(bo->bo, x, y, width, height, map_flags, (struct map_info **)map_data,
+ return drv_bo_map(bo->bo, x, y, width, height, map_flags, (struct mapping **)map_data,
plane);
}
return 0;
}
-void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+void *drv_dumb_bo_map(struct bo *bo, struct mapping *mapping, size_t plane, uint32_t map_flags)
{
int ret;
size_t i;
for (i = 0; i < bo->num_planes; i++)
if (bo->handles[i].u32 == bo->handles[plane].u32)
- data->length += bo->sizes[i];
+ mapping->vma->length += bo->sizes[i];
- return mmap(0, data->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ return mmap(0, mapping->vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
map_dumb.offset);
}
-int drv_bo_munmap(struct bo *bo, struct map_info *data)
+int drv_bo_munmap(struct bo *bo, struct mapping *mapping)
{
- return munmap(data->addr, data->length);
+ return munmap(mapping->vma->addr, mapping->vma->length);
}
-int drv_map_info_destroy(struct bo *bo)
+int drv_mapping_destroy(struct bo *bo)
{
int ret;
void *ptr;
size_t plane;
- struct map_info *data;
+ struct mapping *mapping;
/*
* This function is called right before the buffer is destroyed. It will free any mappings
for (plane = 0; plane < bo->num_planes; plane++) {
if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) {
- data = (struct map_info *)ptr;
- ret = bo->drv->backend->bo_unmap(bo, data);
+ mapping = (struct mapping *)ptr;
+ ret = bo->drv->backend->bo_unmap(bo, mapping);
if (ret) {
fprintf(stderr, "drv: munmap failed");
return ret;
}
- drmHashDelete(bo->drv->map_table, data->handle);
- free(data);
+ drmHashDelete(bo->drv->map_table, mapping->vma->handle);
+ free(mapping->vma);
+ free(mapping);
}
}
int drv_dumb_bo_destroy(struct bo *bo);

int drv_gem_bo_destroy(struct bo *bo);

int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data);

void *drv_dumb_bo_map(struct bo *bo, struct mapping *mapping, size_t plane, uint32_t map_flags);

int drv_bo_munmap(struct bo *bo, struct mapping *mapping);

int drv_mapping_destroy(struct bo *bo);

int drv_get_prot(uint32_t map_flags);

uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane);

void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane);
return 0;
}
-static void *i915_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+static void *i915_bo_map(struct bo *bo, struct mapping *mapping, size_t plane, uint32_t map_flags)
{
int ret;
void *addr;
return addr;
}
- data->length = bo->total_size;
+ mapping->vma->length = bo->total_size;
return addr;
}
-static int i915_bo_invalidate(struct bo *bo, struct map_info *data)
+static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
struct drm_i915_gem_set_domain set_domain;
set_domain.handle = bo->handles[0].u32;
if (bo->tiling == I915_TILING_NONE) {
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
- if (data->map_flags & BO_MAP_WRITE)
+ if (mapping->vma->map_flags & BO_MAP_WRITE)
set_domain.write_domain = I915_GEM_DOMAIN_CPU;
} else {
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
- if (data->map_flags & BO_MAP_WRITE)
+ if (mapping->vma->map_flags & BO_MAP_WRITE)
set_domain.write_domain = I915_GEM_DOMAIN_GTT;
}
return 0;
}
-static int i915_bo_flush(struct bo *bo, struct map_info *data)
+static int i915_bo_flush(struct bo *bo, struct mapping *mapping)
{
struct i915_device *i915 = bo->drv->priv;
if (!i915->has_llc && bo->tiling == I915_TILING_NONE)
- i915_clflush(data->addr, data->length);
+ i915_clflush(mapping->vma->addr, mapping->vma->length);
return 0;
}
return 0;
}
-static void *mediatek_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+static void *mediatek_bo_map(struct bo *bo, struct mapping *mapping, size_t plane,
+ uint32_t map_flags)
{
int ret;
struct drm_mtk_gem_map_off gem_map;
void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
- data->length = bo->total_size;
+ mapping->vma->length = bo->total_size;
if (bo->use_flags & BO_USE_RENDERSCRIPT) {
priv = calloc(1, sizeof(*priv));
priv->cached_addr = calloc(1, bo->total_size);
priv->gem_addr = addr;
memcpy(priv->cached_addr, priv->gem_addr, bo->total_size);
- data->priv = priv;
+ mapping->vma->priv = priv;
addr = priv->cached_addr;
}
return addr;
}
-static int mediatek_bo_unmap(struct bo *bo, struct map_info *data)
+static int mediatek_bo_unmap(struct bo *bo, struct mapping *mapping)
{
- if (data->priv) {
- struct mediatek_private_map_data *priv = data->priv;
- data->addr = priv->gem_addr;
+ if (mapping->vma->priv) {
+ struct mediatek_private_map_data *priv = mapping->vma->priv;
+ mapping->vma->addr = priv->gem_addr;
free(priv->cached_addr);
free(priv);
- data->priv = NULL;
+ mapping->vma->priv = NULL;
}
- return munmap(data->addr, data->length);
+ return munmap(mapping->vma->addr, mapping->vma->length);
}
-static int mediatek_bo_flush(struct bo *bo, struct map_info *data)
+static int mediatek_bo_flush(struct bo *bo, struct mapping *mapping)
{
- struct mediatek_private_map_data *priv = data->priv;
- if (priv && (data->map_flags & BO_MAP_WRITE))
+ struct mediatek_private_map_data *priv = mapping->vma->priv;
+ if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
memcpy(priv->gem_addr, priv->cached_addr, bo->total_size);
return 0;
ARRAY_SIZE(modifiers));
}
-static void *rockchip_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+static void *rockchip_bo_map(struct bo *bo, struct mapping *mapping, size_t plane,
+ uint32_t map_flags)
{
int ret;
struct drm_rockchip_gem_map_off gem_map;
void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
- data->length = bo->total_size;
+ mapping->vma->length = bo->total_size;
if (bo->use_flags & BO_USE_RENDERSCRIPT) {
priv = calloc(1, sizeof(*priv));
priv->cached_addr = calloc(1, bo->total_size);
priv->gem_addr = addr;
memcpy(priv->cached_addr, priv->gem_addr, bo->total_size);
- data->priv = priv;
+ mapping->vma->priv = priv;
addr = priv->cached_addr;
}
return addr;
}
-static int rockchip_bo_unmap(struct bo *bo, struct map_info *data)
+static int rockchip_bo_unmap(struct bo *bo, struct mapping *mapping)
{
- if (data->priv) {
- struct rockchip_private_map_data *priv = data->priv;
- data->addr = priv->gem_addr;
+ if (mapping->vma->priv) {
+ struct rockchip_private_map_data *priv = mapping->vma->priv;
+ mapping->vma->addr = priv->gem_addr;
free(priv->cached_addr);
free(priv);
- data->priv = NULL;
+ mapping->vma->priv = NULL;
}
- return munmap(data->addr, data->length);
+ return munmap(mapping->vma->addr, mapping->vma->length);
}
-static int rockchip_bo_flush(struct bo *bo, struct map_info *data)
+static int rockchip_bo_flush(struct bo *bo, struct mapping *mapping)
{
- struct rockchip_private_map_data *priv = data->priv;
- if (priv && (data->map_flags & BO_MAP_WRITE))
+ struct rockchip_private_map_data *priv = mapping->vma->priv;
+ if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
memcpy(priv->gem_addr, priv->cached_addr, bo->total_size);
return 0;
return 0;
}
-static void *tegra_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+static void *tegra_bo_map(struct bo *bo, struct mapping *mapping, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_tegra_gem_mmap gem_map;
void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
- data->length = bo->total_size;
+ mapping->vma->length = bo->total_size;
if ((bo->tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
priv = calloc(1, sizeof(*priv));
priv->untiled = calloc(1, bo->total_size);
priv->tiled = addr;
- data->priv = priv;
+ mapping->vma->priv = priv;
transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_READ_TILED_BUFFER);
addr = priv->untiled;
}
return addr;
}
-static int tegra_bo_unmap(struct bo *bo, struct map_info *data)
+static int tegra_bo_unmap(struct bo *bo, struct mapping *mapping)
{
- if (data->priv) {
- struct tegra_private_map_data *priv = data->priv;
- data->addr = priv->tiled;
+ if (mapping->vma->priv) {
+ struct tegra_private_map_data *priv = mapping->vma->priv;
+ mapping->vma->addr = priv->tiled;
free(priv->untiled);
free(priv);
- data->priv = NULL;
+ mapping->vma->priv = NULL;
}
- return munmap(data->addr, data->length);
+ return munmap(mapping->vma->addr, mapping->vma->length);
}
-static int tegra_bo_flush(struct bo *bo, struct map_info *data)
+static int tegra_bo_flush(struct bo *bo, struct mapping *mapping)
{
- struct tegra_private_map_data *priv = data->priv;
+ struct tegra_private_map_data *priv = mapping->vma->priv;
- if (priv && (data->map_flags & BO_MAP_WRITE))
+ if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER);
return 0;
return 0;
}
-static void *vc4_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+static void *vc4_bo_map(struct bo *bo, struct mapping *mapping, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_vc4_mmap_bo bo_map;
return MAP_FAILED;
}
- data->length = bo->total_size;
+ mapping->vma->length = bo->total_size;
return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
bo_map.offset);
}