#ifdef DRV_TEGRA
+#include <assert.h>
+#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
static void transfer_tile(struct bo *bo, uint8_t *tiled, uint8_t *untiled, enum tegra_map_type type,
uint32_t bytes_per_pixel, uint32_t gob_top, uint32_t gob_left,
- uint32_t gob_size_pixels)
+ uint32_t gob_size_pixels, uint8_t *tiled_last)
{
uint8_t *tmp;
uint32_t x, y, k;
x = gob_left + (((k >> 3) & 8) | ((k >> 1) & 4) | (k & 3));
y = gob_top + ((k >> 7 << 3) | ((k >> 3) & 6) | ((k >> 2) & 1));
- tmp = untiled + (y * bo->strides[0]) + (x * bytes_per_pixel);
+ if (tiled >= tiled_last)
+ return;
+
+ if (x >= bo->meta.width || y >= bo->meta.height) {
+ tiled += bytes_per_pixel;
+ continue;
+ }
+
+ tmp = untiled + y * bo->meta.strides[0] + x * bytes_per_pixel;
if (type == TEGRA_READ_TILED_BUFFER)
memcpy(tmp, tiled, bytes_per_pixel);
uint32_t gob_width, gob_height, gob_size_bytes, gob_size_pixels, gob_count_x, gob_count_y,
gob_top, gob_left;
uint32_t i, j, offset;
- uint8_t *tmp;
- uint32_t bytes_per_pixel = drv_stride_from_format(bo->format, 1, 0);
+ uint8_t *tmp, *tiled_last;
+ uint32_t bytes_per_pixel = drv_stride_from_format(bo->meta.format, 1, 0);
/*
* The blocklinear format consists of 8*(2^n) x 64 byte sized tiles,
gob_width = DIV_ROUND_UP(NV_BLOCKLINEAR_GOB_WIDTH, bytes_per_pixel);
gob_height = NV_BLOCKLINEAR_GOB_HEIGHT * (1 << NV_DEFAULT_BLOCK_HEIGHT_LOG2);
/* Calculate the height from maximum possible gob height */
- while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->height)
+ while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->meta.height)
gob_height /= 2;
gob_size_bytes = gob_height * NV_BLOCKLINEAR_GOB_WIDTH;
gob_size_pixels = gob_height * gob_width;
- gob_count_x = DIV_ROUND_UP(bo->strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
- gob_count_y = DIV_ROUND_UP(bo->height, gob_height);
+ gob_count_x = DIV_ROUND_UP(bo->meta.strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
+ gob_count_y = DIV_ROUND_UP(bo->meta.height, gob_height);
+
+ tiled_last = tiled + bo->meta.total_size;
offset = 0;
for (j = 0; j < gob_count_y; j++) {
gob_left = i * gob_width;
transfer_tile(bo, tmp, untiled, type, bytes_per_pixel, gob_top, gob_left,
- gob_size_pixels);
+ gob_size_pixels, tiled_last);
offset += gob_size_bytes;
}
static int tegra_init(struct driver *drv)
{
- int ret;
struct format_metadata metadata;
- uint64_t flags = BO_USE_RENDER_MASK;
+ uint64_t use_flags = BO_USE_RENDER_MASK;
metadata.tiling = NV_MEM_KIND_PITCH;
metadata.priority = 1;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- flags &= ~BO_USE_SW_WRITE_OFTEN;
- flags &= ~BO_USE_SW_READ_OFTEN;
- flags &= ~BO_USE_LINEAR;
+ use_flags &= ~BO_USE_SW_WRITE_OFTEN;
+ use_flags &= ~BO_USE_SW_READ_OFTEN;
+ use_flags &= ~BO_USE_LINEAR;
metadata.tiling = NV_MEM_KIND_C32_2CRA;
metadata.priority = 2;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
}
static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
uint32_t size, stride, block_height_log2 = 0;
enum nv_mem_kind kind = NV_MEM_KIND_PITCH;
struct drm_tegra_gem_create gem_create;
int ret;
- if (flags & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
+ if (use_flags &
+ (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
compute_layout_linear(width, height, format, &stride, &size);
else
compute_layout_blocklinear(width, height, format, &kind, &block_height_log2,
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size);
- return ret;
+ drv_log("DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%u)\n", size);
+ return -errno;
}
bo->handles[0].u32 = gem_create.handle;
- bo->offsets[0] = 0;
- bo->total_size = bo->sizes[0] = size;
- bo->strides[0] = stride;
+ bo->meta.offsets[0] = 0;
+ bo->meta.total_size = bo->meta.sizes[0] = size;
+ bo->meta.strides[0] = stride;
if (kind != NV_MEM_KIND_PITCH) {
struct drm_tegra_gem_set_tiling gem_tile;
}
/* Encode blocklinear parameters for EGLImage creation. */
- bo->tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8);
- bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling);
+ bo->meta.tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8);
+ bo->meta.format_modifiers[0] = fourcc_mod_code(NV, bo->meta.tiling);
+ }
+
+ return 0;
+}
+
+static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+ int ret;
+ struct drm_tegra_gem_get_tiling gem_get_tiling;
+
+ ret = drv_prime_bo_import(bo, data);
+ if (ret)
+ return ret;
+
+ /* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
+ memset(&gem_get_tiling, 0, sizeof(gem_get_tiling));
+ gem_get_tiling.handle = bo->handles[0].u32;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_GET_TILING, &gem_get_tiling);
+ if (ret) {
+ drv_gem_bo_destroy(bo);
+ return -errno;
+ }
+
+ /* NOTE(djmk): we only know about one tiled format, so if our drmIoctl call tells us we are
+ tiled, assume it is this format (NV_MEM_KIND_C32_2CRA) otherwise linear (KIND_PITCH). */
+ if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_PITCH) {
+ bo->meta.tiling = NV_MEM_KIND_PITCH;
+ } else if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_BLOCK) {
+ bo->meta.tiling = NV_MEM_KIND_C32_2CRA;
+ } else {
+ drv_log("%s: unknown tile format %d\n", __func__, gem_get_tiling.mode);
+ drv_gem_bo_destroy(bo);
+ assert(0);
}
+ bo->meta.format_modifiers[0] = fourcc_mod_code(NV, bo->meta.tiling);
return 0;
}
-static void *tegra_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+static void *tegra_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_tegra_gem_mmap gem_map;
ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map, sizeof(gem_map));
if (ret < 0) {
- fprintf(stderr, "drv: DRM_TEGRA_GEM_MMAP failed\n");
+ drv_log("DRM_TEGRA_GEM_MMAP failed\n");
return MAP_FAILED;
}
- void *addr = mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+ void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
-
- data->length = bo->total_size;
-
- if ((bo->tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
+ vma->length = bo->meta.total_size;
+ if ((bo->meta.tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
priv = calloc(1, sizeof(*priv));
- priv->untiled = calloc(1, bo->total_size);
+ priv->untiled = calloc(1, bo->meta.total_size);
priv->tiled = addr;
- data->priv = priv;
+ vma->priv = priv;
transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_READ_TILED_BUFFER);
addr = priv->untiled;
}
return addr;
}
-static int tegra_bo_unmap(struct bo *bo, struct map_info *data)
+static int tegra_bo_unmap(struct bo *bo, struct vma *vma)
{
- if (data->priv) {
- struct tegra_private_map_data *priv = data->priv;
- transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER);
- data->addr = priv->tiled;
+ if (vma->priv) {
+ struct tegra_private_map_data *priv = vma->priv;
+ vma->addr = priv->tiled;
free(priv->untiled);
free(priv);
- data->priv = NULL;
+ vma->priv = NULL;
}
- return munmap(data->addr, data->length);
+ return munmap(vma->addr, vma->length);
+}
+
+static int tegra_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ struct tegra_private_map_data *priv = mapping->vma->priv;
+
+ if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
+ transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER);
+
+ return 0;
}
-struct backend backend_tegra = {
+const struct backend backend_tegra = {
.name = "tegra",
.init = tegra_init,
.bo_create = tegra_bo_create,
.bo_destroy = drv_gem_bo_destroy,
- .bo_import = drv_prime_bo_import,
+ .bo_import = tegra_bo_import,
.bo_map = tegra_bo_map,
.bo_unmap = tegra_bo_unmap,
+ .bo_flush = tegra_bo_flush,
};
#endif