#ifdef DRV_I915

#include <assert.h>
#include <errno.h>
#include <i915_drm.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
-static const uint32_t tileable_formats[] = { DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
- DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_UYVY,
- DRM_FORMAT_YUYV };
+#define I915_CACHELINE_SIZE 64
+#define I915_CACHELINE_MASK (I915_CACHELINE_SIZE - 1)
-static const uint32_t linear_only_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_YVU420,
- DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XRGB8888 };
-struct i915_device {
- int gen;
- drm_intel_bufmgr *mgr;
+static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };
+
+static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
+ DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+
+static const uint64_t gen_modifier_order[] = { I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR };
+
+static const uint64_t gen11_modifier_order[] = { I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR };
+
+struct modifier_support_t {
+ const uint64_t *order;
uint32_t count;
};
-struct i915_bo {
- drm_intel_bo *ibos[DRV_MAX_PLANES];
+struct i915_device {
+ uint32_t gen;
+ int32_t has_llc;
+ struct modifier_support_t modifier;
};
/*
 * Map a PCI device id to an Intel GPU generation.
 *
 * Only the generations this backend treats specially are distinguished:
 * gen 3 (stricter stride rules), gen 11 and gen 12. Any other id is
 * reported as the generic "4".
 */
static uint32_t i915_get_gen(int device_id)
{
	const uint16_t gen3_ids[] = { 0x2582, 0x2592, 0x2772, 0x27A2, 0x27AE,
				      0x29C2, 0x29B2, 0x29D2, 0xA001, 0xA011 };
	const uint16_t gen11_ids[] = { 0x4E71, 0x4E61, 0x4E51, 0x4E55, 0x4E57 };
	const uint16_t gen12_ids[] = { 0x9A40, 0x9A49, 0x9A59, 0x9A60, 0x9A68,
				       0x9A70, 0x9A78, 0x9AC0, 0x9AC9, 0x9AD9,
				       0x9AF8 };
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(gen3_ids); i++)
		if (gen3_ids[i] == device_id)
			return 3;

	/* Gen 11 */
	for (i = 0; i < ARRAY_SIZE(gen11_ids); i++)
		if (gen11_ids[i] == device_id)
			return 11;

	/* Gen 12 */
	for (i = 0; i < ARRAY_SIZE(gen12_ids); i++)
		if (gen12_ids[i] == device_id)
			return 12;

	return 4;
}
-static int i915_add_kms_item(struct driver *drv, const struct kms_item *item)
+static void i915_get_modifier_order(struct i915_device *i915)
{
- uint32_t i;
- struct combination *combo;
-
- /*
- * Older hardware can't scanout Y-tiled formats. Newer devices can, and
- * report this functionality via format modifiers.
- */
- for (i = 0; i < drv->backend->combos.size; i++) {
- combo = &drv->backend->combos.data[i];
- if (combo->format == item->format) {
- if ((combo->metadata.tiling == I915_TILING_Y &&
- item->modifier == I915_FORMAT_MOD_Y_TILED) ||
- (combo->metadata.tiling == I915_TILING_X &&
- item->modifier == I915_FORMAT_MOD_X_TILED)) {
- combo->metadata.modifier = item->modifier;
- combo->usage |= item->usage;
- } else if (combo->metadata.tiling != I915_TILING_Y) {
- combo->usage |= item->usage;
- }
- }
+ if (i915->gen == 11) {
+ i915->modifier.order = gen11_modifier_order;
+ i915->modifier.count = ARRAY_SIZE(gen11_modifier_order);
+ } else {
+ i915->modifier.order = gen_modifier_order;
+ i915->modifier.count = ARRAY_SIZE(gen_modifier_order);
}
+}
/* Return |current_flags| with every bit set in |mask| cleared. */
static uint64_t unset_flags(uint64_t current_flags, uint64_t mask)
{
	return current_flags & ~mask;
}
static int i915_add_combinations(struct driver *drv)
{
- int ret;
- uint32_t i, num_items;
- struct kms_item *items;
struct format_metadata metadata;
- uint64_t flags = BO_COMMON_USE_MASK;
+ uint64_t render, scanout_and_render, texture_only;
+
+ scanout_and_render = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
+ render = BO_USE_RENDER_MASK;
+ texture_only = BO_USE_TEXTURE_MASK;
+ uint64_t linear_mask =
+ BO_USE_RENDERSCRIPT | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN;
metadata.tiling = I915_TILING_NONE;
metadata.priority = 1;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- ret = drv_add_combinations(drv, linear_only_formats, ARRAY_SIZE(linear_only_formats),
- &metadata, flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, scanout_and_render);
- ret = drv_add_combinations(drv, tileable_formats, ARRAY_SIZE(tileable_formats), &metadata,
- flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
+ texture_only);
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+ drv_modify_linear_combinations(drv);
- flags &= ~BO_USE_SW_WRITE_OFTEN;
- flags &= ~BO_USE_SW_READ_OFTEN;
- flags &= ~BO_USE_LINEAR;
+ /* NV12 format for camera, display, decoding and encoding. */
+ /* IPU3 camera ISP supports only NV12 output. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+
+ /* Android CTS tests require this. */
+ drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
+
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
+ * from camera and input/output from hardware decoder/encoder.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
+
+ render = unset_flags(render, linear_mask);
+ scanout_and_render = unset_flags(scanout_and_render, linear_mask);
metadata.tiling = I915_TILING_X;
metadata.priority = 2;
+ metadata.modifier = I915_FORMAT_MOD_X_TILED;
- ret = drv_add_combinations(drv, tileable_formats, ARRAY_SIZE(tileable_formats), &metadata,
- flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, scanout_and_render);
metadata.tiling = I915_TILING_Y;
metadata.priority = 3;
+ metadata.modifier = I915_FORMAT_MOD_Y_TILED;
+
+ scanout_and_render =
+ unset_flags(scanout_and_render, BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY);
+/* Support y-tiled NV12 and P010 for libva */
+#ifdef I915_SCANOUT_Y_TILED
+ drv_add_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT);
+#else
+ drv_add_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
+#endif
+ scanout_and_render = unset_flags(scanout_and_render, BO_USE_SCANOUT);
+ drv_add_combination(drv, DRM_FORMAT_P010, &metadata,
+ BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
- ret = drv_add_combinations(drv, tileable_formats, ARRAY_SIZE(tileable_formats), &metadata,
- flags);
- if (ret)
- return ret;
-
- items = drv_query_kms(drv, &num_items);
- if (!items || !num_items)
- return 0;
-
- for (i = 0; i < num_items; i++) {
- ret = i915_add_kms_item(drv, &items[i]);
- if (ret) {
- free(items);
- return ret;
- }
- }
-
- free(items);
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, scanout_and_render);
return 0;
}
uint32_t *aligned_height)
{
struct i915_device *i915 = bo->drv->priv;
- uint32_t horizontal_alignment = 4;
- uint32_t vertical_alignment = 4;
+ uint32_t horizontal_alignment;
+ uint32_t vertical_alignment;
switch (tiling) {
default:
case I915_TILING_NONE:
+ /*
+ * The Intel GPU doesn't need any alignment in linear mode,
+ * but libva requires the allocation stride to be aligned to
+ * 16 bytes and height to 4 rows. Further, we round up the
+	 * horizontal alignment so that rows start on a cache line (64
+ * bytes).
+ */
horizontal_alignment = 64;
+ vertical_alignment = 4;
break;
case I915_TILING_X:
break;
}
- *aligned_height = ALIGN(bo->height, vertical_alignment);
+ *aligned_height = ALIGN(*aligned_height, vertical_alignment);
if (i915->gen > 3) {
*stride = ALIGN(*stride, horizontal_alignment);
} else {
return 0;
}
+static void i915_clflush(void *start, size_t size)
+{
+ void *p = (void *)(((uintptr_t)start) & ~I915_CACHELINE_MASK);
+ void *end = (void *)((uintptr_t)start + size);
+
+ __builtin_ia32_mfence();
+ while (p < end) {
+ __builtin_ia32_clflush(p);
+ p = (void *)((uintptr_t)p + I915_CACHELINE_SIZE);
+ }
+}
+
static int i915_init(struct driver *drv)
{
- struct i915_device *i915_dev;
- drm_i915_getparam_t get_param;
- int device_id;
int ret;
+ int device_id;
+ struct i915_device *i915;
+ drm_i915_getparam_t get_param = { 0 };
- i915_dev = calloc(1, sizeof(*i915_dev));
- if (!i915_dev)
- return -1;
+ i915 = calloc(1, sizeof(*i915));
+ if (!i915)
+ return -ENOMEM;
- memset(&get_param, 0, sizeof(get_param));
get_param.param = I915_PARAM_CHIPSET_ID;
get_param.value = &device_id;
ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GETPARAM failed\n");
- free(i915_dev);
+ drv_log("Failed to get I915_PARAM_CHIPSET_ID\n");
+ free(i915);
return -EINVAL;
}
- i915_dev->gen = get_gen(device_id);
- i915_dev->count = 0;
+ i915->gen = i915_get_gen(device_id);
+ i915_get_modifier_order(i915);
- i915_dev->mgr = drm_intel_bufmgr_gem_init(drv->fd, 16 * 1024);
- if (!i915_dev->mgr) {
- fprintf(stderr, "drv: drm_intel_bufmgr_gem_init failed\n");
- free(i915_dev);
+ memset(&get_param, 0, sizeof(get_param));
+ get_param.param = I915_PARAM_HAS_LLC;
+ get_param.value = &i915->has_llc;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
+ if (ret) {
+ drv_log("Failed to get I915_PARAM_HAS_LLC\n");
+ free(i915);
return -EINVAL;
}
- drv->priv = i915_dev;
+ drv->priv = i915;
return i915_add_combinations(drv);
}
-static void i915_close(struct driver *drv)
+static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format)
{
- struct i915_device *i915_dev = drv->priv;
- drm_intel_bufmgr_destroy(i915_dev->mgr);
- free(i915_dev);
- drv->priv = NULL;
+ uint32_t offset;
+ size_t plane;
+ int ret, pagesize;
+
+ offset = 0;
+ pagesize = getpagesize();
+ for (plane = 0; plane < drv_num_planes_from_format(format); plane++) {
+ uint32_t stride = drv_stride_from_format(format, width, plane);
+ uint32_t plane_height = drv_height_from_format(format, height, plane);
+
+ if (bo->meta.tiling != I915_TILING_NONE)
+ assert(IS_ALIGNED(offset, pagesize));
+
+ ret = i915_align_dimensions(bo, bo->meta.tiling, &stride, &plane_height);
+ if (ret)
+ return ret;
+
+ bo->meta.strides[plane] = stride;
+ bo->meta.sizes[plane] = stride * plane_height;
+ bo->meta.offsets[plane] = offset;
+ offset += bo->meta.sizes[plane];
+ }
+
+ bo->meta.total_size = ALIGN(offset, pagesize);
+
+ return 0;
}
-static int i915_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+static int i915_bo_compute_metadata(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags, const uint64_t *modifiers, uint32_t count)
{
- int ret;
- size_t plane;
- char name[20];
- uint32_t stride;
- uint32_t tiling_mode;
- struct i915_bo *i915_bo;
-
- stride = drv_stride_from_format(format, width, 0);
- struct i915_device *i915_dev = (struct i915_device *)bo->drv->priv;
+ uint64_t modifier;
+ struct i915_device *i915 = bo->drv->priv;
+ bool huge_bo = (i915->gen <= 11) && (width > 4096);
- if (flags & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
- tiling_mode = I915_TILING_NONE;
- else if (flags & BO_USE_SCANOUT)
- tiling_mode = I915_TILING_X;
- else
- tiling_mode = I915_TILING_Y;
+ if (modifiers) {
+ modifier =
+ drv_pick_modifier(modifiers, count, i915->modifier.order, i915->modifier.count);
+ } else {
+ struct combination *combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
+ modifier = combo->metadata.modifier;
+ }
/*
- * Align the Y plane to 128 bytes so the chroma planes would be aligned
- * to 64 byte boundaries. This is an Intel HW requirement.
+ * i915 only supports linear/x-tiled above 4096 wide
*/
- if (format == DRM_FORMAT_YVU420 || format == DRM_FORMAT_YVU420_ANDROID) {
- stride = ALIGN(stride, 128);
- tiling_mode = I915_TILING_NONE;
+ if (huge_bo && modifier != I915_FORMAT_MOD_X_TILED && modifier != DRM_FORMAT_MOD_LINEAR) {
+ uint32_t i;
+ for (i = 0; modifiers && i < count; i++) {
+ if (modifiers[i] == I915_FORMAT_MOD_X_TILED)
+ break;
+ }
+ if (i == count)
+ modifier = DRM_FORMAT_MOD_LINEAR;
+ else
+ modifier = I915_FORMAT_MOD_X_TILED;
}
- ret = i915_align_dimensions(bo, tiling_mode, &stride, &height);
- if (ret)
- return ret;
-
- drv_bo_from_format(bo, stride, height, format);
-
- snprintf(name, sizeof(name), "i915-buffer-%u", i915_dev->count);
- i915_dev->count++;
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ bo->meta.tiling = I915_TILING_NONE;
+ break;
+ case I915_FORMAT_MOD_X_TILED:
+ bo->meta.tiling = I915_TILING_X;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ bo->meta.tiling = I915_TILING_Y;
+ break;
+ }
- i915_bo = calloc(1, sizeof(*i915_bo));
- if (!i915_bo)
- return -ENOMEM;
+ bo->meta.format_modifiers[0] = modifier;
- bo->priv = i915_bo;
+ if (format == DRM_FORMAT_YVU420_ANDROID) {
+ /*
+ * We only need to be able to use this as a linear texture,
+ * which doesn't put any HW restrictions on how we lay it
+ * out. The Android format does require the stride to be a
+ * multiple of 16 and expects the Cr and Cb stride to be
+ * ALIGN(Y_stride / 2, 16), which we can make happen by
+ * aligning to 32 bytes here.
+ */
+ uint32_t stride = ALIGN(width, 32);
+ drv_bo_from_format(bo, stride, height, format);
+ } else if (modifier == I915_FORMAT_MOD_Y_TILED_CCS) {
+ /*
+ * For compressed surfaces, we need a color control surface
+ * (CCS). Color compression is only supported for Y tiled
+ * surfaces, and for each 32x16 tiles in the main surface we
+ * need a tile in the control surface. Y tiles are 128 bytes
+ * wide and 32 lines tall and we use that to first compute the
+ * width and height in tiles of the main surface. stride and
+ * height are already multiples of 128 and 32, respectively:
+ */
+ uint32_t stride = drv_stride_from_format(format, width, 0);
+ uint32_t width_in_tiles = DIV_ROUND_UP(stride, 128);
+ uint32_t height_in_tiles = DIV_ROUND_UP(height, 32);
+ uint32_t size = width_in_tiles * height_in_tiles * 4096;
+ uint32_t offset = 0;
+
+ bo->meta.strides[0] = width_in_tiles * 128;
+ bo->meta.sizes[0] = size;
+ bo->meta.offsets[0] = offset;
+ offset += size;
+
+ /*
+ * Now, compute the width and height in tiles of the control
+ * surface by dividing and rounding up.
+ */
+ uint32_t ccs_width_in_tiles = DIV_ROUND_UP(width_in_tiles, 32);
+ uint32_t ccs_height_in_tiles = DIV_ROUND_UP(height_in_tiles, 16);
+ uint32_t ccs_size = ccs_width_in_tiles * ccs_height_in_tiles * 4096;
+
+ /*
+ * With stride and height aligned to y tiles, offset is
+ * already a multiple of 4096, which is the required alignment
+ * of the CCS.
+ */
+ bo->meta.strides[1] = ccs_width_in_tiles * 128;
+ bo->meta.sizes[1] = ccs_size;
+ bo->meta.offsets[1] = offset;
+ offset += ccs_size;
- i915_bo->ibos[0] = drm_intel_bo_alloc(i915_dev->mgr, name, bo->total_size, 0);
- if (!i915_bo->ibos[0]) {
- fprintf(stderr, "drv: drm_intel_bo_alloc failed");
- free(i915_bo);
- bo->priv = NULL;
- return -ENOMEM;
+ bo->meta.num_planes = 2;
+ bo->meta.total_size = offset;
+ } else {
+ i915_bo_from_format(bo, width, height, format);
}
+ return 0;
+}
- for (plane = 0; plane < bo->num_planes; plane++) {
- if (plane > 0)
- drm_intel_bo_reference(i915_bo->ibos[0]);
+static int i915_bo_create_from_metadata(struct bo *bo)
+{
+ int ret;
+ size_t plane;
+ struct drm_i915_gem_create gem_create = { 0 };
+ struct drm_i915_gem_set_tiling gem_set_tiling = { 0 };
- bo->handles[plane].u32 = i915_bo->ibos[0]->handle;
- i915_bo->ibos[plane] = i915_bo->ibos[0];
+ gem_create.size = bo->meta.total_size;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n", gem_create.size);
+ return -errno;
}
- bo->tiling = tiling_mode;
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = gem_create.handle;
- ret = drm_intel_bo_set_tiling(i915_bo->ibos[0], &bo->tiling, bo->strides[0]);
+ gem_set_tiling.handle = bo->handles[0].u32;
+ gem_set_tiling.tiling_mode = bo->meta.tiling;
+ gem_set_tiling.stride = bo->meta.strides[0];
- if (ret || bo->tiling != tiling_mode) {
- fprintf(stderr, "drv: drm_intel_gem_bo_set_tiling failed errno=%x, stride=%x\n",
- errno, bo->strides[0]);
- /* Calls i915 bo destroy. */
- bo->drv->backend->bo_destroy(bo);
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_TILING, &gem_set_tiling);
+ if (ret) {
+ struct drm_gem_close gem_close = { 0 };
+ gem_close.handle = bo->handles[0].u32;
+ drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+
+ drv_log("DRM_IOCTL_I915_GEM_SET_TILING failed with %d\n", errno);
return -errno;
}
return 0;
}
-static int i915_bo_destroy(struct bo *bo)
+static void i915_close(struct driver *drv)
{
- size_t plane;
- struct i915_bo *i915_bo = bo->priv;
+ free(drv->priv);
+ drv->priv = NULL;
+}
+
+static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+ int ret;
+ struct drm_i915_gem_get_tiling gem_get_tiling = { 0 };
- for (plane = 0; plane < bo->num_planes; plane++)
- drm_intel_bo_unreference(i915_bo->ibos[plane]);
+ ret = drv_prime_bo_import(bo, data);
+ if (ret)
+ return ret;
- free(i915_bo);
- bo->priv = NULL;
+ /* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
+ gem_get_tiling.handle = bo->handles[0].u32;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_GET_TILING, &gem_get_tiling);
+ if (ret) {
+ drv_gem_bo_destroy(bo);
+ drv_log("DRM_IOCTL_I915_GEM_GET_TILING failed.\n");
+ return ret;
+ }
+
+ bo->meta.tiling = gem_get_tiling.tiling_mode;
return 0;
}
-static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
- size_t plane;
- uint32_t swizzling;
- struct i915_bo *i915_bo;
- struct i915_device *i915_dev = bo->drv->priv;
+ int ret;
+ void *addr;
- i915_bo = calloc(1, sizeof(*i915_bo));
- if (!i915_bo)
- return -ENOMEM;
+ if (bo->meta.format_modifiers[0] == I915_FORMAT_MOD_Y_TILED_CCS)
+ return MAP_FAILED;
- bo->priv = i915_bo;
+ if (bo->meta.tiling == I915_TILING_NONE) {
+ struct drm_i915_gem_mmap gem_map = { 0 };
+ /* TODO(b/118799155): We don't seem to have a good way to
+ * detect the use cases for which WC mapping is really needed.
+ * The current heuristic seems overly coarse and may be slowing
+ * down some other use cases unnecessarily.
+ *
+ * For now, care must be taken not to use WC mappings for
+ * Renderscript and camera use cases, as they're
+ * performance-sensitive. */
+ if ((bo->meta.use_flags & BO_USE_SCANOUT) &&
+ !(bo->meta.use_flags &
+ (BO_USE_RENDERSCRIPT | BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)))
+ gem_map.flags = I915_MMAP_WC;
+
+ gem_map.handle = bo->handles[0].u32;
+ gem_map.offset = 0;
+ gem_map.size = bo->meta.total_size;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_MMAP failed\n");
+ return MAP_FAILED;
+ }
- /*
- * When self-importing, libdrm_intel increments the reference count
- * on the drm_intel_bo. It also returns the same drm_intel_bo per GEM
- * handle. Thus, we don't need to increase the reference count
- * (i.e, drv_increment_reference_count) when importing with this
- * backend.
- */
- for (plane = 0; plane < bo->num_planes; plane++) {
-
- i915_bo->ibos[plane] = drm_intel_bo_gem_create_from_prime(
- i915_dev->mgr, data->fds[plane], data->sizes[plane]);
-
- if (!i915_bo->ibos[plane]) {
- /*
- * Need to call GEM close on planes that were opened,
- * if any. Adjust the num_planes variable to be the
- * plane that failed, so GEM close will be called on
- * planes before that plane.
- */
- bo->num_planes = plane;
- i915_bo_destroy(bo);
- fprintf(stderr, "drv: i915: failed to import failed");
- return -EINVAL;
+ addr = (void *)(uintptr_t)gem_map.addr_ptr;
+ } else {
+ struct drm_i915_gem_mmap_gtt gem_map = { 0 };
+
+ gem_map.handle = bo->handles[0].u32;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
+ return MAP_FAILED;
}
- bo->handles[plane].u32 = i915_bo->ibos[plane]->handle;
+ addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED,
+ bo->drv->fd, gem_map.offset);
}
- if (drm_intel_bo_get_tiling(i915_bo->ibos[0], &bo->tiling, &swizzling)) {
- fprintf(stderr, "drv: drm_intel_bo_get_tiling failed");
- i915_bo_destroy(bo);
- return -EINVAL;
+ if (addr == MAP_FAILED) {
+ drv_log("i915 GEM mmap failed\n");
+ return addr;
}
- return 0;
+ vma->length = bo->meta.total_size;
+ return addr;
}
-static void *i915_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
- struct i915_bo *i915_bo = bo->priv;
+ struct drm_i915_gem_set_domain set_domain = { 0 };
- if (bo->tiling == I915_TILING_NONE)
- /* TODO(gsingh): use bo_map flags to determine if we should
- * enable writing.
- */
- ret = drm_intel_bo_map(i915_bo->ibos[0], 1);
- else
- ret = drm_intel_gem_bo_map_gtt(i915_bo->ibos[0]);
+ set_domain.handle = bo->handles[0].u32;
+ if (bo->meta.tiling == I915_TILING_NONE) {
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ if (mapping->vma->map_flags & BO_MAP_WRITE)
+ set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+ } else {
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ if (mapping->vma->map_flags & BO_MAP_WRITE)
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+ }
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
if (ret) {
- fprintf(stderr, "drv: i915_bo_map failed.");
- return MAP_FAILED;
+ drv_log("DRM_IOCTL_I915_GEM_SET_DOMAIN with %d\n", ret);
+ return ret;
}
- return i915_bo->ibos[0]->virtual;
+ return 0;
}
-static int i915_bo_unmap(struct bo *bo, struct map_info *data)
+static int i915_bo_flush(struct bo *bo, struct mapping *mapping)
{
- int ret;
- struct i915_bo *i915_bo = bo->priv;
-
- if (bo->tiling == I915_TILING_NONE)
- ret = drm_intel_bo_unmap(i915_bo->ibos[0]);
- else
- ret = drm_intel_gem_bo_unmap_gtt(i915_bo->ibos[0]);
+ struct i915_device *i915 = bo->drv->priv;
+ if (!i915->has_llc && bo->meta.tiling == I915_TILING_NONE)
+ i915_clflush(mapping->vma->addr, mapping->vma->length);
- return ret;
+ return 0;
}
-static uint32_t i915_resolve_format(uint32_t format)
+static uint32_t i915_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ /* KBL camera subsystem requires NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ return DRM_FORMAT_NV12;
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
- return DRM_FORMAT_YVU420_ANDROID;
+ /*
+ * KBL camera subsystem requires NV12. Our other use cases
+ * don't care:
+ * - Hardware video supports NV12,
+ * - USB Camera HALv3 supports NV12,
+ * - USB Camera HALv1 doesn't use this format.
+ * Moreover, NV12 is preferred for video, due to overlay
+ * support on SKL+.
+ */
+ return DRM_FORMAT_NV12;
default:
return format;
}
}
-struct backend backend_i915 = {
+const struct backend backend_i915 = {
.name = "i915",
.init = i915_init,
.close = i915_close,
- .bo_create = i915_bo_create,
- .bo_destroy = i915_bo_destroy,
+ .bo_compute_metadata = i915_bo_compute_metadata,
+ .bo_create_from_metadata = i915_bo_create_from_metadata,
+ .bo_destroy = drv_gem_bo_destroy,
.bo_import = i915_bo_import,
.bo_map = i915_bo_map,
- .bo_unmap = i915_bo_unmap,
+ .bo_unmap = drv_bo_munmap,
+ .bo_invalidate = i915_bo_invalidate,
+ .bo_flush = i915_bo_flush,
.resolve_format = i915_resolve_format,
};