 * Copyright 2018 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
#include <assert.h>
#include <drm_fourcc.h>
#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
/* Alignment values are based on SDM845 Gfx IP */
#define DEFAULT_ALIGNMENT 64
#define BUFFER_SIZE_ALIGN 4096

/* Venus (video firmware) stride/scanline alignment requirements for NV12. */
#define VENUS_STRIDE_ALIGN 128
#define VENUS_SCANLINE_ALIGN 16
#define NV12_LINEAR_PADDING (12 * 1024)
/* UBWC NV12 padding: at least 16 KiB, scaling with the luma stride.
 * NOTE(review): argument parenthesized for macro hygiene; constants presumably
 * come from the msm KMD requirements — confirm against kernel driver. */
#define NV12_UBWC_PADDING(y_stride) (MAX(16 * 1024, (y_stride)*48))
/* UBWC metadata plane alignment, in units of macrotile blocks / bytes. */
#define MACROTILE_WIDTH_ALIGN 64
#define MACROTILE_HEIGHT_ALIGN 16
#define PLANE_SIZE_ALIGN 4096

/* Value stored in bo->meta.tiling to mark a UBWC-compressed layout. */
#define MSM_UBWC_TILING 1
38 static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
39 DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
40 DRM_FORMAT_XRGB8888 };
42 static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8,
43 DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
46 * Each macrotile consists of m x n (mostly 4 x 4) tiles.
47 * Pixel data pitch/stride is aligned with macrotile width.
48 * Pixel data height is aligned with macrotile height.
49 * Entire pixel data buffer is aligned with 4k(bytes).
51 static uint32_t get_ubwc_meta_size(uint32_t width, uint32_t height, uint32_t tile_width,
54 uint32_t macrotile_width, macrotile_height;
56 macrotile_width = DIV_ROUND_UP(width, tile_width);
57 macrotile_height = DIV_ROUND_UP(height, tile_height);
59 // Align meta buffer width to 64 blocks
60 macrotile_width = ALIGN(macrotile_width, MACROTILE_WIDTH_ALIGN);
62 // Align meta buffer height to 16 blocks
63 macrotile_height = ALIGN(macrotile_height, MACROTILE_HEIGHT_ALIGN);
65 return ALIGN(macrotile_width * macrotile_height, PLANE_SIZE_ALIGN);
68 static void msm_calculate_layout(struct bo *bo)
70 uint32_t width, height;
72 width = bo->meta.width;
73 height = bo->meta.height;
75 /* NV12 format requires extra padding with platform
76 * specific alignments for venus driver
78 if (bo->meta.format == DRM_FORMAT_NV12) {
79 uint32_t y_stride, uv_stride, y_scanline, uv_scanline, y_plane, uv_plane, size,
82 y_stride = ALIGN(width, VENUS_STRIDE_ALIGN);
83 uv_stride = ALIGN(width, VENUS_STRIDE_ALIGN);
84 y_scanline = ALIGN(height, VENUS_SCANLINE_ALIGN * 2);
85 uv_scanline = ALIGN(DIV_ROUND_UP(height, 2), VENUS_SCANLINE_ALIGN);
86 y_plane = y_stride * y_scanline;
87 uv_plane = uv_stride * uv_scanline;
89 if (bo->meta.tiling == MSM_UBWC_TILING) {
90 y_plane += get_ubwc_meta_size(width, height, 32, 8);
91 uv_plane += get_ubwc_meta_size(width >> 1, height >> 1, 16, 8);
92 extra_padding = NV12_UBWC_PADDING(y_stride);
94 extra_padding = NV12_LINEAR_PADDING;
97 bo->meta.strides[0] = y_stride;
98 bo->meta.sizes[0] = y_plane;
99 bo->meta.offsets[1] = y_plane;
100 bo->meta.strides[1] = uv_stride;
101 size = y_plane + uv_plane + extra_padding;
102 bo->meta.total_size = ALIGN(size, BUFFER_SIZE_ALIGN);
103 bo->meta.sizes[1] = bo->meta.total_size - bo->meta.sizes[0];
105 uint32_t stride, alignw, alignh;
107 alignw = ALIGN(width, DEFAULT_ALIGNMENT);
108 /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. */
109 if (bo->meta.format == DRM_FORMAT_YVU420_ANDROID) {
112 alignh = ALIGN(height, DEFAULT_ALIGNMENT);
115 stride = drv_stride_from_format(bo->meta.format, alignw, 0);
117 /* Calculate size and assign stride, size, offset to each plane based on format */
118 drv_bo_from_format(bo, stride, alignh, bo->meta.format);
120 /* For all RGB UBWC formats */
121 if (bo->meta.tiling == MSM_UBWC_TILING) {
122 bo->meta.sizes[0] += get_ubwc_meta_size(width, height, 16, 4);
123 bo->meta.total_size = bo->meta.sizes[0];
124 assert(IS_ALIGNED(bo->meta.total_size, BUFFER_SIZE_ALIGN));
129 static bool is_ubwc_fmt(uint32_t format)
132 case DRM_FORMAT_XBGR8888:
133 case DRM_FORMAT_ABGR8888:
134 case DRM_FORMAT_XRGB8888:
135 case DRM_FORMAT_ARGB8888:
136 case DRM_FORMAT_NV12:
143 static void msm_add_ubwc_combinations(struct driver *drv, const uint32_t *formats,
144 uint32_t num_formats, struct format_metadata *metadata,
147 for (uint32_t i = 0; i < num_formats; i++) {
148 if (is_ubwc_fmt(formats[i])) {
149 struct combination combo = { .format = formats[i],
150 .metadata = *metadata,
151 .use_flags = use_flags };
152 drv_array_append(drv->combos, &combo);
157 static int msm_init(struct driver *drv)
159 struct format_metadata metadata;
160 uint64_t render_use_flags = BO_USE_RENDER_MASK;
161 uint64_t texture_use_flags = BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_DECODER;
162 uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_OFTEN |
163 BO_USE_LINEAR | BO_USE_PROTECTED);
165 drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
166 &LINEAR_METADATA, render_use_flags);
168 drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
169 &LINEAR_METADATA, texture_use_flags);
172 * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
173 * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
175 drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
176 drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
178 /* Android CTS tests require this. */
179 drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
181 drv_modify_linear_combinations(drv);
183 metadata.tiling = MSM_UBWC_TILING;
184 metadata.priority = 2;
185 metadata.modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED;
187 render_use_flags &= ~sw_flags;
188 texture_use_flags &= ~sw_flags;
190 msm_add_ubwc_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
191 &metadata, render_use_flags | BO_USE_SCANOUT);
193 msm_add_ubwc_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
194 &metadata, texture_use_flags);
199 static int msm_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height,
200 uint32_t format, const uint64_t modifier)
202 struct drm_msm_gem_new req;
206 bo->meta.tiling = (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) ? MSM_UBWC_TILING : 0;
208 msm_calculate_layout(bo);
210 memset(&req, 0, sizeof(req));
211 req.flags = MSM_BO_WC | MSM_BO_SCANOUT;
212 req.size = bo->meta.total_size;
214 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MSM_GEM_NEW, &req);
216 drv_log("DRM_IOCTL_MSM_GEM_NEW failed with %s\n", strerror(errno));
221 * Though we use only one plane, we need to set handle for
222 * all planes to pass kernel checks
224 for (i = 0; i < bo->meta.num_planes; i++) {
225 bo->handles[i].u32 = req.handle;
226 bo->meta.format_modifiers[i] = modifier;
232 static int msm_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
233 uint32_t format, const uint64_t *modifiers, uint32_t count)
235 static const uint64_t modifier_order[] = {
236 DRM_FORMAT_MOD_QCOM_COMPRESSED,
237 DRM_FORMAT_MOD_LINEAR,
241 drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
243 return msm_bo_create_for_modifier(bo, width, height, format, modifier);
246 /* msm_bo_create will create linear buffers for now */
247 static int msm_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
250 struct combination *combo = drv_get_combination(bo->drv, format, flags);
253 drv_log("invalid format = %d, flags = %" PRIx64 " combination\n", format, flags);
257 return msm_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier);
260 static void *msm_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
263 struct drm_msm_gem_info req;
265 memset(&req, 0, sizeof(req));
266 req.handle = bo->handles[0].u32;
268 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MSM_GEM_INFO, &req);
270 drv_log("DRM_IOCLT_MSM_GEM_INFO failed with %s\n", strerror(errno));
273 vma->length = bo->meta.total_size;
275 return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
279 const struct backend backend_msm = {
282 .bo_create = msm_bo_create,
283 .bo_create_with_modifiers = msm_bo_create_with_modifiers,
284 .bo_destroy = drv_gem_bo_destroy,
285 .bo_import = drv_prime_bo_import,
286 .bo_map = msm_bo_map,
287 .bo_unmap = drv_bo_munmap,