/*
 * Copyright 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
19 struct planar_layout {
21 int horizontal_subsampling[DRV_MAX_PLANES];
22 int vertical_subsampling[DRV_MAX_PLANES];
23 int bytes_per_pixel[DRV_MAX_PLANES];
28 static const struct planar_layout packed_1bpp_layout = {
30 .horizontal_subsampling = { 1 },
31 .vertical_subsampling = { 1 },
32 .bytes_per_pixel = { 1 }
35 static const struct planar_layout packed_2bpp_layout = {
37 .horizontal_subsampling = { 1 },
38 .vertical_subsampling = { 1 },
39 .bytes_per_pixel = { 2 }
42 static const struct planar_layout packed_3bpp_layout = {
44 .horizontal_subsampling = { 1 },
45 .vertical_subsampling = { 1 },
46 .bytes_per_pixel = { 3 }
49 static const struct planar_layout packed_4bpp_layout = {
51 .horizontal_subsampling = { 1 },
52 .vertical_subsampling = { 1 },
53 .bytes_per_pixel = { 4 }
56 static const struct planar_layout packed_8bpp_layout = {
58 .horizontal_subsampling = { 1 },
59 .vertical_subsampling = { 1 },
60 .bytes_per_pixel = { 8 }
63 static const struct planar_layout biplanar_yuv_420_layout = {
65 .horizontal_subsampling = { 1, 2 },
66 .vertical_subsampling = { 1, 2 },
67 .bytes_per_pixel = { 1, 2 }
70 static const struct planar_layout triplanar_yuv_420_layout = {
72 .horizontal_subsampling = { 1, 2, 2 },
73 .vertical_subsampling = { 1, 2, 2 },
74 .bytes_per_pixel = { 1, 1, 1 }
77 static const struct planar_layout biplanar_yuv_p010_layout = {
79 .horizontal_subsampling = { 1, 2 },
80 .vertical_subsampling = { 1, 2 },
81 .bytes_per_pixel = { 2, 4 }
86 static const struct planar_layout *layout_from_format(uint32_t format)
89 case DRM_FORMAT_BGR233:
92 case DRM_FORMAT_RGB332:
93 return &packed_1bpp_layout;
95 case DRM_FORMAT_YVU420:
96 case DRM_FORMAT_YVU420_ANDROID:
97 return &triplanar_yuv_420_layout;
100 case DRM_FORMAT_NV21:
101 return &biplanar_yuv_420_layout;
103 case DRM_FORMAT_P010:
104 return &biplanar_yuv_p010_layout;
106 case DRM_FORMAT_ABGR1555:
107 case DRM_FORMAT_ABGR4444:
108 case DRM_FORMAT_ARGB1555:
109 case DRM_FORMAT_ARGB4444:
110 case DRM_FORMAT_BGR565:
111 case DRM_FORMAT_BGRA4444:
112 case DRM_FORMAT_BGRA5551:
113 case DRM_FORMAT_BGRX4444:
114 case DRM_FORMAT_BGRX5551:
115 case DRM_FORMAT_GR88:
116 case DRM_FORMAT_RG88:
117 case DRM_FORMAT_RGB565:
118 case DRM_FORMAT_RGBA4444:
119 case DRM_FORMAT_RGBA5551:
120 case DRM_FORMAT_RGBX4444:
121 case DRM_FORMAT_RGBX5551:
122 case DRM_FORMAT_UYVY:
123 case DRM_FORMAT_VYUY:
124 case DRM_FORMAT_XBGR1555:
125 case DRM_FORMAT_XBGR4444:
126 case DRM_FORMAT_XRGB1555:
127 case DRM_FORMAT_XRGB4444:
128 case DRM_FORMAT_YUYV:
129 case DRM_FORMAT_YVYU:
130 case DRM_FORMAT_MTISP_SXYZW10:
131 return &packed_2bpp_layout;
133 case DRM_FORMAT_BGR888:
134 case DRM_FORMAT_RGB888:
135 return &packed_3bpp_layout;
137 case DRM_FORMAT_ABGR2101010:
138 case DRM_FORMAT_ABGR8888:
139 case DRM_FORMAT_ARGB2101010:
140 case DRM_FORMAT_ARGB8888:
141 case DRM_FORMAT_AYUV:
142 case DRM_FORMAT_BGRA1010102:
143 case DRM_FORMAT_BGRA8888:
144 case DRM_FORMAT_BGRX1010102:
145 case DRM_FORMAT_BGRX8888:
146 case DRM_FORMAT_RGBA1010102:
147 case DRM_FORMAT_RGBA8888:
148 case DRM_FORMAT_RGBX1010102:
149 case DRM_FORMAT_RGBX8888:
150 case DRM_FORMAT_XBGR2101010:
151 case DRM_FORMAT_XBGR8888:
152 case DRM_FORMAT_XRGB2101010:
153 case DRM_FORMAT_XRGB8888:
154 return &packed_4bpp_layout;
156 case DRM_FORMAT_ABGR16161616F:
157 return &packed_8bpp_layout;
160 drv_log("UNKNOWN FORMAT %d\n", format);
165 size_t drv_num_planes_from_format(uint32_t format)
167 const struct planar_layout *layout = layout_from_format(format);
170 * drv_bo_new calls this function early to query number of planes and
171 * considers 0 planes to mean unknown format, so we have to support
172 * that. All other layout_from_format() queries can assume that the
173 * format is supported and that the return value is non-NULL.
176 return layout ? layout->num_planes : 0;
179 uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
181 const struct planar_layout *layout = layout_from_format(format);
183 assert(plane < layout->num_planes);
185 return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
188 uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane)
190 const struct planar_layout *layout = layout_from_format(format);
192 assert(plane < layout->num_planes);
194 return layout->vertical_subsampling[plane];
197 uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
199 const struct planar_layout *layout = layout_from_format(format);
201 assert(plane < layout->num_planes);
203 return layout->bytes_per_pixel[plane];
207 * This function returns the stride for a given format, width and plane.
209 uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
211 const struct planar_layout *layout = layout_from_format(format);
212 assert(plane < layout->num_planes);
214 uint32_t plane_width = DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]);
215 uint32_t stride = plane_width * layout->bytes_per_pixel[plane];
218 * The stride of Android YV12 buffers is required to be aligned to 16 bytes
219 * (see <system/graphics.h>).
221 if (format == DRM_FORMAT_YVU420_ANDROID)
222 stride = (plane == 0) ? ALIGN(stride, 32) : ALIGN(stride, 16);
/*
 * Return the byte size of |plane|: its stride times its subsampled height.
 */
uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
{
	uint32_t plane_height = drv_height_from_format(format, height, plane);

	return stride * plane_height;
}
232 static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
236 case DRM_FORMAT_YVU420:
237 case DRM_FORMAT_YVU420_ANDROID:
238 stride = DIV_ROUND_UP(stride, 2);
247 * This function fills in the buffer object given the driver aligned stride of
248 * the first plane, height and a format. This function assumes there is just
249 * one kernel buffer per buffer object.
251 int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
253 uint32_t padding[DRV_MAX_PLANES] = { 0 };
254 return drv_bo_from_format_and_padding(bo, stride, aligned_height, format, padding);
257 int drv_bo_from_format_and_padding(struct bo *bo, uint32_t stride, uint32_t aligned_height,
258 uint32_t format, uint32_t padding[DRV_MAX_PLANES])
260 size_t p, num_planes;
263 num_planes = drv_num_planes_from_format(format);
267 * HAL_PIXEL_FORMAT_YV12 requires that (see <system/graphics.h>):
268 * - the aligned height is same as the buffer's height.
269 * - the chroma stride is 16 bytes aligned, i.e., the luma's strides
270 * is 32 bytes aligned.
272 if (format == DRM_FORMAT_YVU420_ANDROID) {
273 assert(aligned_height == bo->meta.height);
274 assert(stride == ALIGN(stride, 32));
277 for (p = 0; p < num_planes; p++) {
278 bo->meta.strides[p] = subsample_stride(stride, format, p);
280 drv_size_from_format(format, bo->meta.strides[p], aligned_height, p) +
282 bo->meta.offsets[p] = offset;
283 offset += bo->meta.sizes[p];
286 bo->meta.total_size = offset;
290 int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
291 uint64_t use_flags, uint64_t quirks)
295 uint32_t aligned_width, aligned_height;
296 struct drm_mode_create_dumb create_dumb;
298 aligned_width = width;
299 aligned_height = height;
301 case DRM_FORMAT_YVU420_ANDROID:
302 /* Align width to 32 pixels, so chroma strides are 16 bytes as
303 * Android requires. */
304 aligned_width = ALIGN(width, 32);
305 /* Adjust the height to include room for chroma planes.
307 * HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
309 aligned_height = 3 * DIV_ROUND_UP(bo->meta.height, 2);
311 case DRM_FORMAT_YVU420:
312 case DRM_FORMAT_NV12:
313 /* Adjust the height to include room for chroma planes */
314 aligned_height = 3 * DIV_ROUND_UP(height, 2);
320 memset(&create_dumb, 0, sizeof(create_dumb));
321 if (quirks & BO_QUIRK_DUMB32BPP) {
323 DIV_ROUND_UP(aligned_width * layout_from_format(format)->bytes_per_pixel[0], 4);
324 create_dumb.bpp = 32;
326 create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
328 create_dumb.width = aligned_width;
329 create_dumb.height = aligned_height;
330 create_dumb.flags = 0;
332 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
334 drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno);
338 drv_bo_from_format(bo, create_dumb.pitch, height, format);
340 for (plane = 0; plane < bo->meta.num_planes; plane++)
341 bo->handles[plane].u32 = create_dumb.handle;
343 bo->meta.total_size = create_dumb.size;
347 int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
350 return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_NONE);
353 int drv_dumb_bo_destroy(struct bo *bo)
355 struct drm_mode_destroy_dumb destroy_dumb;
358 memset(&destroy_dumb, 0, sizeof(destroy_dumb));
359 destroy_dumb.handle = bo->handles[0].u32;
361 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
363 drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
370 int drv_gem_bo_destroy(struct bo *bo)
372 struct drm_gem_close gem_close;
376 for (plane = 0; plane < bo->meta.num_planes; plane++) {
377 for (i = 0; i < plane; i++)
378 if (bo->handles[i].u32 == bo->handles[plane].u32)
380 /* Make sure close hasn't already been called on this handle */
384 memset(&gem_close, 0, sizeof(gem_close));
385 gem_close.handle = bo->handles[plane].u32;
387 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
389 drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
390 bo->handles[plane].u32, ret);
398 int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
402 struct drm_prime_handle prime_handle;
404 for (plane = 0; plane < bo->meta.num_planes; plane++) {
405 memset(&prime_handle, 0, sizeof(prime_handle));
406 prime_handle.fd = data->fds[plane];
408 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);
411 drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);
414 * Need to call GEM close on planes that were opened,
415 * if any. Adjust the num_planes variable to be the
416 * plane that failed, so GEM close will be called on
417 * planes before that plane.
419 bo->meta.num_planes = plane;
420 drv_gem_bo_destroy(bo);
424 bo->handles[plane].u32 = prime_handle.handle;
430 void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
434 struct drm_mode_map_dumb map_dumb;
436 memset(&map_dumb, 0, sizeof(map_dumb));
437 map_dumb.handle = bo->handles[plane].u32;
439 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
441 drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n");
445 for (i = 0; i < bo->meta.num_planes; i++)
446 if (bo->handles[i].u32 == bo->handles[plane].u32)
447 vma->length += bo->meta.sizes[i];
449 return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
453 int drv_bo_munmap(struct bo *bo, struct vma *vma)
455 return munmap(vma->addr, vma->length);
458 int drv_mapping_destroy(struct bo *bo)
462 struct mapping *mapping;
466 * This function is called right before the buffer is destroyed. It will free any mappings
467 * associated with the buffer.
471 for (plane = 0; plane < bo->meta.num_planes; plane++) {
472 while (idx < drv_array_size(bo->drv->mappings)) {
473 mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx);
474 if (mapping->vma->handle != bo->handles[plane].u32) {
479 if (!--mapping->vma->refcount) {
480 ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
482 drv_log("munmap failed\n");
489 /* This shrinks and shifts the array, so don't increment idx. */
490 drv_array_remove(bo->drv->mappings, idx);
497 int drv_get_prot(uint32_t map_flags)
499 return (BO_MAP_WRITE & map_flags) ? PROT_WRITE | PROT_READ : PROT_READ;
502 uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
507 if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, &count))
508 num = (uintptr_t)(count);
513 void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane)
515 uintptr_t num = drv_get_reference_count(drv, bo, plane);
517 /* If a value isn't in the table, drmHashDelete is a no-op */
518 drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
519 drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
522 void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane)
524 uintptr_t num = drv_get_reference_count(drv, bo, plane);
526 drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
529 drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1));
532 void drv_add_combination(struct driver *drv, const uint32_t format,
533 struct format_metadata *metadata, uint64_t use_flags)
535 struct combination combo = { .format = format,
536 .metadata = *metadata,
537 .use_flags = use_flags };
539 drv_array_append(drv->combos, &combo);
/*
 * Append one combination per format in |formats|, all sharing the same
 * metadata and use flags. Delegates to drv_add_combination().
 */
void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
			  struct format_metadata *metadata, uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++)
		drv_add_combination(drv, formats[i], metadata, use_flags);
}
556 void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
560 struct combination *combo;
561 /* Attempts to add the specified flags to an existing combination. */
562 for (i = 0; i < drv_array_size(drv->combos); i++) {
563 combo = (struct combination *)drv_array_at_idx(drv->combos, i);
564 if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
565 combo->metadata.modifier == metadata->modifier)
566 combo->use_flags |= use_flags;
570 int drv_modify_linear_combinations(struct driver *drv)
573 * All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
574 * plane and as a cursor.
576 drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
577 BO_USE_CURSOR | BO_USE_SCANOUT);
578 drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
579 BO_USE_CURSOR | BO_USE_SCANOUT);
584 * Pick the best modifier from modifiers, according to the ordering
585 * given by modifier_order.
587 uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
588 const uint64_t *modifier_order, uint32_t order_count)
592 for (i = 0; i < order_count; i++) {
593 for (j = 0; j < count; j++) {
594 if (modifiers[j] == modifier_order[i]) {
600 return DRM_FORMAT_MOD_LINEAR;
604 * Search a list of modifiers to see if a given modifier is present
606 bool drv_has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier)
609 for (i = 0; i < count; i++)
610 if (list[i] == modifier)