/*
 * Copyright 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
20 struct planar_layout {
22 int horizontal_subsampling[DRV_MAX_PLANES];
23 int vertical_subsampling[DRV_MAX_PLANES];
24 int bytes_per_pixel[DRV_MAX_PLANES];
29 static const struct planar_layout packed_1bpp_layout = {
31 .horizontal_subsampling = { 1 },
32 .vertical_subsampling = { 1 },
33 .bytes_per_pixel = { 1 }
36 static const struct planar_layout packed_2bpp_layout = {
38 .horizontal_subsampling = { 1 },
39 .vertical_subsampling = { 1 },
40 .bytes_per_pixel = { 2 }
43 static const struct planar_layout packed_3bpp_layout = {
45 .horizontal_subsampling = { 1 },
46 .vertical_subsampling = { 1 },
47 .bytes_per_pixel = { 3 }
50 static const struct planar_layout packed_4bpp_layout = {
52 .horizontal_subsampling = { 1 },
53 .vertical_subsampling = { 1 },
54 .bytes_per_pixel = { 4 }
57 static const struct planar_layout biplanar_yuv_420_layout = {
59 .horizontal_subsampling = { 1, 2 },
60 .vertical_subsampling = { 1, 2 },
61 .bytes_per_pixel = { 1, 2 }
64 static const struct planar_layout triplanar_yuv_420_layout = {
66 .horizontal_subsampling = { 1, 2, 2 },
67 .vertical_subsampling = { 1, 2, 2 },
68 .bytes_per_pixel = { 1, 1, 1 }
71 static const struct planar_layout biplanar_yuv_p010_layout = {
73 .horizontal_subsampling = { 1, 2 },
74 .vertical_subsampling = { 1, 2 },
75 .bytes_per_pixel = { 2, 4 }
80 static const struct planar_layout *layout_from_format(uint32_t format)
83 case DRM_FORMAT_BGR233:
86 case DRM_FORMAT_RGB332:
87 return &packed_1bpp_layout;
89 case DRM_FORMAT_YVU420:
90 case DRM_FORMAT_YVU420_ANDROID:
91 return &triplanar_yuv_420_layout;
95 return &biplanar_yuv_420_layout;
98 return &biplanar_yuv_p010_layout;
100 case DRM_FORMAT_ABGR1555:
101 case DRM_FORMAT_ABGR4444:
102 case DRM_FORMAT_ARGB1555:
103 case DRM_FORMAT_ARGB4444:
104 case DRM_FORMAT_BGR565:
105 case DRM_FORMAT_BGRA4444:
106 case DRM_FORMAT_BGRA5551:
107 case DRM_FORMAT_BGRX4444:
108 case DRM_FORMAT_BGRX5551:
109 case DRM_FORMAT_GR88:
110 case DRM_FORMAT_RG88:
111 case DRM_FORMAT_RGB565:
112 case DRM_FORMAT_RGBA4444:
113 case DRM_FORMAT_RGBA5551:
114 case DRM_FORMAT_RGBX4444:
115 case DRM_FORMAT_RGBX5551:
116 case DRM_FORMAT_UYVY:
117 case DRM_FORMAT_VYUY:
118 case DRM_FORMAT_XBGR1555:
119 case DRM_FORMAT_XBGR4444:
120 case DRM_FORMAT_XRGB1555:
121 case DRM_FORMAT_XRGB4444:
122 case DRM_FORMAT_YUYV:
123 case DRM_FORMAT_YVYU:
124 return &packed_2bpp_layout;
126 case DRM_FORMAT_BGR888:
127 case DRM_FORMAT_RGB888:
128 return &packed_3bpp_layout;
130 case DRM_FORMAT_ABGR2101010:
131 case DRM_FORMAT_ABGR8888:
132 case DRM_FORMAT_ARGB2101010:
133 case DRM_FORMAT_ARGB8888:
134 case DRM_FORMAT_AYUV:
135 case DRM_FORMAT_BGRA1010102:
136 case DRM_FORMAT_BGRA8888:
137 case DRM_FORMAT_BGRX1010102:
138 case DRM_FORMAT_BGRX8888:
139 case DRM_FORMAT_RGBA1010102:
140 case DRM_FORMAT_RGBA8888:
141 case DRM_FORMAT_RGBX1010102:
142 case DRM_FORMAT_RGBX8888:
143 case DRM_FORMAT_XBGR2101010:
144 case DRM_FORMAT_XBGR8888:
145 case DRM_FORMAT_XRGB2101010:
146 case DRM_FORMAT_XRGB8888:
147 return &packed_4bpp_layout;
150 drv_log("UNKNOWN FORMAT %d\n", format);
155 size_t drv_num_planes_from_format(uint32_t format)
157 const struct planar_layout *layout = layout_from_format(format);
160 * drv_bo_new calls this function early to query number of planes and
161 * considers 0 planes to mean unknown format, so we have to support
162 * that. All other layout_from_format() queries can assume that the
163 * format is supported and that the return value is non-NULL.
166 return layout ? layout->num_planes : 0;
169 uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
171 const struct planar_layout *layout = layout_from_format(format);
173 assert(plane < layout->num_planes);
175 return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
178 uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
180 const struct planar_layout *layout = layout_from_format(format);
182 assert(plane < layout->num_planes);
184 return layout->bytes_per_pixel[plane];
188 * This function returns the stride for a given format, width and plane.
190 uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
192 const struct planar_layout *layout = layout_from_format(format);
193 assert(plane < layout->num_planes);
195 uint32_t plane_width = DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]);
196 uint32_t stride = plane_width * layout->bytes_per_pixel[plane];
199 * The stride of Android YV12 buffers is required to be aligned to 16 bytes
200 * (see <system/graphics.h>).
202 if (format == DRM_FORMAT_YVU420_ANDROID)
203 stride = (plane == 0) ? ALIGN(stride, 32) : ALIGN(stride, 16);
/* Return the byte size of one plane: its stride times its subsampled height. */
uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
{
	return stride * drv_height_from_format(format, height, plane);
}
213 static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
217 case DRM_FORMAT_YVU420:
218 case DRM_FORMAT_YVU420_ANDROID:
219 stride = DIV_ROUND_UP(stride, 2);
228 * This function fills in the buffer object given the driver aligned stride of
229 * the first plane, height and a format. This function assumes there is just
230 * one kernel buffer per buffer object.
232 int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
235 size_t p, num_planes;
238 num_planes = drv_num_planes_from_format(format);
242 * HAL_PIXEL_FORMAT_YV12 requires that (see <system/graphics.h>):
243 * - the aligned height is same as the buffer's height.
244 * - the chroma stride is 16 bytes aligned, i.e., the luma's strides
245 * is 32 bytes aligned.
247 if (format == DRM_FORMAT_YVU420_ANDROID) {
248 assert(aligned_height == bo->height);
249 assert(stride == ALIGN(stride, 32));
252 for (p = 0; p < num_planes; p++) {
253 bo->strides[p] = subsample_stride(stride, format, p);
254 bo->sizes[p] = drv_size_from_format(format, bo->strides[p], aligned_height, p);
255 bo->offsets[p] = offset;
256 offset += bo->sizes[p];
259 bo->total_size = offset;
263 int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
268 uint32_t aligned_width, aligned_height;
269 struct drm_mode_create_dumb create_dumb;
271 aligned_width = width;
272 aligned_height = height;
274 case DRM_FORMAT_YVU420_ANDROID:
275 /* Align width to 32 pixels, so chroma strides are 16 bytes as
276 * Android requires. */
277 aligned_width = ALIGN(width, 32);
278 /* Adjust the height to include room for chroma planes.
280 * HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
282 aligned_height = 3 * DIV_ROUND_UP(bo->height, 2);
284 case DRM_FORMAT_YVU420:
285 case DRM_FORMAT_NV12:
286 /* Adjust the height to include room for chroma planes */
287 aligned_height = 3 * DIV_ROUND_UP(height, 2);
293 memset(&create_dumb, 0, sizeof(create_dumb));
294 create_dumb.height = aligned_height;
295 create_dumb.width = aligned_width;
296 create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
297 create_dumb.flags = 0;
299 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
301 drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno);
305 drv_bo_from_format(bo, create_dumb.pitch, height, format);
307 for (plane = 0; plane < bo->num_planes; plane++)
308 bo->handles[plane].u32 = create_dumb.handle;
310 bo->total_size = create_dumb.size;
314 int drv_dumb_bo_destroy(struct bo *bo)
316 struct drm_mode_destroy_dumb destroy_dumb;
319 memset(&destroy_dumb, 0, sizeof(destroy_dumb));
320 destroy_dumb.handle = bo->handles[0].u32;
322 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
324 drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
331 int drv_gem_bo_destroy(struct bo *bo)
333 struct drm_gem_close gem_close;
337 for (plane = 0; plane < bo->num_planes; plane++) {
338 for (i = 0; i < plane; i++)
339 if (bo->handles[i].u32 == bo->handles[plane].u32)
341 /* Make sure close hasn't already been called on this handle */
345 memset(&gem_close, 0, sizeof(gem_close));
346 gem_close.handle = bo->handles[plane].u32;
348 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
350 drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
351 bo->handles[plane].u32, ret);
359 int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
363 struct drm_prime_handle prime_handle;
365 for (plane = 0; plane < bo->num_planes; plane++) {
366 memset(&prime_handle, 0, sizeof(prime_handle));
367 prime_handle.fd = data->fds[plane];
369 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);
372 drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);
375 * Need to call GEM close on planes that were opened,
376 * if any. Adjust the num_planes variable to be the
377 * plane that failed, so GEM close will be called on
378 * planes before that plane.
380 bo->num_planes = plane;
381 drv_gem_bo_destroy(bo);
385 bo->handles[plane].u32 = prime_handle.handle;
391 void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
395 struct drm_mode_map_dumb map_dumb;
397 memset(&map_dumb, 0, sizeof(map_dumb));
398 map_dumb.handle = bo->handles[plane].u32;
400 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
402 drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n");
406 for (i = 0; i < bo->num_planes; i++)
407 if (bo->handles[i].u32 == bo->handles[plane].u32)
408 vma->length += bo->sizes[i];
410 return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
414 int drv_bo_munmap(struct bo *bo, struct vma *vma)
416 return munmap(vma->addr, vma->length);
419 int drv_mapping_destroy(struct bo *bo)
423 struct mapping *mapping;
427 * This function is called right before the buffer is destroyed. It will free any mappings
428 * associated with the buffer.
432 for (plane = 0; plane < bo->num_planes; plane++) {
433 while (idx < drv_array_size(bo->drv->mappings)) {
434 mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx);
435 if (mapping->vma->handle != bo->handles[plane].u32) {
440 if (!--mapping->vma->refcount) {
441 ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
443 drv_log("munmap failed\n");
450 /* This shrinks and shifts the array, so don't increment idx. */
451 drv_array_remove(bo->drv->mappings, idx);
458 int drv_get_prot(uint32_t map_flags)
460 return (BO_MAP_WRITE & map_flags) ? PROT_WRITE | PROT_READ : PROT_READ;
463 uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
468 if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, &count))
469 num = (uintptr_t)(count);
474 void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane)
476 uintptr_t num = drv_get_reference_count(drv, bo, plane);
478 /* If a value isn't in the table, drmHashDelete is a no-op */
479 drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
480 drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
483 void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane)
485 uintptr_t num = drv_get_reference_count(drv, bo, plane);
487 drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
490 drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1));
493 void drv_add_combination(struct driver *drv, const uint32_t format,
494 struct format_metadata *metadata, uint64_t use_flags)
496 struct combination combo = { .format = format,
497 .metadata = *metadata,
498 .use_flags = use_flags };
500 drv_array_append(drv->combos, &combo);
503 void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
504 struct format_metadata *metadata, uint64_t use_flags)
508 for (i = 0; i < num_formats; i++) {
509 struct combination combo = { .format = formats[i],
510 .metadata = *metadata,
511 .use_flags = use_flags };
513 drv_array_append(drv->combos, &combo);
517 void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
521 struct combination *combo;
522 /* Attempts to add the specified flags to an existing combination. */
523 for (i = 0; i < drv_array_size(drv->combos); i++) {
524 combo = (struct combination *)drv_array_at_idx(drv->combos, i);
525 if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
526 combo->metadata.modifier == metadata->modifier)
527 combo->use_flags |= use_flags;
531 struct drv_array *drv_query_kms(struct driver *drv)
533 struct drv_array *kms_items;
534 uint64_t plane_type = UINT64_MAX;
538 drmModePlanePtr plane;
539 drmModePropertyPtr prop;
540 drmModePlaneResPtr resources;
541 drmModeObjectPropertiesPtr props;
543 kms_items = drv_array_init(sizeof(struct kms_item));
548 * The ability to return universal planes is only complete on
549 * ChromeOS kernel versions >= v3.18. The SET_CLIENT_CAP ioctl
550 * therefore might return an error code, so don't check it. If it
551 * fails, it'll just return the plane list as overlay planes, which is
552 * fine in our case (our drivers already have cursor bits set).
553 * modetest in libdrm does the same thing.
555 drmSetClientCap(drv->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
557 resources = drmModeGetPlaneResources(drv->fd);
561 for (i = 0; i < resources->count_planes; i++) {
562 plane_type = UINT64_MAX;
563 plane = drmModeGetPlane(drv->fd, resources->planes[i]);
567 props = drmModeObjectGetProperties(drv->fd, plane->plane_id, DRM_MODE_OBJECT_PLANE);
571 for (j = 0; j < props->count_props; j++) {
572 prop = drmModeGetProperty(drv->fd, props->props[j]);
574 if (strcmp(prop->name, "type") == 0) {
575 plane_type = props->prop_values[j];
578 drmModeFreeProperty(prop);
582 switch (plane_type) {
583 case DRM_PLANE_TYPE_OVERLAY:
584 case DRM_PLANE_TYPE_PRIMARY:
585 use_flag = BO_USE_SCANOUT;
587 case DRM_PLANE_TYPE_CURSOR:
588 use_flag = BO_USE_CURSOR;
594 for (j = 0; j < plane->count_formats; j++) {
596 for (k = 0; k < drv_array_size(kms_items); k++) {
597 struct kms_item *item = drv_array_at_idx(kms_items, k);
598 if (item->format == plane->formats[j] &&
599 item->modifier == DRM_FORMAT_MOD_LINEAR) {
600 item->use_flags |= use_flag;
607 struct kms_item item = { .format = plane->formats[j],
608 .modifier = DRM_FORMAT_MOD_LINEAR,
609 .use_flags = use_flag };
611 drv_array_append(kms_items, &item);
615 drmModeFreeObjectProperties(props);
616 drmModeFreePlane(plane);
619 drmModeFreePlaneResources(resources);
621 if (kms_items && !drv_array_size(kms_items)) {
622 drv_array_destroy(kms_items);
629 int drv_modify_linear_combinations(struct driver *drv)
632 struct kms_item *item;
633 struct combination *combo;
634 struct drv_array *kms_items;
637 * All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
638 * plane and as a cursor. Some drivers don't support
639 * drmModeGetPlaneResources, so add the combination here. Note that the
640 * kernel disregards the alpha component of ARGB unless it's an overlay
643 drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
644 BO_USE_CURSOR | BO_USE_SCANOUT);
645 drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
646 BO_USE_CURSOR | BO_USE_SCANOUT);
648 kms_items = drv_query_kms(drv);
652 for (i = 0; i < drv_array_size(kms_items); i++) {
653 item = (struct kms_item *)drv_array_at_idx(kms_items, i);
654 for (j = 0; j < drv_array_size(drv->combos); j++) {
655 combo = drv_array_at_idx(drv->combos, j);
656 if (item->format == combo->format)
657 combo->use_flags |= BO_USE_SCANOUT;
661 drv_array_destroy(kms_items);
666 * Pick the best modifier from modifiers, according to the ordering
667 * given by modifier_order.
669 uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
670 const uint64_t *modifier_order, uint32_t order_count)
674 for (i = 0; i < order_count; i++) {
675 for (j = 0; j < count; j++) {
676 if (modifiers[j] == modifier_order[i]) {
682 return DRM_FORMAT_MOD_LINEAR;
686 * Search a list of modifiers to see if a given modifier is present
688 bool drv_has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier)
691 for (i = 0; i < count; i++)
692 if (list[i] == modifier)