2 * Copyright 2014 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
15 #include <xf86drmMode.h>
19 #include "i915_private.h"
22 static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
27 case DRM_FORMAT_YVU420:
28 case DRM_FORMAT_YVU420_ANDROID:
29 stride = DIV_ROUND_UP(stride, 2);
37 static uint32_t bpp_from_format(uint32_t format, size_t plane)
39 assert(plane < drv_num_planes_from_format(format));
42 case DRM_FORMAT_BGR233:
45 case DRM_FORMAT_RGB332:
46 case DRM_FORMAT_YVU420:
47 case DRM_FORMAT_YVU420_ANDROID:
52 return (plane == 0) ? 8 : 4;
54 case DRM_FORMAT_ABGR1555:
55 case DRM_FORMAT_ABGR4444:
56 case DRM_FORMAT_ARGB1555:
57 case DRM_FORMAT_ARGB4444:
58 case DRM_FORMAT_BGR565:
59 case DRM_FORMAT_BGRA4444:
60 case DRM_FORMAT_BGRA5551:
61 case DRM_FORMAT_BGRX4444:
62 case DRM_FORMAT_BGRX5551:
65 case DRM_FORMAT_RGB565:
66 case DRM_FORMAT_RGBA4444:
67 case DRM_FORMAT_RGBA5551:
68 case DRM_FORMAT_RGBX4444:
69 case DRM_FORMAT_RGBX5551:
72 case DRM_FORMAT_XBGR1555:
73 case DRM_FORMAT_XBGR4444:
74 case DRM_FORMAT_XRGB1555:
75 case DRM_FORMAT_XRGB4444:
80 case DRM_FORMAT_BGR888:
81 case DRM_FORMAT_RGB888:
84 case DRM_FORMAT_ABGR2101010:
85 case DRM_FORMAT_ABGR8888:
86 case DRM_FORMAT_ARGB2101010:
87 case DRM_FORMAT_ARGB8888:
89 case DRM_FORMAT_BGRA1010102:
90 case DRM_FORMAT_BGRA8888:
91 case DRM_FORMAT_BGRX1010102:
92 case DRM_FORMAT_BGRX8888:
93 case DRM_FORMAT_RGBA1010102:
94 case DRM_FORMAT_RGBA8888:
95 case DRM_FORMAT_RGBX1010102:
96 case DRM_FORMAT_RGBX8888:
97 case DRM_FORMAT_XBGR2101010:
98 case DRM_FORMAT_XBGR8888:
99 case DRM_FORMAT_XRGB2101010:
100 case DRM_FORMAT_XRGB8888:
104 return i915_private_bpp_from_format(format, plane);
107 uint32_t drv_bo_get_stride_in_pixels(struct bo *bo)
109 uint32_t bytes_per_pixel = DIV_ROUND_UP(bpp_from_format(bo->format, 0), 8);
110 return DIV_ROUND_UP(bo->strides[0], bytes_per_pixel);
114 * This function returns the stride for a given format, width and plane.
116 uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
118 uint32_t stride = DIV_ROUND_UP(width * bpp_from_format(format, plane), 8);
121 * The stride of Android YV12 buffers is required to be aligned to 16 bytes
122 * (see <system/graphics.h>).
124 if (format == DRM_FORMAT_YVU420_ANDROID)
125 stride = (plane == 0) ? ALIGN(stride, 32) : ALIGN(stride, 16);
130 uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
132 assert(plane < drv_num_planes_from_format(format));
133 uint32_t vertical_subsampling;
136 case DRM_FORMAT_NV12:
137 case DRM_FORMAT_YVU420:
138 case DRM_FORMAT_YVU420_ANDROID:
139 vertical_subsampling = (plane == 0) ? 1 : 2;
142 i915_private_vertical_subsampling_from_format(&vertical_subsampling, format, plane);
145 return stride * DIV_ROUND_UP(height, vertical_subsampling);
149 * This function fills in the buffer object given the driver aligned stride of
150 * the first plane, height and a format. This function assumes there is just
151 * one kernel buffer per buffer object.
153 int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
156 size_t p, num_planes;
159 num_planes = drv_num_planes_from_format(format);
163 * HAL_PIXEL_FORMAT_YV12 requires that (see <system/graphics.h>):
164 * - the aligned height is same as the buffer's height.
165 * - the chroma stride is 16 bytes aligned, i.e., the luma's strides
166 * is 32 bytes aligned.
168 if (format == DRM_FORMAT_YVU420_ANDROID) {
169 assert(aligned_height == bo->height);
170 assert(stride == ALIGN(stride, 32));
173 for (p = 0; p < num_planes; p++) {
174 bo->strides[p] = subsample_stride(stride, format, p);
175 bo->sizes[p] = drv_size_from_format(format, bo->strides[p], aligned_height, p);
176 bo->offsets[p] = offset;
177 offset += bo->sizes[p];
180 bo->total_size = offset;
184 int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
189 uint32_t aligned_width, aligned_height;
190 struct drm_mode_create_dumb create_dumb;
192 aligned_width = width;
193 aligned_height = height;
194 if (format == DRM_FORMAT_YVU420_ANDROID) {
196 * Align width to 32 pixels, so chroma strides are 16 bytes as
199 aligned_width = ALIGN(width, 32);
202 if (format == DRM_FORMAT_YVU420_ANDROID || format == DRM_FORMAT_YVU420) {
203 aligned_height = 3 * DIV_ROUND_UP(height, 2);
206 memset(&create_dumb, 0, sizeof(create_dumb));
207 create_dumb.height = aligned_height;
208 create_dumb.width = aligned_width;
209 create_dumb.bpp = bpp_from_format(format, 0);
210 create_dumb.flags = 0;
212 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
214 fprintf(stderr, "drv: DRM_IOCTL_MODE_CREATE_DUMB failed\n");
218 drv_bo_from_format(bo, create_dumb.pitch, height, format);
220 for (plane = 0; plane < bo->num_planes; plane++)
221 bo->handles[plane].u32 = create_dumb.handle;
223 bo->total_size = create_dumb.size;
227 int drv_dumb_bo_destroy(struct bo *bo)
229 struct drm_mode_destroy_dumb destroy_dumb;
232 memset(&destroy_dumb, 0, sizeof(destroy_dumb));
233 destroy_dumb.handle = bo->handles[0].u32;
235 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
237 fprintf(stderr, "drv: DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n",
245 int drv_gem_bo_destroy(struct bo *bo)
247 struct drm_gem_close gem_close;
251 for (plane = 0; plane < bo->num_planes; plane++) {
252 for (i = 0; i < plane; i++)
253 if (bo->handles[i].u32 == bo->handles[plane].u32)
255 /* Make sure close hasn't already been called on this handle */
259 memset(&gem_close, 0, sizeof(gem_close));
260 gem_close.handle = bo->handles[plane].u32;
262 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
264 fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
265 bo->handles[plane].u32, ret);
273 int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
277 struct drm_prime_handle prime_handle;
279 for (plane = 0; plane < bo->num_planes; plane++) {
280 memset(&prime_handle, 0, sizeof(prime_handle));
281 prime_handle.fd = data->fds[plane];
283 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);
286 fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n",
290 * Need to call GEM close on planes that were opened,
291 * if any. Adjust the num_planes variable to be the
292 * plane that failed, so GEM close will be called on
293 * planes before that plane.
295 bo->num_planes = plane;
296 drv_gem_bo_destroy(bo);
300 bo->handles[plane].u32 = prime_handle.handle;
303 ATOMIC_LOCK(&bo->drv->driver_lock);
304 for (plane = 0; plane < bo->num_planes; plane++) {
305 drv_increment_reference_count(bo->drv, bo, plane);
307 ATOMIC_UNLOCK(&bo->drv->driver_lock);
312 void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
316 struct drm_mode_map_dumb map_dumb;
318 memset(&map_dumb, 0, sizeof(map_dumb));
319 map_dumb.handle = bo->handles[plane].u32;
321 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
323 fprintf(stderr, "drv: DRM_IOCTL_MODE_MAP_DUMB failed \n");
327 for (i = 0; i < bo->num_planes; i++)
328 if (bo->handles[i].u32 == bo->handles[plane].u32)
329 data->length += bo->sizes[i];
331 return mmap(0, data->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
335 int drv_bo_munmap(struct bo *bo, struct map_info *data)
337 return munmap(data->addr, data->length);
340 int drv_map_info_destroy(struct bo *bo)
345 struct map_info *data;
348 * This function is called right before the buffer is destroyed. It will free any mappings
349 * associated with the buffer.
352 for (plane = 0; plane < bo->num_planes; plane++) {
353 if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) {
354 data = (struct map_info *)ptr;
355 ret = bo->drv->backend->bo_unmap(bo, data);
357 fprintf(stderr, "drv: munmap failed");
361 drmHashDelete(bo->drv->map_table, data->handle);
369 int drv_get_prot(uint32_t map_flags)
371 return (BO_MAP_WRITE & map_flags) ? PROT_WRITE | PROT_READ : PROT_READ;
374 uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
379 if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, &count))
380 num = (uintptr_t)(count);
385 void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane)
387 uintptr_t num = drv_get_reference_count(drv, bo, plane);
389 /* If a value isn't in the table, drmHashDelete is a no-op */
390 drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
391 drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
394 void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane)
396 uintptr_t num = drv_get_reference_count(drv, bo, plane);
398 drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
401 drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1));
404 uint32_t drv_log_base2(uint32_t value)
414 int drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
417 struct combinations *combos = &drv->combos;
418 if (combos->size >= combos->allocations) {
419 struct combination *new_data;
420 combos->allocations *= 2;
421 new_data = realloc(combos->data, combos->allocations * sizeof(*combos->data));
425 combos->data = new_data;
428 combos->data[combos->size].format = format;
429 combos->data[combos->size].metadata.priority = metadata->priority;
430 combos->data[combos->size].metadata.tiling = metadata->tiling;
431 combos->data[combos->size].metadata.modifier = metadata->modifier;
432 combos->data[combos->size].use_flags = use_flags;
/*
 * Adds the same metadata/use_flags combination for every format in |formats|.
 * Stops at the first failure and returns its error code; 0 on success.
 */
int drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
			 struct format_metadata *metadata, uint64_t use_flags)
{
	int ret;
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		ret = drv_add_combination(drv, formats[i], metadata, use_flags);
		if (ret)
			return ret;
	}

	return 0;
}
451 void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
455 struct combination *combo;
456 /* Attempts to add the specified flags to an existing combination. */
457 for (i = 0; i < drv->combos.size; i++) {
458 combo = &drv->combos.data[i];
459 if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
460 combo->metadata.modifier == metadata->modifier)
461 combo->use_flags |= use_flags;
465 struct kms_item *drv_query_kms(struct driver *drv, uint32_t *num_items)
467 struct kms_item *items;
468 uint64_t plane_type, use_flag;
469 uint32_t i, j, k, allocations, item_size;
471 drmModePlanePtr plane;
472 drmModePropertyPtr prop;
473 drmModePlaneResPtr resources;
474 drmModeObjectPropertiesPtr props;
476 /* Start with a power of 2 number of allocations. */
479 items = calloc(allocations, sizeof(*items));
484 * The ability to return universal planes is only complete on
485 * ChromeOS kernel versions >= v3.18. The SET_CLIENT_CAP ioctl
486 * therefore might return an error code, so don't check it. If it
487 * fails, it'll just return the plane list as overlay planes, which is
488 * fine in our case (our drivers already have cursor bits set).
489 * modetest in libdrm does the same thing.
491 drmSetClientCap(drv->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
493 resources = drmModeGetPlaneResources(drv->fd);
497 for (i = 0; i < resources->count_planes; i++) {
498 plane = drmModeGetPlane(drv->fd, resources->planes[i]);
502 props = drmModeObjectGetProperties(drv->fd, plane->plane_id, DRM_MODE_OBJECT_PLANE);
506 for (j = 0; j < props->count_props; j++) {
507 prop = drmModeGetProperty(drv->fd, props->props[j]);
509 if (strcmp(prop->name, "type") == 0) {
510 plane_type = props->prop_values[j];
513 drmModeFreeProperty(prop);
517 switch (plane_type) {
518 case DRM_PLANE_TYPE_OVERLAY:
519 case DRM_PLANE_TYPE_PRIMARY:
520 use_flag = BO_USE_SCANOUT;
522 case DRM_PLANE_TYPE_CURSOR:
523 use_flag = BO_USE_CURSOR;
529 for (j = 0; j < plane->count_formats; j++) {
531 for (k = 0; k < item_size; k++) {
532 if (items[k].format == plane->formats[j] &&
533 items[k].modifier == DRM_FORMAT_MOD_INVALID) {
534 items[k].use_flags |= use_flag;
540 if (!found && item_size >= allocations) {
541 struct kms_item *new_data = NULL;
543 new_data = realloc(items, allocations * sizeof(*items));
553 items[item_size].format = plane->formats[j];
554 items[item_size].modifier = DRM_FORMAT_MOD_INVALID;
555 items[item_size].use_flags = use_flag;
560 drmModeFreeObjectProperties(props);
561 drmModeFreePlane(plane);
564 drmModeFreePlaneResources(resources);
566 if (items && item_size == 0) {
571 *num_items = item_size;
575 int drv_modify_linear_combinations(struct driver *drv)
577 uint32_t i, j, num_items;
578 struct kms_item *items;
579 struct combination *combo;
582 * All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
583 * plane and as a cursor. Some drivers don't support
584 * drmModeGetPlaneResources, so add the combination here. Note that the
585 * kernel disregards the alpha component of ARGB unless it's an overlay
588 drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
589 BO_USE_CURSOR | BO_USE_SCANOUT);
590 drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
591 BO_USE_CURSOR | BO_USE_SCANOUT);
593 items = drv_query_kms(drv, &num_items);
594 if (!items || !num_items)
597 for (i = 0; i < num_items; i++) {
598 for (j = 0; j < drv->combos.size; j++) {
599 combo = &drv->combos.data[j];
600 if (items[i].format == combo->format)
601 combo->use_flags |= BO_USE_SCANOUT;
610 * Pick the best modifier from modifiers, according to the ordering
611 * given by modifier_order.
613 uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
614 const uint64_t *modifier_order, uint32_t order_count)
618 for (i = 0; i < order_count; i++) {
619 for (j = 0; j < count; j++) {
620 if (modifiers[j] == modifier_order[i]) {
626 return DRM_FORMAT_MOD_LINEAR;