/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
/*
 * Per-driver backend implementations, defined in their own translation
 * units.  NOTE(review): upstream guards vendor backends with DRV_* ifdefs
 * (elided in this listing); declarations are kept unconditional here to
 * match the unconditional backend_list below — confirm against the build.
 */
extern const struct backend backend_amdgpu;
extern const struct backend backend_amlogic;
extern const struct backend backend_evdi;
extern const struct backend backend_exynos;
extern const struct backend backend_gma500;
extern const struct backend backend_i915;
extern const struct backend backend_marvell;
extern const struct backend backend_mediatek;
extern const struct backend backend_nouveau;
extern const struct backend backend_radeon;
extern const struct backend backend_rockchip;
extern const struct backend backend_tegra;
extern const struct backend backend_udl;
extern const struct backend backend_vc4;
extern const struct backend backend_vgem;
extern const struct backend backend_virtio_gpu;
60 static const struct backend *drv_get_backend(int fd)
62 drmVersionPtr drm_version;
65 drm_version = drmGetVersion(fd);
70 const struct backend *backend_list[] = {
105 &backend_vgem, &backend_virtio_gpu,
108 for (i = 0; i < ARRAY_SIZE(backend_list); i++)
109 if (!strcmp(drm_version->name, backend_list[i]->name)) {
110 drmFreeVersion(drm_version);
111 return backend_list[i];
114 drmFreeVersion(drm_version);
118 struct driver *drv_create(int fd)
123 drv = (struct driver *)calloc(1, sizeof(*drv));
129 drv->backend = drv_get_backend(fd);
134 if (pthread_mutex_init(&drv->driver_lock, NULL))
137 drv->buffer_table = drmHashCreate();
138 if (!drv->buffer_table)
141 drv->mappings = drv_array_init(sizeof(struct mapping));
143 goto free_buffer_table;
145 drv->combos = drv_array_init(sizeof(struct combination));
149 if (drv->backend->init) {
150 ret = drv->backend->init(drv);
152 drv_array_destroy(drv->combos);
160 drv_array_destroy(drv->mappings);
162 drmHashDestroy(drv->buffer_table);
164 pthread_mutex_destroy(&drv->driver_lock);
170 void drv_destroy(struct driver *drv)
172 pthread_mutex_lock(&drv->driver_lock);
174 if (drv->backend->close)
175 drv->backend->close(drv);
177 drmHashDestroy(drv->buffer_table);
178 drv_array_destroy(drv->mappings);
179 drv_array_destroy(drv->combos);
181 pthread_mutex_unlock(&drv->driver_lock);
182 pthread_mutex_destroy(&drv->driver_lock);
187 int drv_get_fd(struct driver *drv)
192 const char *drv_get_name(struct driver *drv)
194 return drv->backend->name;
197 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
199 struct combination *curr, *best;
201 if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
206 for (i = 0; i < drv_array_size(drv->combos); i++) {
207 curr = drv_array_at_idx(drv->combos, i);
208 if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
209 if (!best || best->metadata.priority < curr->metadata.priority)
216 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
221 bo = (struct bo *)calloc(1, sizeof(*bo));
230 bo->use_flags = use_flags;
231 bo->num_planes = drv_num_planes_from_format(format);
233 if (!bo->num_planes) {
241 struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
248 bo = drv_bo_new(drv, width, height, format, use_flags);
253 ret = drv->backend->bo_create(bo, width, height, format, use_flags);
260 pthread_mutex_lock(&drv->driver_lock);
262 for (plane = 0; plane < bo->num_planes; plane++) {
264 assert(bo->offsets[plane] >= bo->offsets[plane - 1]);
266 drv_increment_reference_count(drv, bo, plane);
269 pthread_mutex_unlock(&drv->driver_lock);
274 struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
275 uint32_t format, const uint64_t *modifiers, uint32_t count)
281 if (!drv->backend->bo_create_with_modifiers) {
286 bo = drv_bo_new(drv, width, height, format, BO_USE_NONE);
291 ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers, count);
298 pthread_mutex_lock(&drv->driver_lock);
300 for (plane = 0; plane < bo->num_planes; plane++) {
302 assert(bo->offsets[plane] >= bo->offsets[plane - 1]);
304 drv_increment_reference_count(drv, bo, plane);
307 pthread_mutex_unlock(&drv->driver_lock);
312 void drv_bo_destroy(struct bo *bo)
316 struct driver *drv = bo->drv;
318 pthread_mutex_lock(&drv->driver_lock);
320 for (plane = 0; plane < bo->num_planes; plane++)
321 drv_decrement_reference_count(drv, bo, plane);
323 for (plane = 0; plane < bo->num_planes; plane++)
324 total += drv_get_reference_count(drv, bo, plane);
326 pthread_mutex_unlock(&drv->driver_lock);
329 assert(drv_mapping_destroy(bo) == 0);
330 bo->drv->backend->bo_destroy(bo);
336 struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
343 bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags);
348 ret = drv->backend->bo_import(bo, data);
354 for (plane = 0; plane < bo->num_planes; plane++) {
355 bo->strides[plane] = data->strides[plane];
356 bo->offsets[plane] = data->offsets[plane];
357 bo->format_modifiers[plane] = data->format_modifiers[plane];
359 seek_end = lseek(data->fds[plane], 0, SEEK_END);
360 if (seek_end == (off_t)(-1)) {
361 fprintf(stderr, "drv: lseek() failed with %s\n", strerror(errno));
365 lseek(data->fds[plane], 0, SEEK_SET);
366 if (plane == bo->num_planes - 1 || data->offsets[plane + 1] == 0)
367 bo->sizes[plane] = seek_end - data->offsets[plane];
369 bo->sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];
371 if ((int64_t)bo->offsets[plane] + bo->sizes[plane] > seek_end) {
372 fprintf(stderr, "drv: buffer size is too large.\n");
376 bo->total_size += bo->sizes[plane];
386 void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
387 struct mapping **map_data, size_t plane)
391 struct mapping mapping;
393 assert(rect->width >= 0);
394 assert(rect->height >= 0);
395 assert(rect->x + rect->width <= drv_bo_get_width(bo));
396 assert(rect->y + rect->height <= drv_bo_get_height(bo));
397 assert(BO_MAP_READ_WRITE & map_flags);
398 /* No CPU access for protected buffers. */
399 assert(!(bo->use_flags & BO_USE_PROTECTED));
401 memset(&mapping, 0, sizeof(mapping));
402 mapping.rect = *rect;
403 mapping.refcount = 1;
405 pthread_mutex_lock(&bo->drv->driver_lock);
407 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
408 struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
409 if (prior->vma->handle != bo->handles[plane].u32 ||
410 prior->vma->map_flags != map_flags)
413 if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
414 rect->width != prior->rect.width || rect->height != prior->rect.height)
422 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
423 struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
424 if (prior->vma->handle != bo->handles[plane].u32 ||
425 prior->vma->map_flags != map_flags)
428 prior->vma->refcount++;
429 mapping.vma = prior->vma;
433 mapping.vma = calloc(1, sizeof(*mapping.vma));
434 addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
435 if (addr == MAP_FAILED) {
438 pthread_mutex_unlock(&bo->drv->driver_lock);
442 mapping.vma->refcount = 1;
443 mapping.vma->addr = addr;
444 mapping.vma->handle = bo->handles[plane].u32;
445 mapping.vma->map_flags = map_flags;
448 *map_data = drv_array_append(bo->drv->mappings, &mapping);
450 drv_bo_invalidate(bo, *map_data);
451 addr = (uint8_t *)((*map_data)->vma->addr);
452 addr += drv_bo_get_plane_offset(bo, plane);
453 pthread_mutex_unlock(&bo->drv->driver_lock);
457 int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
460 int ret = drv_bo_flush(bo, mapping);
464 pthread_mutex_lock(&bo->drv->driver_lock);
466 if (--mapping->refcount)
469 if (!--mapping->vma->refcount) {
470 ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
474 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
475 if (mapping == (struct mapping *)drv_array_at_idx(bo->drv->mappings, i)) {
476 drv_array_remove(bo->drv->mappings, i);
482 pthread_mutex_unlock(&bo->drv->driver_lock);
486 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
491 assert(mapping->vma);
492 assert(mapping->refcount > 0);
493 assert(mapping->vma->refcount > 0);
495 if (bo->drv->backend->bo_invalidate)
496 ret = bo->drv->backend->bo_invalidate(bo, mapping);
501 int drv_bo_flush(struct bo *bo, struct mapping *mapping)
506 assert(mapping->vma);
507 assert(mapping->refcount > 0);
508 assert(mapping->vma->refcount > 0);
509 assert(!(bo->use_flags & BO_USE_PROTECTED));
511 if (bo->drv->backend->bo_flush)
512 ret = bo->drv->backend->bo_flush(bo, mapping);
517 uint32_t drv_bo_get_width(struct bo *bo)
522 uint32_t drv_bo_get_height(struct bo *bo)
527 uint32_t drv_bo_get_stride_or_tiling(struct bo *bo)
529 return bo->tiling ? bo->tiling : drv_bo_get_plane_stride(bo, 0);
532 size_t drv_bo_get_num_planes(struct bo *bo)
534 return bo->num_planes;
537 union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
539 return bo->handles[plane];
543 #define DRM_RDWR O_RDWR
546 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
550 assert(plane < bo->num_planes);
552 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
554 return (ret) ? ret : fd;
557 uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
559 assert(plane < bo->num_planes);
560 return bo->offsets[plane];
563 uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
565 assert(plane < bo->num_planes);
566 return bo->sizes[plane];
569 uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
571 assert(plane < bo->num_planes);
572 return bo->strides[plane];
575 uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
577 assert(plane < bo->num_planes);
578 return bo->format_modifiers[plane];
581 uint32_t drv_bo_get_format(struct bo *bo)
586 uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
588 if (drv->backend->resolve_format)
589 return drv->backend->resolve_format(format, use_flags);
594 size_t drv_num_planes_from_format(uint32_t format)
597 case DRM_FORMAT_ABGR1555:
598 case DRM_FORMAT_ABGR2101010:
599 case DRM_FORMAT_ABGR4444:
600 case DRM_FORMAT_ABGR8888:
601 case DRM_FORMAT_ARGB1555:
602 case DRM_FORMAT_ARGB2101010:
603 case DRM_FORMAT_ARGB4444:
604 case DRM_FORMAT_ARGB8888:
605 case DRM_FORMAT_AYUV:
606 case DRM_FORMAT_BGR233:
607 case DRM_FORMAT_BGR565:
608 case DRM_FORMAT_BGR888:
609 case DRM_FORMAT_BGRA1010102:
610 case DRM_FORMAT_BGRA4444:
611 case DRM_FORMAT_BGRA5551:
612 case DRM_FORMAT_BGRA8888:
613 case DRM_FORMAT_BGRX1010102:
614 case DRM_FORMAT_BGRX4444:
615 case DRM_FORMAT_BGRX5551:
616 case DRM_FORMAT_BGRX8888:
618 case DRM_FORMAT_GR88:
620 case DRM_FORMAT_RG88:
621 case DRM_FORMAT_RGB332:
622 case DRM_FORMAT_RGB565:
623 case DRM_FORMAT_RGB888:
624 case DRM_FORMAT_RGBA1010102:
625 case DRM_FORMAT_RGBA4444:
626 case DRM_FORMAT_RGBA5551:
627 case DRM_FORMAT_RGBA8888:
628 case DRM_FORMAT_RGBX1010102:
629 case DRM_FORMAT_RGBX4444:
630 case DRM_FORMAT_RGBX5551:
631 case DRM_FORMAT_RGBX8888:
632 case DRM_FORMAT_UYVY:
633 case DRM_FORMAT_VYUY:
634 case DRM_FORMAT_XBGR1555:
635 case DRM_FORMAT_XBGR2101010:
636 case DRM_FORMAT_XBGR4444:
637 case DRM_FORMAT_XBGR8888:
638 case DRM_FORMAT_XRGB1555:
639 case DRM_FORMAT_XRGB2101010:
640 case DRM_FORMAT_XRGB4444:
641 case DRM_FORMAT_XRGB8888:
642 case DRM_FORMAT_YUYV:
643 case DRM_FORMAT_YVYU:
645 case DRM_FORMAT_NV12:
646 case DRM_FORMAT_NV21:
648 case DRM_FORMAT_YVU420:
649 case DRM_FORMAT_YVU420_ANDROID:
653 fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format);
657 uint32_t drv_num_buffers_per_bo(struct bo *bo)
662 for (plane = 0; plane < bo->num_planes; plane++) {
663 for (p = 0; p < plane; p++)
664 if (bo->handles[p].u32 == bo->handles[plane].u32)