/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#ifdef __ANDROID__
#include <cutils/log.h>
#endif

// NOTE(review): project-local headers (backend/driver/bo declarations) — verify
// names against the repository's actual headers.
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
// Per-driver backend implementations linked into the library.  Each handles
// buffer allocation/import for one DRM kernel driver.
extern const struct backend backend_amdgpu;
extern const struct backend backend_exynos;
extern const struct backend backend_i915;
extern const struct backend backend_mediatek;
extern const struct backend backend_msm;
extern const struct backend backend_rockchip;
extern const struct backend backend_tegra;
extern const struct backend backend_vc4;

#ifndef DRI_GENERIC_DRV
// Dumb / generic drivers
extern const struct backend backend_evdi;
extern const struct backend backend_marvell;
extern const struct backend backend_meson;
extern const struct backend backend_nouveau;
extern const struct backend backend_komeda;
extern const struct backend backend_radeon;
extern const struct backend backend_synaptics;
extern const struct backend backend_virtio_gpu;
extern const struct backend backend_udl;
extern const struct backend backend_vkms;
#endif

#ifdef DRI_GENERIC_DRV
extern const struct backend backend_dri_generic;
#endif
71 static const struct backend *drv_get_backend(int fd)
73 drmVersionPtr drm_version;
76 drm_version = drmGetVersion(fd);
81 const struct backend *backend_list[] = {
103 #ifndef DRI_GENERIC_DRV
104 &backend_evdi, &backend_marvell, &backend_meson, &backend_nouveau,
105 &backend_komeda, &backend_radeon, &backend_synaptics, &backend_virtio_gpu,
106 &backend_udl, &backend_virtio_gpu, &backend_vkms
110 for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
111 const struct backend *b = backend_list[i];
112 // Exactly one of the main create functions must be defined.
113 assert((b->bo_create != NULL) ^ (b->bo_create_from_metadata != NULL));
114 // Either both or neither must be implemented.
115 assert((b->bo_compute_metadata != NULL) == (b->bo_create_from_metadata != NULL));
116 // Both can't be defined, but it's okay for neither to be (i.e. only bo_create).
117 assert((b->bo_create_with_modifiers == NULL) ||
118 (b->bo_create_from_metadata == NULL));
120 if (!strcmp(drm_version->name, b->name)) {
121 drmFreeVersion(drm_version);
126 drmFreeVersion(drm_version);
130 struct driver *drv_create(int fd, bool try_generic)
135 drv = (struct driver *)calloc(1, sizeof(*drv));
141 minigbm_debug = getenv("MINIGBM_DEBUG");
142 drv->compression = (minigbm_debug == NULL) || (strcmp(minigbm_debug, "nocompression") != 0);
145 drv->backend = drv_get_backend(fd);
147 #ifdef DRI_GENERIC_DRV
148 if (!drv->backend && try_generic)
149 drv->backend = &backend_dri_generic;
155 if (pthread_mutex_init(&drv->driver_lock, NULL))
158 drv->buffer_table = drmHashCreate();
159 if (!drv->buffer_table)
162 drv->mappings = drv_array_init(sizeof(struct mapping));
164 goto free_buffer_table;
166 drv->combos = drv_array_init(sizeof(struct combination));
170 if (drv->backend->init) {
171 ret = drv->backend->init(drv);
173 drv_array_destroy(drv->combos);
181 drv_array_destroy(drv->mappings);
183 drmHashDestroy(drv->buffer_table);
185 pthread_mutex_destroy(&drv->driver_lock);
191 void drv_destroy(struct driver *drv)
193 pthread_mutex_lock(&drv->driver_lock);
195 if (drv->backend->close)
196 drv->backend->close(drv);
198 drmHashDestroy(drv->buffer_table);
199 drv_array_destroy(drv->mappings);
200 drv_array_destroy(drv->combos);
202 pthread_mutex_unlock(&drv->driver_lock);
203 pthread_mutex_destroy(&drv->driver_lock);
208 int drv_get_fd(struct driver *drv)
213 const char *drv_get_name(struct driver *drv)
215 return drv->backend->name;
218 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
220 struct combination *curr, *best;
222 if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
227 for (i = 0; i < drv_array_size(drv->combos); i++) {
228 curr = drv_array_at_idx(drv->combos, i);
229 if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
230 if (!best || best->metadata.priority < curr->metadata.priority)
237 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
238 uint64_t use_flags, bool is_test_buffer)
242 bo = (struct bo *)calloc(1, sizeof(*bo));
248 bo->meta.width = width;
249 bo->meta.height = height;
250 bo->meta.format = format;
251 bo->meta.use_flags = use_flags;
252 bo->meta.num_planes = drv_num_planes_from_format(format);
253 bo->is_test_buffer = is_test_buffer;
255 if (!bo->meta.num_planes) {
263 struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
271 is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
272 use_flags &= ~BO_USE_TEST_ALLOC;
274 bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);
280 if (drv->backend->bo_compute_metadata) {
281 ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
283 if (!is_test_alloc && ret == 0)
284 ret = drv->backend->bo_create_from_metadata(bo);
285 } else if (!is_test_alloc) {
286 ret = drv->backend->bo_create(bo, width, height, format, use_flags);
294 pthread_mutex_lock(&drv->driver_lock);
296 for (plane = 0; plane < bo->meta.num_planes; plane++) {
298 assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);
300 drv_increment_reference_count(drv, bo, plane);
303 pthread_mutex_unlock(&drv->driver_lock);
308 struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
309 uint32_t format, const uint64_t *modifiers, uint32_t count)
315 if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
320 bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);
326 if (drv->backend->bo_compute_metadata) {
327 ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
330 ret = drv->backend->bo_create_from_metadata(bo);
332 ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
341 pthread_mutex_lock(&drv->driver_lock);
343 for (plane = 0; plane < bo->meta.num_planes; plane++) {
345 assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);
347 drv_increment_reference_count(drv, bo, plane);
350 pthread_mutex_unlock(&drv->driver_lock);
355 void drv_bo_destroy(struct bo *bo)
360 struct driver *drv = bo->drv;
362 if (!bo->is_test_buffer) {
363 pthread_mutex_lock(&drv->driver_lock);
365 for (plane = 0; plane < bo->meta.num_planes; plane++)
366 drv_decrement_reference_count(drv, bo, plane);
368 for (plane = 0; plane < bo->meta.num_planes; plane++)
369 total += drv_get_reference_count(drv, bo, plane);
371 pthread_mutex_unlock(&drv->driver_lock);
374 ret = drv_mapping_destroy(bo);
376 bo->drv->backend->bo_destroy(bo);
383 struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
390 bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);
395 ret = drv->backend->bo_import(bo, data);
401 for (plane = 0; plane < bo->meta.num_planes; plane++) {
402 pthread_mutex_lock(&bo->drv->driver_lock);
403 drv_increment_reference_count(bo->drv, bo, plane);
404 pthread_mutex_unlock(&bo->drv->driver_lock);
407 for (plane = 0; plane < bo->meta.num_planes; plane++) {
408 bo->meta.strides[plane] = data->strides[plane];
409 bo->meta.offsets[plane] = data->offsets[plane];
410 bo->meta.format_modifiers[plane] = data->format_modifiers[plane];
412 seek_end = lseek(data->fds[plane], 0, SEEK_END);
413 if (seek_end == (off_t)(-1)) {
414 drv_log("lseek() failed with %s\n", strerror(errno));
418 lseek(data->fds[plane], 0, SEEK_SET);
419 if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
420 bo->meta.sizes[plane] = seek_end - data->offsets[plane];
422 bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];
424 if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
425 drv_log("buffer size is too large.\n");
429 bo->meta.total_size += bo->meta.sizes[plane];
439 void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
440 struct mapping **map_data, size_t plane)
444 struct mapping mapping = { 0 };
446 assert(rect->width >= 0);
447 assert(rect->height >= 0);
448 assert(rect->x + rect->width <= drv_bo_get_width(bo));
449 assert(rect->y + rect->height <= drv_bo_get_height(bo));
450 assert(BO_MAP_READ_WRITE & map_flags);
451 /* No CPU access for protected buffers. */
452 assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
454 if (bo->is_test_buffer) {
458 mapping.rect = *rect;
459 mapping.refcount = 1;
461 pthread_mutex_lock(&bo->drv->driver_lock);
463 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
464 struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
465 if (prior->vma->handle != bo->handles[plane].u32 ||
466 prior->vma->map_flags != map_flags)
469 if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
470 rect->width != prior->rect.width || rect->height != prior->rect.height)
478 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
479 struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
480 if (prior->vma->handle != bo->handles[plane].u32 ||
481 prior->vma->map_flags != map_flags)
484 prior->vma->refcount++;
485 mapping.vma = prior->vma;
489 mapping.vma = calloc(1, sizeof(*mapping.vma));
490 memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
491 addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
492 if (addr == MAP_FAILED) {
495 pthread_mutex_unlock(&bo->drv->driver_lock);
499 mapping.vma->refcount = 1;
500 mapping.vma->addr = addr;
501 mapping.vma->handle = bo->handles[plane].u32;
502 mapping.vma->map_flags = map_flags;
505 *map_data = drv_array_append(bo->drv->mappings, &mapping);
507 drv_bo_invalidate(bo, *map_data);
508 addr = (uint8_t *)((*map_data)->vma->addr);
509 addr += drv_bo_get_plane_offset(bo, plane);
510 pthread_mutex_unlock(&bo->drv->driver_lock);
514 int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
519 pthread_mutex_lock(&bo->drv->driver_lock);
521 if (--mapping->refcount)
524 if (!--mapping->vma->refcount) {
525 ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
529 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
530 if (mapping == (struct mapping *)drv_array_at_idx(bo->drv->mappings, i)) {
531 drv_array_remove(bo->drv->mappings, i);
537 pthread_mutex_unlock(&bo->drv->driver_lock);
541 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
546 assert(mapping->vma);
547 assert(mapping->refcount > 0);
548 assert(mapping->vma->refcount > 0);
550 if (bo->drv->backend->bo_invalidate)
551 ret = bo->drv->backend->bo_invalidate(bo, mapping);
556 int drv_bo_flush(struct bo *bo, struct mapping *mapping)
561 assert(mapping->vma);
562 assert(mapping->refcount > 0);
563 assert(mapping->vma->refcount > 0);
565 if (bo->drv->backend->bo_flush)
566 ret = bo->drv->backend->bo_flush(bo, mapping);
571 int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
576 assert(mapping->vma);
577 assert(mapping->refcount > 0);
578 assert(mapping->vma->refcount > 0);
579 assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
581 if (bo->drv->backend->bo_flush)
582 ret = bo->drv->backend->bo_flush(bo, mapping);
584 ret = drv_bo_unmap(bo, mapping);
589 uint32_t drv_bo_get_width(struct bo *bo)
591 return bo->meta.width;
594 uint32_t drv_bo_get_height(struct bo *bo)
596 return bo->meta.height;
599 size_t drv_bo_get_num_planes(struct bo *bo)
601 return bo->meta.num_planes;
604 union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
606 return bo->handles[plane];
610 #define DRM_RDWR O_RDWR
613 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
617 assert(plane < bo->meta.num_planes);
619 if (bo->is_test_buffer) {
623 if (bo->drv->backend->bo_get_plane_fd) {
624 fd = bo->drv->backend->bo_get_plane_fd(bo, plane);
630 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
632 // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
634 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);
636 return (ret) ? ret : fd;
639 uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
641 assert(plane < bo->meta.num_planes);
642 return bo->meta.offsets[plane];
645 uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
647 assert(plane < bo->meta.num_planes);
648 return bo->meta.sizes[plane];
651 uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
653 assert(plane < bo->meta.num_planes);
654 return bo->meta.strides[plane];
657 uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
659 assert(plane < bo->meta.num_planes);
660 return bo->meta.format_modifiers[plane];
663 uint32_t drv_bo_get_format(struct bo *bo)
665 return bo->meta.format;
668 size_t drv_bo_get_total_size(struct bo *bo)
670 return bo->meta.total_size;
673 uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
675 if (drv->backend->resolve_format)
676 return drv->backend->resolve_format(drv, format, use_flags);
681 uint32_t drv_num_buffers_per_bo(struct bo *bo)
686 if (bo->is_test_buffer) {
690 for (plane = 0; plane < bo->meta.num_planes; plane++) {
691 for (p = 0; p < plane; p++)
692 if (bo->handles[p].u32 == bo->handles[plane].u32)
/*
 * Logging helper used by the drv_log() macro: prefixes the message with
 * "[prefix:file(line)]" and routes it to logcat on Android or stderr
 * elsewhere.
 */
void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
{
	char buf[50];
	va_list args;

	// NOTE(review): POSIX basename() takes a non-const char* and may modify
	// its argument — this relies on the glibc/bionic behavior for paths
	// without trailing slashes.
	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);

	va_start(args, format);
#ifdef __ANDROID__
	__android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}
717 int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
718 uint32_t offsets[DRV_MAX_PLANES])
720 for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
721 strides[plane] = bo->meta.strides[plane];
722 offsets[plane] = bo->meta.offsets[plane];
725 if (bo->drv->backend->resource_info)
726 return bo->drv->backend->resource_info(bo, strides, offsets);