2 * Copyright 2016 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
15 #include <sys/types.h>
20 #include <cutils/log.h>
/*
 * Per-driver backend implementations. Each is defined in its own backend
 * source file; drv_get_backend() below selects one at runtime by matching
 * the kernel DRM driver name.
 * NOTE(review): some declarations between these file lines are elided in
 * this excerpt (see the original line-number gaps).
 */
29 extern const struct backend backend_amdgpu;
31 extern const struct backend backend_evdi;
33 extern const struct backend backend_exynos;
36 extern const struct backend backend_i915;
39 extern const struct backend backend_marvell;
42 extern const struct backend backend_mediatek;
45 extern const struct backend backend_meson;
48 extern const struct backend backend_msm;
50 extern const struct backend backend_nouveau;
52 extern const struct backend backend_radeon;
55 extern const struct backend backend_rockchip;
58 extern const struct backend backend_tegra;
60 extern const struct backend backend_udl;
62 extern const struct backend backend_vc4;
64 extern const struct backend backend_vgem;
65 extern const struct backend backend_virtio_gpu;
/*
 * Returns the backend whose name matches the kernel DRM driver behind |fd|
 * (queried via drmGetVersion), or presumably NULL when no backend matches —
 * the final return is elided from this excerpt. The drmVersion is freed on
 * every visible exit path.
 * NOTE(review): the NULL-check on drmGetVersion's result, the full
 * backend_list initializer, and the closing braces are elided in this view.
 */
67 static const struct backend *drv_get_backend(int fd)
69 drmVersionPtr drm_version;
72 drm_version = drmGetVersion(fd);
/* Candidate backends; matched in order against the reported driver name. */
77 const struct backend *backend_list[] = {
114 &backend_vgem, &backend_virtio_gpu,
117 for (i = 0; i < ARRAY_SIZE(backend_list); i++)
118 if (!strcmp(drm_version->name, backend_list[i]->name)) {
/* Match found: release the version info before handing back the backend. */
119 drmFreeVersion(drm_version);
120 return backend_list[i];
/* No match: still must free the version info to avoid leaking it. */
123 drmFreeVersion(drm_version);
/*
 * Allocates and initializes a struct driver for the DRM device |fd|:
 * selects a backend, creates the driver lock, buffer (handle) hash table,
 * mappings array, combos array, and runs the backend's optional init hook.
 * NOTE(review): the failure checks, goto-unwind labels, and return
 * statements are elided in this excerpt; the trailing destroy calls
 * (L43-L46) appear to be the error-unwind path, in reverse order of
 * acquisition — confirm against the full source.
 */
127 struct driver *drv_create(int fd)
/* Zero-initialized so unset members are NULL/0. */
132 drv = (struct driver *)calloc(1, sizeof(*drv));
138 drv->backend = drv_get_backend(fd);
143 if (pthread_mutex_init(&drv->driver_lock, NULL))
146 drv->buffer_table = drmHashCreate();
147 if (!drv->buffer_table)
150 drv->mappings = drv_array_init(sizeof(struct mapping));
152 goto free_buffer_table;
154 drv->combos = drv_array_init(sizeof(struct combination));
/* Backend init hook is optional; only called when provided. */
158 if (drv->backend->init) {
159 ret = drv->backend->init(drv);
/* Error-unwind: tear down resources in reverse order of creation. */
161 drv_array_destroy(drv->combos);
169 drv_array_destroy(drv->mappings);
171 drmHashDestroy(drv->buffer_table);
173 pthread_mutex_destroy(&drv->driver_lock);
/*
 * Tears down a driver created by drv_create(): runs the backend's optional
 * close hook, frees the buffer table and the mappings/combos arrays, then
 * destroys the driver lock. The lock is held while the backend and tables
 * are torn down, and released before it is destroyed.
 * NOTE(review): the final free(drv) (if any) is elided from this excerpt.
 */
179 void drv_destroy(struct driver *drv)
181 pthread_mutex_lock(&drv->driver_lock);
/* Backend close hook is optional. */
183 if (drv->backend->close)
184 drv->backend->close(drv);
186 drmHashDestroy(drv->buffer_table);
187 drv_array_destroy(drv->mappings);
188 drv_array_destroy(drv->combos);
190 pthread_mutex_unlock(&drv->driver_lock);
191 pthread_mutex_destroy(&drv->driver_lock);
/*
 * Accessor for the DRM device fd owned by |drv|.
 * NOTE(review): the body is elided in this excerpt; presumably returns
 * drv->fd — confirm against the full source.
 */
196 int drv_get_fd(struct driver *drv)
/* Returns the name of the backend selected for this driver. */
201 const char *drv_get_name(struct driver *drv)
203 return drv->backend->name;
/*
 * Finds the highest-priority combination supporting |format| with at least
 * the requested |use_flags| (a combo qualifies when its use_flags is a
 * superset of the requested flags). Bails out early for DRM_FORMAT_NONE or
 * BO_USE_NONE.
 * NOTE(review): initialization of |best|, the early-return value, the
 * assignment of best = curr, and the final return are elided in this
 * excerpt.
 */
206 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
208 struct combination *curr, *best;
210 if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
215 for (i = 0; i < drv_array_size(drv->combos); i++) {
216 curr = drv_array_at_idx(drv->combos, i);
/* Candidate must support every requested use flag, not just overlap. */
217 if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
/* Keep the candidate with the highest metadata priority. */
218 if (!best || best->metadata.priority < curr->metadata.priority)
/*
 * Allocates a zeroed struct bo and fills in its basic metadata (dimensions,
 * format, use flags, plane count). A format with zero planes is treated as
 * an error (cleanup path elided in this excerpt).
 * NOTE(review): the calloc NULL-check, parameter list continuation, error
 * handling, and return are elided from this view.
 */
225 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
230 bo = (struct bo *)calloc(1, sizeof(*bo));
236 bo->meta.width = width;
237 bo->meta.height = height;
238 bo->meta.format = format;
239 bo->meta.use_flags = use_flags;
/* Plane count is derived from the DRM format. */
240 bo->meta.num_planes = drv_num_planes_from_format(format);
/* Unknown/unsupported format yields zero planes — reject it. */
242 if (!bo->meta.num_planes) {
/*
 * Creates a buffer object: builds the metadata shell via drv_bo_new(), asks
 * the backend to allocate the actual buffer, then (under the driver lock)
 * registers a reference for each plane handle in the buffer table.
 * NOTE(review): error handling for drv_bo_new/bo_create failures and the
 * final return are elided in this excerpt.
 */
250 struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
257 bo = drv_bo_new(drv, width, height, format, use_flags);
262 ret = drv->backend->bo_create(bo, width, height, format, use_flags);
269 pthread_mutex_lock(&drv->driver_lock);
271 for (plane = 0; plane < bo->meta.num_planes; plane++) {
/* Sanity: backend must report plane offsets in non-decreasing order. */
273 assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);
275 drv_increment_reference_count(drv, bo, plane);
278 pthread_mutex_unlock(&drv->driver_lock);
/*
 * Like drv_bo_create(), but lets the backend pick a layout from the
 * caller-supplied list of |count| format |modifiers|. Backends without a
 * bo_create_with_modifiers hook are rejected (error path elided). The bo
 * is created with BO_USE_NONE since modifiers, not use flags, drive the
 * layout choice here.
 * NOTE(review): error handling and the final return are elided in this
 * excerpt.
 */
283 struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
284 uint32_t format, const uint64_t *modifiers, uint32_t count)
290 if (!drv->backend->bo_create_with_modifiers) {
295 bo = drv_bo_new(drv, width, height, format, BO_USE_NONE);
300 ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers, count);
/* Register a reference per plane handle, as in drv_bo_create(). */
307 pthread_mutex_lock(&drv->driver_lock);
309 for (plane = 0; plane < bo->meta.num_planes; plane++) {
311 assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);
313 drv_increment_reference_count(drv, bo, plane);
316 pthread_mutex_unlock(&drv->driver_lock);
/*
 * Releases one reference on each plane of |bo| and, when no references to
 * its underlying buffers remain (total computed under the driver lock),
 * destroys the mappings and tells the backend to free the buffer.
 * NOTE(review): the declarations of total/ret, the conditional guarding
 * the actual destruction on total==0, and the final free(bo) are elided in
 * this excerpt.
 */
321 void drv_bo_destroy(struct bo *bo)
326 struct driver *drv = bo->drv;
328 pthread_mutex_lock(&drv->driver_lock);
330 for (plane = 0; plane < bo->meta.num_planes; plane++)
331 drv_decrement_reference_count(drv, bo, plane);
/* Sum remaining references across planes to decide if buffers are dead. */
333 for (plane = 0; plane < bo->meta.num_planes; plane++)
334 total += drv_get_reference_count(drv, bo, plane);
336 pthread_mutex_unlock(&drv->driver_lock);
339 ret = drv_mapping_destroy(bo);
341 bo->drv->backend->bo_destroy(bo);
/*
 * Imports an externally-created buffer described by |data| (prime fds plus
 * per-plane strides/offsets/modifiers). After the backend import, each
 * plane handle gains a reference, per-plane metadata is copied in, and each
 * plane's size is derived from the dmabuf's total size (lseek to SEEK_END)
 * or from the offset of the next plane. Sizes that would run past the end
 * of the dmabuf are rejected.
 * NOTE(review): error handling (including cleanup on lseek/oversize
 * failure), loop closing braces, and the final return are elided in this
 * excerpt.
 */
347 struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
354 bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags);
359 ret = drv->backend->bo_import(bo, data);
/* Take a buffer-table reference for every imported plane handle. */
365 for (plane = 0; plane < bo->meta.num_planes; plane++) {
366 pthread_mutex_lock(&bo->drv->driver_lock);
367 drv_increment_reference_count(bo->drv, bo, plane);
368 pthread_mutex_unlock(&bo->drv->driver_lock);
/* Copy caller-supplied layout metadata and compute per-plane sizes. */
371 for (plane = 0; plane < bo->meta.num_planes; plane++) {
372 bo->meta.strides[plane] = data->strides[plane];
373 bo->meta.offsets[plane] = data->offsets[plane];
374 bo->meta.format_modifiers[plane] = data->format_modifiers[plane];
/* dmabuf size discovered by seeking to the end of the plane's fd. */
376 seek_end = lseek(data->fds[plane], 0, SEEK_END);
377 if (seek_end == (off_t)(-1)) {
378 drv_log("lseek() failed with %s\n", strerror(errno));
/* Restore the fd's file position after probing its length. */
382 lseek(data->fds[plane], 0, SEEK_SET);
/* Last plane (or next offset unset): size extends to the end of the buf;
 * otherwise it runs up to the next plane's offset. */
383 if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
384 bo->meta.sizes[plane] = seek_end - data->offsets[plane];
386 bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];
/* Guard against a plane claiming more bytes than the dmabuf holds. */
388 if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
389 drv_log("buffer size is too large.\n");
393 bo->meta.total_size += bo->meta.sizes[plane];
/*
 * Maps |plane| of |bo| for CPU access over the given |rect| with
 * |map_flags|, returning the CPU address (offset to the plane) and the
 * mapping record through |map_data|. Reuses an existing identical mapping
 * when one exists; otherwise reuses a compatible vma (same handle and map
 * flags) or asks the backend to create a fresh one. All bookkeeping is
 * done under the driver lock.
 * NOTE(review): several control-flow lines (loop bodies' continue/return
 * statements, the "exact match" reuse return, calloc NULL-check, and the
 * final return of addr) are elided in this excerpt.
 */
403 void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
404 struct mapping **map_data, size_t plane)
408 struct mapping mapping;
/* Validate the requested region and access mode up front. */
410 assert(rect->width >= 0);
411 assert(rect->height >= 0);
412 assert(rect->x + rect->width <= drv_bo_get_width(bo));
413 assert(rect->y + rect->height <= drv_bo_get_height(bo));
414 assert(BO_MAP_READ_WRITE & map_flags);
415 /* No CPU access for protected buffers. */
416 assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
418 memset(&mapping, 0, sizeof(mapping));
419 mapping.rect = *rect;
420 mapping.refcount = 1;
422 pthread_mutex_lock(&bo->drv->driver_lock);
/* Pass 1: look for an existing mapping with identical handle, flags and
 * rectangle that can be reused outright. */
424 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
425 struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
426 if (prior->vma->handle != bo->handles[plane].u32 ||
427 prior->vma->map_flags != map_flags)
430 if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
431 rect->width != prior->rect.width || rect->height != prior->rect.height)
/* Pass 2: different rect but same handle/flags — share the prior vma
 * instead of creating a new kernel mapping. */
439 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
440 struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
441 if (prior->vma->handle != bo->handles[plane].u32 ||
442 prior->vma->map_flags != map_flags)
445 prior->vma->refcount++;
446 mapping.vma = prior->vma;
/* No reusable vma: create one and let the backend map the buffer. */
450 mapping.vma = calloc(1, sizeof(*mapping.vma));
451 memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
452 addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
453 if (addr == MAP_FAILED) {
456 pthread_mutex_unlock(&bo->drv->driver_lock);
460 mapping.vma->refcount = 1;
461 mapping.vma->addr = addr;
462 mapping.vma->handle = bo->handles[plane].u32;
463 mapping.vma->map_flags = map_flags;
/* Record the mapping and hand the stored copy back to the caller. */
466 *map_data = drv_array_append(bo->drv->mappings, &mapping);
/* Sync CPU caches before the caller touches the memory. */
468 drv_bo_invalidate(bo, *map_data);
469 addr = (uint8_t *)((*map_data)->vma->addr);
470 addr += drv_bo_get_plane_offset(bo, plane);
471 pthread_mutex_unlock(&bo->drv->driver_lock);
/*
 * Releases one reference on |mapping|; when the mapping's refcount hits
 * zero the shared vma's refcount is dropped too, and the backend unmap runs
 * only when the vma itself is no longer used. The mapping record is then
 * removed from the driver's mappings array. All under the driver lock.
 * NOTE(review): the early-out goto after the refcount decrement, free of
 * the vma, loop braces, and the final return of ret are elided in this
 * excerpt.
 */
475 int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
480 pthread_mutex_lock(&bo->drv->driver_lock);
/* Still referenced: just drop one count and skip the teardown. */
482 if (--mapping->refcount)
/* Last user of the shared vma: ask the backend to undo the mapping. */
485 if (!--mapping->vma->refcount) {
486 ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
/* Remove this mapping's record from the driver-wide list. */
490 for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
491 if (mapping == (struct mapping *)drv_array_at_idx(bo->drv->mappings, i)) {
492 drv_array_remove(bo->drv->mappings, i);
498 pthread_mutex_unlock(&bo->drv->driver_lock);
/*
 * Invalidates CPU caches for a live mapping via the backend's optional
 * bo_invalidate hook (no-op when the backend has none).
 * NOTE(review): the initialization of ret and the final return are elided
 * in this excerpt.
 */
502 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
/* Mapping must be alive and backed by a referenced vma. */
507 assert(mapping->vma);
508 assert(mapping->refcount > 0);
509 assert(mapping->vma->refcount > 0);
511 if (bo->drv->backend->bo_invalidate)
512 ret = bo->drv->backend->bo_invalidate(bo, mapping);
/*
 * Flushes a mapping back to the device when the backend supports bo_flush;
 * otherwise falls back to fully unmapping it. Protected buffers must never
 * have had a CPU mapping, hence the assert.
 * NOTE(review): the else keyword pairing L183/L185 and the final return
 * are elided in this excerpt.
 */
517 int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
522 assert(mapping->vma);
523 assert(mapping->refcount > 0);
524 assert(mapping->vma->refcount > 0);
525 assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
527 if (bo->drv->backend->bo_flush)
528 ret = bo->drv->backend->bo_flush(bo, mapping);
/* No flush hook: unmapping guarantees the data reaches the device. */
530 ret = drv_bo_unmap(bo, mapping);
/* Returns the buffer's width in pixels. */
535 uint32_t drv_bo_get_width(struct bo *bo)
537 return bo->meta.width;
/* Returns the buffer's height in pixels. */
540 uint32_t drv_bo_get_height(struct bo *bo)
542 return bo->meta.height;
/* Returns the number of planes implied by the buffer's format. */
545 size_t drv_bo_get_num_planes(struct bo *bo)
547 return bo->meta.num_planes;
/* Returns the GEM handle for |plane|.
 * NOTE(review): no bounds assert here, unlike the other plane accessors. */
550 union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
552 return bo->handles[plane];
/* Fallback for libdrm headers that predate DRM_RDWR (the #ifndef guard is
 * elided in this excerpt). */
556 #define DRM_RDWR O_RDWR
/*
 * Exports |plane|'s GEM handle as a prime (dmabuf) fd. Tries a read/write
 * export first and retries read-only on failure for older kernels. Returns
 * the fd on success or the negative drm error code on failure.
 * NOTE(review): the if() guarding the retry at L199 is elided.
 */
559 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
563 assert(plane < bo->meta.num_planes);
565 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
567 // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
569 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);
571 return (ret) ? ret : fd;
/* Returns the byte offset of |plane| within the buffer. */
574 uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
576 assert(plane < bo->meta.num_planes);
577 return bo->meta.offsets[plane];
/* Returns the size in bytes of |plane|. */
580 uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
582 assert(plane < bo->meta.num_planes);
583 return bo->meta.sizes[plane];
/* Returns the row stride in bytes of |plane|. */
586 uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
588 assert(plane < bo->meta.num_planes);
589 return bo->meta.strides[plane];
/* Returns the DRM format modifier recorded for |plane|. */
592 uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
594 assert(plane < bo->meta.num_planes);
595 return bo->meta.format_modifiers[plane];
/* Returns the buffer's DRM fourcc format. */
598 uint32_t drv_bo_get_format(struct bo *bo)
600 return bo->meta.format;
/*
 * Lets the backend translate a flexible/implementation-defined format into
 * a concrete one for the given use flags; backends without a hook leave the
 * format unchanged (the fallback return is elided in this excerpt).
 */
603 uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
605 if (drv->backend->resolve_format)
606 return drv->backend->resolve_format(drv, format, use_flags);
/*
 * Counts the number of distinct underlying buffers in |bo| — planes can
 * share a GEM handle, so a plane whose handle already appeared in an
 * earlier plane is not counted again.
 * NOTE(review): the counter variable, the skip/increment statements, and
 * the final return are elided in this excerpt.
 */
611 uint32_t drv_num_buffers_per_bo(struct bo *bo)
616 for (plane = 0; plane < bo->meta.num_planes; plane++) {
/* Check whether this plane's handle duplicates an earlier plane's. */
617 for (p = 0; p < plane; p++)
618 if (bo->handles[p].u32 == bo->handles[plane].u32)
/*
 * Formats a "[prefix:file(line)]" tag and logs the printf-style message —
 * to the Android log on Android builds (the #ifdef is elided in this
 * excerpt, but only one path runs per build), otherwise to stderr.
 * NOTE(review): this definition continues past the end of this excerpt
 * (va_end and closing brace not visible).
 */
627 void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
630 snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);
633 va_start(args, format);
635 __android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
637 fprintf(stderr, "%s ", buf);
638 vfprintf(stderr, format, args);