2 * Copyright (c) 2016 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
21 extern struct backend backend_amdgpu;
23 extern struct backend backend_cirrus;
24 extern struct backend backend_evdi;
26 extern struct backend backend_exynos;
28 extern struct backend backend_gma500;
30 extern struct backend backend_i915;
33 extern struct backend backend_marvell;
36 extern struct backend backend_mediatek;
39 extern struct backend backend_rockchip;
42 extern struct backend backend_tegra;
44 extern struct backend backend_udl;
45 extern struct backend backend_vgem;
46 extern struct backend backend_virtio_gpu;
/*
 * Find the backend whose name matches the kernel DRM driver bound to fd.
 * Returns a pointer to the matching backend, or (per the elided tail of the
 * function — TODO confirm) NULL when no backend matches.
 * NOTE(review): interior lines of this function are elided in this view.
 */
48 static struct backend *drv_get_backend(int fd)
50 drmVersionPtr drm_version;
/* Query the kernel driver's name/version for the given fd. */
53 drm_version = drmGetVersion(fd);
/* Table of all compiled-in backends (externs declared above). */
58 struct backend *backend_list[] = {
/* Linear scan: match kernel driver name against each backend's name. */
88 for(i = 0; i < ARRAY_SIZE(backend_list); i++)
89 if (!strcmp(drm_version->name, backend_list[i]->name)) {
/* Free the version info before returning the match. */
90 drmFreeVersion(drm_version);
91 return backend_list[i];
/* No match: still must release the drmGetVersion allocation. */
94 drmFreeVersion(drm_version);
/*
 * Allocate and initialize a driver instance for the DRM fd.
 * Acquires, in order: the driver struct, the table mutex, the buffer-handle
 * hash table, the mapping hash table, and finally runs the backend's
 * optional init hook. Failures unwind via the goto-cleanup ladder below.
 * NOTE(review): interior lines (labels, returns) are elided in this view.
 */
98 struct driver *drv_create(int fd)
/* Zero-initialized so all tables/pointers start NULL. */
103 drv = (struct driver *) calloc(1, sizeof(*drv));
/* Pick the backend matching the kernel driver on this fd. */
109 drv->backend = drv_get_backend(fd);
/* Mutex guards both hash tables below. */
114 if (pthread_mutex_init(&drv->table_lock, NULL))
/* Per-handle reference counts live here. */
117 drv->buffer_table = drmHashCreate();
118 if (!drv->buffer_table)
/* Handle -> struct map_info for active CPU mappings. */
121 drv->map_table = drmHashCreate();
123 goto free_buffer_table;
/* Backend-specific setup is optional. */
125 if (drv->backend->init) {
126 ret = drv->backend->init(drv);
/* Error-unwind path: release resources in reverse acquisition order. */
134 drmHashDestroy(drv->map_table);
136 drmHashDestroy(drv->buffer_table);
138 pthread_mutex_destroy(&drv->table_lock);
/*
 * Tear down a driver instance created by drv_create(): run the backend's
 * optional close hook, then release the mutex and both hash tables.
 * NOTE(review): the final free(drv) is presumably on an elided line — confirm.
 */
144 void drv_destroy(struct driver *drv)
/* Backend close hook is optional. */
146 if (drv->backend->close)
147 drv->backend->close(drv);
148 pthread_mutex_destroy(&drv->table_lock);
150 drmHashDestroy(drv->buffer_table);
151 drmHashDestroy(drv->map_table);
/* Accessor — body elided in this view; presumably returns drv->fd. TODO confirm. */
156 int drv_get_fd(struct driver *drv)
/* Return the active backend's name string (return type on an elided line). */
162 drv_get_name(struct driver *drv)
164 return drv->backend->name;
/*
 * Check whether the backend supports 'format' with all bits of 'usage'.
 * Scans the backend's format_list; a zero format entry terminates the scan.
 * NOTE(review): the success/failure return statements are on elided lines.
 */
167 int drv_is_format_supported(struct driver *drv, drv_format_t format,
/* Nothing can match a "none" format or usage. */
172 if (format == DRV_FORMAT_NONE || usage == DRV_BO_USE_NONE)
175 for (i = 0 ; i < ARRAY_SIZE(drv->backend->format_list); i++)
/* A zero format marks the end of the populated entries. */
177 if (!drv->backend->format_list[i].format)
/* Match requires the exact format AND every requested usage bit. */
180 if (drv->backend->format_list[i].format == format &&
181 (drv->backend->format_list[i].usage & usage) == usage)
/*
 * Allocate and minimally initialize a bo struct (no backend allocation yet).
 * Derives the plane count from the format; a zero plane count is treated
 * as an error (cleanup path elided in this view).
 */
188 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height,
/* Zero-initialized: all handles/strides/offsets/sizes start at 0. */
193 bo = (struct bo *) calloc(1, sizeof(*bo));
202 bo->num_planes = drv_num_planes_from_format(format);
/* Unknown/unsupported format yields zero planes — reject. */
204 if (!bo->num_planes) {
/*
 * Create a buffer object: build the bo struct, ask the backend to allocate
 * the actual buffer, then take a reference on every plane's handle under
 * the table lock. Error paths are on elided lines.
 */
212 struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height,
213 drv_format_t format, uint64_t flags)
219 bo = drv_bo_new(drv, width, height, format);
/* Backend performs the real (kernel-side) allocation. */
224 ret = drv->backend->bo_create(bo, width, height, format, flags);
/* Reference counting of plane handles is protected by table_lock. */
231 pthread_mutex_lock(&drv->table_lock);
233 for (plane = 0; plane < bo->num_planes; plane++)
234 drv_increment_reference_count(drv, bo, plane);
236 pthread_mutex_unlock(&drv->table_lock);
/*
 * Drop one reference per plane and, when no plane handle is referenced by
 * any other bo, have the backend destroy the underlying buffer.
 * NOTE(review): the guard using 'total' and the final free(bo) are on
 * elided lines — presumably bo_destroy runs only when total == 0; confirm.
 */
241 void drv_bo_destroy(struct bo *bo)
245 struct driver *drv = bo->drv;
247 pthread_mutex_lock(&drv->table_lock);
/* First drop this bo's reference on each plane handle... */
249 for (plane = 0; plane < bo->num_planes; plane++)
250 drv_decrement_reference_count(drv, bo, plane);
/* ...then sum the remaining references across all planes. */
252 for (plane = 0; plane < bo->num_planes; plane++)
253 total += drv_get_reference_count(drv, bo, plane);
255 pthread_mutex_unlock(&drv->table_lock);
258 bo->drv->backend->bo_destroy(bo);
/*
 * Import an externally-created buffer from per-plane prime fds.
 * Each fd is converted to a GEM handle via DRM_IOCTL_PRIME_FD_TO_HANDLE;
 * plane metadata (stride/offset/size/modifier) is copied from 'data' and a
 * reference is taken per plane. On ioctl failure the bo's plane count is
 * truncated to the planes imported so far (so cleanup, on elided lines,
 * releases only those).
 */
263 struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
268 struct drm_prime_handle prime_handle;
270 bo = drv_bo_new(drv, data->width, data->height, data->format);
275 for (plane = 0; plane < bo->num_planes; plane++) {
277 memset(&prime_handle, 0, sizeof(prime_handle));
278 prime_handle.fd = data->fds[plane];
/* Convert the dma-buf/prime fd into a driver-local GEM handle. */
280 ret = drmIoctl(drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE,
284 fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed "
285 "(fd=%u)\n", prime_handle.fd);
/* Only planes [0, plane) were imported; limit cleanup to them. */
288 bo->num_planes = plane;
/* Record per-plane layout supplied by the importer. */
297 bo->handles[plane].u32 = prime_handle.handle;
298 bo->strides[plane] = data->strides[plane];
299 bo->offsets[plane] = data->offsets[plane];
300 bo->sizes[plane] = data->sizes[plane];
301 bo->format_modifiers[plane] = data->format_modifiers[plane];
302 bo->total_size += data->sizes[plane];
/* Take a reference on the newly imported handle. */
304 pthread_mutex_lock(&drv->table_lock);
305 drv_increment_reference_count(drv, bo, plane);
306 pthread_mutex_unlock(&drv->table_lock);
/*
 * Map a plane of a buffer object for CPU access and return a pointer to
 * the (x, y) position within it. Reuses an existing mapping for the same
 * handle when one is cached in map_table; otherwise asks the backend to
 * create one and caches it. *map_data receives the map_info token that
 * must be passed to drv_bo_unmap().
 *
 * BUG FIX: the new mapping was inserted into drv->buffer_table, but the
 * lookup above (line "327") and the delete in drv_bo_unmap (line "371")
 * both operate on drv->map_table — so a cached mapping could never be
 * found again nor removed. Insert into map_table instead.
 */
312 void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width,
313 uint32_t height, uint32_t flags, void **map_data, size_t plane)
318 struct map_info *data;
/* The requested rectangle must lie within the buffer. */
322 assert(x + width <= drv_bo_get_width(bo));
323 assert(y + height <= drv_bo_get_height(bo));
325 pthread_mutex_lock(&bo->drv->table_lock);
/* Fast path: this handle is already mapped — reuse the cached mapping. */
327 if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) {
328 data = (struct map_info *) ptr;
/* Slow path: create a fresh mapping via the backend. */
333 data = calloc(1, sizeof(*data));
334 addr = bo->drv->backend->bo_map(bo, data, plane);
335 if (addr == MAP_FAILED) {
338 pthread_mutex_unlock(&bo->drv->table_lock);
/* Cache the mapping keyed by GEM handle so later maps/unmaps find it. */
344 data->handle = bo->handles[plane].u32;
345 drmHashInsert(bo->drv->map_table, bo->handles[plane].u32,
/* Report the map token, then compute the caller's (x, y) address. */
349 *map_data = (void *) data;
350 offset = drv_bo_get_plane_stride(bo, plane) * y;
351 offset += drv_stride_from_format(bo->format, x, plane);
352 addr = (uint8_t *) data->addr;
353 addr += drv_bo_get_plane_offset(bo, plane) + offset;
354 pthread_mutex_unlock(&bo->drv->table_lock);
356 return (void *) addr;
/*
 * Release one reference on a CPU mapping obtained from drv_bo_map().
 * When the last reference drops, munmap the region and remove the cache
 * entry from map_table. Returns the munmap result (initialization of
 * 'ret' for the shared-mapping path is on an elided line).
 */
359 int drv_bo_unmap(struct bo *bo, void *map_data)
361 struct map_info *data = map_data;
365 assert(data->refcount >= 0);
367 pthread_mutex_lock(&bo->drv->table_lock);
/* Last user: tear down the mapping and drop it from the cache. */
369 if (!--data->refcount) {
370 ret = munmap(data->addr, data->length);
371 drmHashDelete(bo->drv->map_table, data->handle);
375 pthread_mutex_unlock(&bo->drv->table_lock);
/* Accessor — body elided in this view; presumably returns bo->width. TODO confirm. */
380 uint32_t drv_bo_get_width(struct bo *bo)
/* Accessor — body elided in this view; presumably returns bo->height. TODO confirm. */
385 uint32_t drv_bo_get_height(struct bo *bo)
/* Return the tiling value if set, otherwise the stride of plane 0. */
390 uint32_t drv_bo_get_stride_or_tiling(struct bo *bo)
392 return bo->tiling ? bo->tiling : drv_bo_get_plane_stride(bo, 0);
/* Return the number of planes in this buffer object. */
395 size_t drv_bo_get_num_planes(struct bo *bo)
397 return bo->num_planes;
/* Return the GEM handle for the given plane (no bounds check here). */
400 union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
402 return bo->handles[plane];
/* Fallback for older libdrm headers lacking DRM_RDWR — presumably inside an
 * #ifndef guard on an elided line; TODO confirm. */
406 #define DRM_RDWR O_RDWR
/*
 * Export the plane's GEM handle as a prime (dma-buf) fd, readable and
 * writable, close-on-exec. Returns the fd on success or the negative
 * drmPrimeHandleToFD error code on failure.
 */
409 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
413 assert(plane < bo->num_planes);
415 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32,
416 DRM_CLOEXEC | DRM_RDWR, &fd);
418 return (ret) ? ret : fd;
/* Return the byte offset of the given plane within the buffer. */
422 uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
424 assert(plane < bo->num_planes);
425 return bo->offsets[plane];
/* Return the size in bytes of the given plane. */
428 uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
430 assert(plane < bo->num_planes);
431 return bo->sizes[plane];
/* Return the row stride in bytes of the given plane. */
434 uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
436 assert(plane < bo->num_planes);
437 return bo->strides[plane];
/* Return the DRM format modifier recorded for the given plane. */
440 uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
442 assert(plane < bo->num_planes);
443 return bo->format_modifiers[plane];
/* Accessor — body elided in this view; presumably returns bo->format. TODO confirm. */
446 drv_format_t drv_bo_get_format(struct bo *bo)
/*
 * Let the backend translate a generic/flexible format into the concrete
 * format it allocates; backends without a resolve hook get the format
 * passed through unchanged (fallthrough return on an elided line).
 */
451 drv_format_t drv_resolve_format(struct driver *drv, drv_format_t format)
453 if (drv->backend->resolve_format)
454 return drv->backend->resolve_format(format);
460 * This function returns the stride for a given format, width and plane.
462 int drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
464 /* Get stride of the first plane */
465 int stride = width * DIV_ROUND_UP(drv_bpp_from_format(format, 0), 8);
468 * Only downsample for certain multiplanar formats which are not
469 * interleaved and have horizontal subsampling. Only formats supported
470 * by our drivers are listed here -- add more as needed.
/* YVU420: chroma planes are half-width, so their stride is halved
 * (the switch statement and halving arithmetic are on elided lines). */
474 case DRV_FORMAT_YVU420:
483 uint32_t drv_num_buffers_per_bo(struct bo *bo)
488 for (plane = 0; plane < bo->num_planes; plane++) {
489 for (p = 0; p < plane; p++)
490 if (bo->handles[p].u32 == bo->handles[plane].u32)