2 * Copyright (c) 2016 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
21 extern struct backend backend_amdgpu;
23 extern struct backend backend_cirrus;
24 extern struct backend backend_evdi;
26 extern struct backend backend_exynos;
28 extern struct backend backend_gma500;
30 extern struct backend backend_i915;
33 extern struct backend backend_marvell;
36 extern struct backend backend_mediatek;
39 extern struct backend backend_rockchip;
42 extern struct backend backend_tegra;
44 extern struct backend backend_udl;
45 extern struct backend backend_virtio_gpu;
/*
 * drv_get_backend() - find the backend matching this DRM fd's kernel driver.
 *
 * Queries the kernel driver name via drmGetVersion(fd) and compares it with
 * the name of each entry in backend_list, returning the first match.  The
 * drmVersionPtr is freed on every visible exit path.
 *
 * NOTE(review): this chunk is gap-sampled; the backend_list initializer
 * entries and the no-match return are not visible here — confirm against
 * the full file.
 */
47 static struct backend *drv_get_backend(int fd)
49 drmVersionPtr drm_version;
/* drmGetVersion() allocates; it must be freed before returning. */
52 drm_version = drmGetVersion(fd);
57 struct backend *backend_list[] = {
/* Linear scan: match the kernel driver name against each known backend. */
86 for(i = 0; i < ARRAY_SIZE(backend_list); i++)
87 if (!strcmp(drm_version->name, backend_list[i]->name)) {
88 drmFreeVersion(drm_version);
89 return backend_list[i];
/* No backend matched; release the version info before falling through. */
92 drmFreeVersion(drm_version);
/*
 * drv_create() - allocate and initialize a driver instance for a DRM fd.
 *
 * Visible steps: zero-allocate the struct, pick a backend via
 * drv_get_backend(), initialize the table mutex, create the buffer and map
 * hash tables, then run the backend's optional init() hook.  Failure paths
 * use goto-style cleanup; the labels and return statements fall in lines
 * not shown in this sampled chunk.
 */
96 struct driver *drv_create(int fd)
101 drv = (struct driver *) calloc(1, sizeof(*drv));
107 drv->backend = drv_get_backend(fd);
/* table_lock serializes access to the hash tables below. */
112 if (pthread_mutex_init(&drv->table_lock, NULL))
115 drv->buffer_table = drmHashCreate();
116 if (!drv->buffer_table)
119 drv->map_table = drmHashCreate();
121 goto free_buffer_table;
/* init() is optional; only some backends provide it. */
123 if (drv->backend->init) {
124 ret = drv->backend->init(drv);
/* Cleanup path: undo the allocations above in reverse order. */
132 drmHashDestroy(drv->map_table);
134 drmHashDestroy(drv->buffer_table);
136 pthread_mutex_destroy(&drv->table_lock);
/*
 * drv_destroy() - tear down a driver created by drv_create().
 *
 * Calls the backend's optional close() hook, then destroys the table lock
 * and both hash tables.  NOTE(review): the final free(drv) is not visible
 * in this sampled chunk — confirm it exists in the full file.
 */
142 void drv_destroy(struct driver *drv)
144 if (drv->backend->close)
145 drv->backend->close(drv);
147 pthread_mutex_destroy(&drv->table_lock);
148 drmHashDestroy(drv->buffer_table);
149 drmHashDestroy(drv->map_table);
/*
 * drv_get_fd() - accessor for the driver's DRM fd.
 * NOTE(review): the body is not visible in this sampled chunk; presumably
 * it returns drv->fd — confirm against the full file.
 */
154 int drv_get_fd(struct driver *drv)
/*
 * drv_get_name() - return the active backend's name.
 * NOTE(review): the return-type line (immediately above this one in the
 * full file) is not visible in this sampled chunk — confirm the declared
 * type against the full file.
 */
160 drv_get_name(struct driver *drv)
162 return drv->backend->name;
/*
 * drv_is_format_supported() - check whether the backend supports a format
 * with all of the requested usage bits.
 *
 * Visible logic: reject DRV_FORMAT_NONE / DRV_BO_USE_NONE up front, then
 * scan the backend's format_list for an entry whose format matches and
 * whose usage mask contains every requested bit.  The return statements
 * fall in lines not shown in this sampled chunk.
 */
165 int drv_is_format_supported(struct driver *drv, drv_format_t format,
170 if (format == DRV_FORMAT_NONE || usage == DRV_BO_USE_NONE)
173 for (i = 0 ; i < ARRAY_SIZE(drv->backend->format_list); i++)
/* A zero format entry presumably terminates the populated list. */
175 if (!drv->backend->format_list[i].format)
/* All requested usage bits must be present, not just some. */
178 if (drv->backend->format_list[i].format == format &&
179 (drv->backend->format_list[i].usage & usage) == usage)
/*
 * drv_bo_new() - allocate a bo shell (no backing memory yet).
 *
 * Zero-allocates the struct and derives the plane count from the format;
 * a zero plane count is treated as an error (the error-handling lines are
 * not shown in this sampled chunk).
 */
186 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height,
191 bo = (struct bo *) calloc(1, sizeof(*bo));
200 bo->num_planes = drv_num_planes_from_format(format);
202 if (!bo->num_planes) {
/*
 * drv_bo_create() - allocate a bo and its backing memory.
 *
 * Builds the shell with drv_bo_new(), asks the backend to create the
 * actual buffer, then takes a reference on every plane under table_lock.
 * Error-path lines fall outside this sampled chunk.
 */
210 struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height,
211 drv_format_t format, uint64_t flags)
217 bo = drv_bo_new(drv, width, height, format);
222 ret = drv->backend->bo_create(bo, width, height, format, flags);
/* Per-plane reference counts live in the hash tables, hence the lock. */
229 pthread_mutex_lock(&drv->table_lock);
231 for (plane = 0; plane < bo->num_planes; plane++)
232 drv_increment_reference_count(drv, bo, plane);
234 pthread_mutex_unlock(&drv->table_lock);
/*
 * drv_bo_destroy() - drop one reference on each plane and destroy the bo.
 *
 * Under table_lock: decrement every plane's refcount, then sum the
 * remaining counts.  NOTE(review): the conditional that presumably gates
 * the backend bo_destroy() on total == 0 is not visible in this sampled
 * chunk — confirm against the full file.
 */
239 void drv_bo_destroy(struct bo *bo)
243 struct driver *drv = bo->drv;
245 pthread_mutex_lock(&drv->table_lock);
247 for (plane = 0; plane < bo->num_planes; plane++)
248 drv_decrement_reference_count(drv, bo, plane);
/* Sum the references still held across all planes. */
250 for (plane = 0; plane < bo->num_planes; plane++)
251 total += drv_get_reference_count(drv, bo, plane);
253 pthread_mutex_unlock(&drv->table_lock);
256 bo->drv->backend->bo_destroy(bo);
/*
 * drv_bo_import() - wrap externally supplied prime fds in a new bo.
 *
 * For each plane: convert the prime fd to a GEM handle via
 * DRM_IOCTL_PRIME_FD_TO_HANDLE, copy stride/offset/size metadata from
 * @data, accumulate total_size, and take a per-plane reference under
 * table_lock.  The failure cleanup after the fprintf falls in lines not
 * shown in this sampled chunk.
 */
261 struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
266 struct drm_prime_handle prime_handle;
268 bo = drv_bo_new(drv, data->width, data->height, data->format);
273 for (plane = 0; plane < bo->num_planes; plane++) {
275 memset(&prime_handle, 0, sizeof(prime_handle));
276 prime_handle.fd = data->fds[plane];
278 ret = drmIoctl(drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE,
/* NOTE(review): %u with prime_handle.fd (a signed field in drm.h) is a
 * format-specifier mismatch — consider %d.  Confirm the field's type. */
282 fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed "
283 "(fd=%u)\n", prime_handle.fd);
/* Truncate so only the successfully imported planes get cleaned up. */
286 bo->num_planes = plane;
295 bo->handles[plane].u32 = prime_handle.handle;
296 bo->strides[plane] = data->strides[plane];
297 bo->offsets[plane] = data->offsets[plane];
298 bo->sizes[plane] = data->sizes[plane];
299 bo->total_size += data->sizes[plane];
301 pthread_mutex_lock(&drv->table_lock);
302 drv_increment_reference_count(drv, bo, plane);
303 pthread_mutex_unlock(&drv->table_lock);
/*
 * drv_bo_map() - map one plane of a bo and return a pointer offset to
 * pixel (x, y).
 *
 * Under table_lock: reuse an existing mapping if the plane's handle is
 * already present, otherwise ask the backend to map it and record the new
 * map_info.  The returned address is the plane offset plus an (x, y)
 * offset computed from the plane stride and per-format byte width.
 *
 * NOTE(review): the lookup here (line 324) and the delete in
 * drv_bo_unmap() (line 368) both use map_table, but the insert on line
 * 342 goes into buffer_table.  That looks like a bug — a freshly created
 * mapping would never be found on later lookups nor removed on unmap.
 * Confirm against the full file; the insert should very likely target
 * map_table.
 */
309 void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width,
310 uint32_t height, uint32_t flags, void **map_data, size_t plane)
315 struct map_info *data;
/* The requested rectangle must lie within the bo's dimensions. */
319 assert(x + width <= drv_bo_get_width(bo));
320 assert(y + height <= drv_bo_get_height(bo));
322 pthread_mutex_lock(&bo->drv->table_lock);
/* Cache hit: reuse the existing mapping for this handle. */
324 if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) {
325 data = (struct map_info *) ptr;
/* Cache miss: create a new map_info and let the backend map the plane. */
330 data = calloc(1, sizeof(*data));
331 addr = bo->drv->backend->bo_map(bo, data, plane);
332 if (addr == MAP_FAILED) {
335 pthread_mutex_unlock(&bo->drv->table_lock);
341 data->handle = bo->handles[plane].u32;
342 drmHashInsert(bo->drv->buffer_table, bo->handles[plane].u32,
346 *map_data = (void *) data;
/* Offset to (x, y): whole rows via stride, then x scaled by format bpp. */
347 offset = drv_bo_get_plane_stride(bo, plane) * y;
348 offset += drv_stride_from_format(bo->format, x, plane);
349 addr = (uint8_t *) data->addr;
350 addr += drv_bo_get_plane_offset(bo, plane) + offset;
351 pthread_mutex_unlock(&bo->drv->table_lock);
353 return (void *) addr;
/*
 * drv_bo_unmap() - release a mapping returned by drv_bo_map().
 *
 * Drops one reference on the map_info; when the count reaches zero the
 * region is munmap()ed and the handle removed from map_table.  The
 * declaration of ret and the final return fall in lines not shown in this
 * sampled chunk.
 */
356 int drv_bo_unmap(struct bo *bo, void *map_data)
358 struct map_info *data = map_data;
362 assert(data->refcount >= 0);
364 pthread_mutex_lock(&bo->drv->table_lock);
/* Last user: tear down the mapping and forget the handle. */
366 if (!--data->refcount) {
367 ret = munmap(data->addr, data->length);
368 drmHashDelete(bo->drv->map_table, data->handle);
372 pthread_mutex_unlock(&bo->drv->table_lock);
/*
 * drv_bo_get_width() - bo width in pixels.
 * NOTE(review): body not visible in this sampled chunk; presumably returns
 * bo->width — confirm against the full file.
 */
377 uint32_t drv_bo_get_width(struct bo *bo)
/*
 * drv_bo_get_height() - bo height in pixels.
 * NOTE(review): body not visible in this sampled chunk; presumably returns
 * bo->height — confirm against the full file.
 */
382 uint32_t drv_bo_get_height(struct bo *bo)
387 uint32_t drv_bo_get_stride_or_tiling(struct bo *bo)
389 return bo->tiling ? bo->tiling : drv_bo_get_plane_stride(bo, 0);
392 size_t drv_bo_get_num_planes(struct bo *bo)
394 return bo->num_planes;
397 union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
399 return bo->handles[plane];
/*
 * Compatibility definition for libdrm headers that predate DRM_RDWR.
 * NOTE(review): the surrounding #ifndef/#endif guard is not visible in
 * this sampled chunk — confirm it is present in the full file.
 */
403 #define DRM_RDWR O_RDWR
406 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
410 assert(plane < bo->num_planes);
412 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32,
413 DRM_CLOEXEC | DRM_RDWR, &fd);
415 return (ret) ? ret : fd;
419 uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
421 assert(plane < bo->num_planes);
422 return bo->offsets[plane];
425 uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
427 assert(plane < bo->num_planes);
428 return bo->sizes[plane];
431 uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
433 assert(plane < bo->num_planes);
434 return bo->strides[plane];
437 uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
439 assert(plane < bo->num_planes);
440 return bo->format_modifiers[plane];
/*
 * drv_bo_get_format() - the bo's drv_format_t.
 * NOTE(review): body not visible in this sampled chunk; presumably returns
 * bo->format — confirm against the full file.
 */
443 drv_format_t drv_bo_get_format(struct bo *bo)
/*
 * drv_resolve_format() - give the backend a chance to substitute a
 * different format via its optional resolve_format hook.
 * NOTE(review): the fallback return for backends without the hook is not
 * visible in this sampled chunk; presumably it returns format unchanged —
 * confirm against the full file.
 */
448 drv_format_t drv_resolve_format(struct driver *drv, drv_format_t format)
450 if (drv->backend->resolve_format)
451 return drv->backend->resolve_format(format);
457 * This function returns the stride for a given format, width and plane.
/*
 * Visible logic: compute the plane-0 stride as width times the format's
 * bits-per-pixel rounded up to whole bytes, then adjust for horizontally
 * subsampled planar formats such as DRV_FORMAT_YVU420.  The switch body,
 * the downsampling arithmetic, and the return fall in lines not shown in
 * this sampled chunk.
 */
459 int drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
461 /* Get stride of the first plane */
462 int stride = width * DIV_ROUND_UP(drv_bpp_from_format(format, 0), 8);
465 * Only downsample for certain multiplanar formats which are not
466 * interleaved and have horizontal subsampling. Only formats supported
467 * by our drivers are listed here -- add more as needed.
471 case DRV_FORMAT_YVU420:
/*
 * drv_num_buffers_per_bo() - count distinct handles among the bo's planes.
 *
 * Visible logic: for each plane, scan all earlier planes for a duplicate
 * u32 handle.  The counter update and the return statement run past the
 * last line visible in this sampled chunk — confirm against the full file.
 */
480 uint32_t drv_num_buffers_per_bo(struct bo *bo)
485 for (plane = 0; plane < bo->num_planes; plane++) {
486 for (p = 0; p < plane; p++)
487 if (bo->handles[p].u32 == bo->handles[plane].u32)