.close = amdgpu_close,
.bo_create = amdgpu_bo_create,
.bo_destroy = drv_gem_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = amdgpu_bo_map,
};
.init = cirrus_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};
int ret;
size_t plane;
struct bo *bo;
- struct drm_prime_handle prime_handle;
bo = drv_bo_new(drv, data->width, data->height, data->format);
if (!bo)
return NULL;
- for (plane = 0; plane < bo->num_planes; plane++) {
-
- memset(&prime_handle, 0, sizeof(prime_handle));
- prime_handle.fd = data->fds[plane];
-
- ret = drmIoctl(drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE,
- &prime_handle);
-
- if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed "
- "(fd=%u)\n", prime_handle.fd);
-
- if (plane > 0) {
- bo->num_planes = plane;
- drv_bo_destroy(bo);
- } else {
- free(bo);
- }
-
- return NULL;
- }
+ ret = drv->backend->bo_import(bo, data);
+ if (ret) {
+ free(bo);
+ return NULL;
+ }
- bo->handles[plane].u32 = prime_handle.handle;
+ for (plane = 0; plane < bo->num_planes; plane++) {
bo->strides[plane] = data->strides[plane];
bo->offsets[plane] = data->offsets[plane];
bo->sizes[plane] = data->sizes[plane];
bo->format_modifiers[plane] = data->format_modifiers[plane];
bo->total_size += data->sizes[plane];
-
- pthread_mutex_lock(&drv->driver_lock);
- drv_increment_reference_count(drv, bo, plane);
- pthread_mutex_unlock(&drv->driver_lock);
}
return bo;
uint32_t format,
const uint64_t *modifiers,
uint32_t count);
+ int (*bo_destroy)(struct bo *bo);
+ int (*bo_import)(struct bo *bo, struct drv_import_fd_data *data);
void* (*bo_map)(struct bo *bo, struct map_info *data, size_t plane);
int (*bo_unmap)(struct bo *bo, struct map_info *data);
- int (*bo_destroy)(struct bo *bo);
uint32_t (*resolve_format)(uint32_t format);
struct list_head combinations;
};
.init = evdi_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};
.init = exynos_init,
.bo_create = exynos_bo_create,
.bo_destroy = drv_gem_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};
.init = gma500_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};
return error;
}
+/*
+ * Imports the per-plane prime fds in |data| into GEM handles stored on
+ * |bo|, then takes a per-plane reference on each imported handle.
+ *
+ * Returns 0 on success.  On ioctl failure, the handles imported so far
+ * are released via drv_gem_bo_destroy() and the drmIoctl() error is
+ * returned; the caller still owns |bo| itself.
+ *
+ * NOTE(review): importing the same fd for two planes yields the same GEM
+ * handle; the error path assumes drv_gem_bo_destroy() tolerates duplicate
+ * handles — confirm.
+ */
+int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+	int ret;
+	size_t plane;
+	struct drm_prime_handle prime_handle;
+
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		memset(&prime_handle, 0, sizeof(prime_handle));
+		prime_handle.fd = data->fds[plane];
+
+		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE,
+			       &prime_handle);
+
+		if (ret) {
+			/*
+			 * prime_handle.fd is a signed __s32 in the DRM uAPI,
+			 * so print it with %d, not %u.
+			 */
+			fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE "
+				"failed (fd=%d)\n", prime_handle.fd);
+
+			/*
+			 * Need to call GEM close on planes that were opened,
+			 * if any. Adjust the num_planes variable to be the
+			 * plane that failed, so GEM close will be called on
+			 * planes before that plane.
+			 */
+			bo->num_planes = plane;
+			drv_gem_bo_destroy(bo);
+			return ret;
+		}
+
+		bo->handles[plane].u32 = prime_handle.handle;
+	}
+
+	/* All planes imported cleanly; now take the per-plane references. */
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		pthread_mutex_lock(&bo->drv->driver_lock);
+		drv_increment_reference_count(bo->drv, bo, plane);
+		pthread_mutex_unlock(&bo->drv->driver_lock);
+	}
+
+	return 0;
+}
+
void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane)
{
int ret;
uint32_t format, uint32_t flags);
int drv_dumb_bo_destroy(struct bo *bo);
int drv_gem_bo_destroy(struct bo *bo);
+int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data);
void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane);
uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo,
size_t plane);
.close = i915_close,
.bo_create = i915_bo_create,
.bo_destroy = drv_gem_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = i915_bo_map,
.resolve_format = i915_resolve_format,
};
.init = marvell_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};
.init = mediatek_init,
.bo_create = mediatek_bo_create,
.bo_destroy = drv_gem_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = mediatek_bo_map,
.resolve_format = mediatek_resolve_format,
};
.init = nouveau_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};
.bo_create = rockchip_bo_create,
.bo_create_with_modifiers = rockchip_bo_create_with_modifiers,
.bo_destroy = drv_gem_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = rockchip_bo_map,
.resolve_format = rockchip_resolve_format,
};
.init = tegra_init,
.bo_create = tegra_bo_create,
.bo_destroy = drv_gem_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = tegra_bo_map,
.bo_unmap = tegra_bo_unmap,
};
.init = udl_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};
.init = vgem_init,
.bo_create = vgem_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
.resolve_format = vgem_resolve_format,
};
.init = virtio_gpu_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
};