diff --git a/amdgpu.c b/amdgpu.c
index 3dbe0e8..795d137 100644
--- a/amdgpu.c
+++ b/amdgpu.c
@@ -18,11 +18,9 @@
 #include "helpers.h"
 #include "util.h"
 
-#ifdef __ANDROID__
-#define DRI_PATH "/vendor/lib/dri/radeonsi_dri.so"
-#else
-#define DRI_PATH "/usr/lib64/dri/radeonsi_dri.so"
-#endif
+// clang-format off
+#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
+// clang-format on
 
 #define TILE_TYPE_LINEAR 0
 /* DRI backend decides tiling in this case. */
@@ -37,8 +35,9 @@ const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMA
 						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 };
 
-const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21,
-						   DRM_FORMAT_NV12, DRM_FORMAT_YVU420_ANDROID };
+const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
+						   DRM_FORMAT_NV21, DRM_FORMAT_NV12,
+						   DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };
 
 static int amdgpu_init(struct driver *drv)
 {
@@ -78,12 +77,20 @@ static int amdgpu_init(struct driver *drv)
 	drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
 			     &metadata, BO_USE_TEXTURE_MASK);
 
+	/*
+	 * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
+	 * Video Encoder Accelerator (VEA). NV12 could also be supported in the future.
+	 */
+	drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
+	drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_ENCODER);
+
 	/* Android CTS tests require this. */
 	drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
 
 	/* Linear formats supported by display. */
 	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
 	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
 	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
 
 	/* YUV formats for camera and display. */
@@ -119,6 +126,7 @@ static int amdgpu_init(struct driver *drv)
 	/* Potentially tiled formats supported by display. */
 	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
 	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
 	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
 	return 0;
 }
@@ -130,42 +138,20 @@ static void amdgpu_close(struct driver *drv)
 	drv->priv = NULL;
 }
 
-static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
-			    uint64_t use_flags)
+static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+				   uint64_t use_flags)
 {
 	int ret;
 	uint32_t plane, stride;
-	struct combination *combo;
 	union drm_amdgpu_gem_create gem_create;
 
-	combo = drv_get_combination(bo->drv, format, use_flags);
-	if (!combo)
-		return -EINVAL;
-
-	if (combo->metadata.tiling == TILE_TYPE_DRI) {
-#ifdef __ANDROID__
-		/*
-		 * Currently, the gralloc API doesn't differentiate between allocation time and map
-		 * time strides. A workaround for amdgpu DRI buffers is to always to align to 256 at
-		 * allocation time.
-		 *
-		 * See b/115946221,b/117942643
-		 */
-		if (use_flags & (BO_USE_SW_MASK)) {
-			uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
-			width = ALIGN(width, 256 / bytes_per_pixel);
-		}
-#endif
-		return dri_bo_create(bo, width, height, format, use_flags);
-	}
-
 	stride = drv_stride_from_format(format, width, 0);
 	stride = ALIGN(stride, 256);
 	drv_bo_from_format(bo, stride, height, format);
 
 	memset(&gem_create, 0, sizeof(gem_create));
-	gem_create.in.bo_size = bo->total_size;
+	gem_create.in.bo_size = bo->meta.total_size;
 	gem_create.in.alignment = 256;
 	gem_create.in.domain_flags = 0;
 
@@ -182,20 +168,80 @@ static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint
 	if (ret < 0)
 		return ret;
 
-	for (plane = 0; plane < bo->num_planes; plane++)
+	for (plane = 0; plane < bo->meta.num_planes; plane++)
 		bo->handles[plane].u32 = gem_create.out.handle;
 
+	bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+
 	return 0;
 }
 
-static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			    uint64_t use_flags)
 {
 	struct combination *combo;
-	combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+
+	combo = drv_get_combination(bo->drv, format, use_flags);
 	if (!combo)
 		return -EINVAL;
 
-	if (combo->metadata.tiling == TILE_TYPE_DRI)
+	if (combo->metadata.tiling == TILE_TYPE_DRI) {
+		bool needs_alignment = false;
+#ifdef __ANDROID__
+		/*
+		 * Currently, the gralloc API doesn't differentiate between allocation time and map
+		 * time strides. A workaround for amdgpu DRI buffers is to always align to 256 at
+		 * allocation time.
+		 *
+		 * See b/115946221,b/117942643
+		 */
+		if (use_flags & (BO_USE_SW_MASK))
+			needs_alignment = true;
+#endif
+		// See b/122049612
+		if (use_flags & (BO_USE_SCANOUT))
+			needs_alignment = true;
+
+		if (needs_alignment) {
+			uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
+			width = ALIGN(width, 256 / bytes_per_pixel);
+		}
+
+		return dri_bo_create(bo, width, height, format, use_flags);
+	}
+
+	return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
+}
+
+static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+					   uint32_t format, const uint64_t *modifiers,
+					   uint32_t count)
+{
+	bool only_use_linear = true;
+
+	for (uint32_t i = 0; i < count; ++i)
+		if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
+			only_use_linear = false;
+
+	if (only_use_linear)
+		return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);
+
+	return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count);
+}
+
+static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+{
+	bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR;
+	if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+		struct combination *combo;
+		combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+		if (!combo)
+			return -EINVAL;
+
+		dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI;
+	}
+
+	if (dri_tiling)
 		return dri_bo_import(bo, data);
 	else
 		return drv_prime_bo_import(bo, data);
@@ -226,9 +272,9 @@ static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_
 		return MAP_FAILED;
 	}
 
-	vma->length = bo->total_size;
+	vma->length = bo->meta.total_size;
 
-	return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
 		    gem_map.out.addr_ptr);
 }
 
@@ -266,7 +312,7 @@ static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
 	return 0;
 }
 
-static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags)
+static uint32_t amdgpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
 {
 	switch (format) {
 	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
@@ -287,12 +333,14 @@ const struct backend backend_amdgpu = {
 	.init = amdgpu_init,
 	.close = amdgpu_close,
 	.bo_create = amdgpu_create_bo,
+	.bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
 	.bo_destroy = amdgpu_destroy_bo,
 	.bo_import = amdgpu_import_bo,
 	.bo_map = amdgpu_map_bo,
 	.bo_unmap = amdgpu_unmap_bo,
 	.bo_invalidate = amdgpu_bo_invalidate,
 	.resolve_format = amdgpu_resolve_format,
+	.num_planes_from_modifier = dri_num_planes_from_modifier,
 };
 
 #endif
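
Usage note: the new bo_create_with_modifiers hook is reached through minigbm's
gbm frontend. Below is a minimal sketch, not part of the patch, of exercising
it: a modifier list containing only DRM_FORMAT_MOD_LINEAR takes the
amdgpu_create_bo_linear() path added above, while any other modifier is
forwarded to the DRI backend. The render-node path is an assumption that
varies per system, and error handling is trimmed for brevity.

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <drm_fourcc.h>
	#include <gbm.h>

	int main(void)
	{
		/* Hypothetical render node; the actual path varies per system. */
		int fd = open("/dev/dri/renderD128", O_RDWR);
		struct gbm_device *gbm = gbm_create_device(fd);
		const uint64_t mods[] = { DRM_FORMAT_MOD_LINEAR };

		/*
		 * A linear-only modifier list makes amdgpu_create_bo_with_modifiers()
		 * take the amdgpu_create_bo_linear() path, and the resulting buffer
		 * reports DRM_FORMAT_MOD_LINEAR via bo->meta.format_modifiers[0].
		 */
		struct gbm_bo *bo = gbm_bo_create_with_modifiers(gbm, 1920, 1080,
								 GBM_FORMAT_XRGB8888,
								 mods, 1);
		if (!bo)
			return 1;

		gbm_bo_destroy(bo);
		gbm_device_destroy(gbm);
		close(fd);
		return 0;
	}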