+	/* Compute the stride for this format at the requested width and align it to 256 bytes. */
+	stride = drv_stride_from_format(format, width, 0);
+	stride = ALIGN(stride, 256);
+
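+	/* Fill in the per-plane strides, offsets, sizes, and the total size from the aligned stride. */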
+	drv_bo_from_format(bo, stride, height, format);
+
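+	/* Describe the allocation request for the kernel GEM_CREATE ioctl. */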
+	memset(&gem_create, 0, sizeof(gem_create));
+	gem_create.in.bo_size = bo->meta.total_size;
+	gem_create.in.alignment = 256;
+	gem_create.in.domain_flags = 0;
+
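+	/* Software-accessed buffers need a CPU-accessible placement. */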
+	if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
+		gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
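+	/*
+	 * Place the buffer in GTT and request a write-combined CPU mapping
+	 * unless the CPU reads it often or it is a scanout buffer.
+	 */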
+	gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+	if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
+		gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
+	/* Allocate the buffer with the preferred heap. */
+	ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
+				  sizeof(gem_create));
+	if (ret < 0)
+		return ret;
+
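+	/* A single GEM object backs all planes, so every plane shares one handle. */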
+	for (plane = 0; plane < bo->meta.num_planes; plane++)
+		bo->handles[plane].u32 = gem_create.out.handle;
+
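+	/* This allocation path only produces linear layouts, so record the linear modifier. */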
+	bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+
+	return 0;
+}
+
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			    uint64_t use_flags)
+{
+	struct combination *combo;
+