From 516198d317d81f33839ca850e83f6717b0d80e80 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Christian=20K=C3=B6nig?=
Date: Thu, 1 Sep 2022 13:49:30 +0100
Subject: [PATCH] drm/i915: audit bo->resource usage v3
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Make sure we can at least move and alloc TT objects without backing
store.

v2: clear the tt object even when no resource is allocated.
v3: add Matthew's changes for i915 as well.

Signed-off-by: Christian König
Reviewed-by: Matthew Auld
Link: https://patchwork.freedesktop.org/patch/msgid/20230124125726.13323-1-christian.koenig@amd.com
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c      | 27 +++++++++++---
 drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 56 ++++++++++++++++++++++++----
 2 files changed, 71 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index d409a77449a3..6649d18ed1c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -271,8 +271,6 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 {
 	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
 						     bdev);
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->resource->mem_type);
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 	unsigned long ccs_pages = 0;
 	enum ttm_caching caching;
@@ -286,8 +284,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (!i915_tt)
 		return NULL;
 
-	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
-	    man->use_tt)
+	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+	    ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
 		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
 
 	caching = i915_ttm_select_tt_caching(obj);
@@ -1051,7 +1049,26 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	if (!i915_ttm_resource_mappable(bo->resource)) {
+	/*
+	 * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+	 * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+	 * far as doing a ttm_bo_move_null(), which should skip all the
+	 * other junk.
+	 */
+	if (!bo->resource) {
+		struct ttm_operation_ctx ctx = {
+			.interruptible = true,
+			.no_wait_gpu = true, /* should be idle already */
+		};
+
+		GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+		ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+		if (ret) {
+			dma_resv_unlock(bo->base.resv);
+			return VM_FAULT_SIGBUS;
+		}
+	} else if (!i915_ttm_resource_mappable(bo->resource)) {
 		int err = -ENODEV;
 		int i;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 2ebaaf4d663c..76dd9e5e1a8b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -103,7 +103,27 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 {
 	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 	unsigned int cache_level;
+	unsigned int mem_flags;
 	unsigned int i;
+	int mem_type;
+
+	/*
+	 * We might have been purged (or swapped out) if the resource is NULL,
+	 * in which case the SYSTEM placement is the closest match to describe
+	 * the current domain. If the object is ever used in this state then we
+	 * will require moving it again.
+	 */
+	if (!bo->resource) {
+		mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = I915_PL_SYSTEM;
+		cache_level = I915_CACHE_NONE;
+	} else {
+		mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+			I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = bo->resource->mem_type;
+		cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+						   bo->ttm);
+	}
 
 	/*
 	 * If object was moved to an allowable region, update the object
@@ -111,11 +131,11 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 	 * in an allowable region, it's evicted and we don't update the
 	 * object region.
 	 */
-	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+	if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
 		for (i = 0; i < obj->mm.n_placements; ++i) {
 			struct intel_memory_region *mr = obj->mm.placements[i];
 
-			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+			if (intel_region_to_ttm_type(mr) == mem_type &&
 			    mr != obj->mm.region) {
 				i915_gem_object_release_memory_region(obj);
 				i915_gem_object_init_memory_region(obj, mr);
@@ -125,12 +145,8 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 	}
 
 	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+	obj->mem_flags |= mem_flags;
 
-	obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
-		I915_BO_FLAG_STRUCT_PAGE;
-
-	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
-					   bo->ttm);
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 }
 
@@ -565,6 +581,32 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
 		return 0;
 	}
 
+	if (!bo->resource) {
+		if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+			hop->mem_type = TTM_PL_SYSTEM;
+			hop->flags = TTM_PL_FLAG_TEMPORARY;
+			return -EMULTIHOP;
+		}
+
+		/*
+		 * This is only reached when first creating the object, or if
+		 * the object was purged or swapped out (pipeline-gutting). For
+		 * the former we can safely skip all of the below since we are
+		 * only using a dummy SYSTEM placement here. And with the latter
+		 * we will always re-enter here with bo->resource set correctly
+		 * (as per the above), since this is part of a multi-hop
+		 * sequence, where at the end we can do the move for real.
+		 *
+		 * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+		 * which doesn't require any kind of move, so it should be safe
+		 * to skip all the below and call ttm_bo_move_null() here, where
+		 * the caller in __i915_ttm_get_pages() will take care of the
+		 * rest, since we should have a valid ttm_tt.
+		 */
+		ttm_bo_move_null(bo, dst_mem);
+		return 0;
+	}
+
 	ret = i915_ttm_move_notify(bo);
 	if (ret)
 		return ret;
-- 
2.11.0
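
The invariant this patch relies on can be summarised as: once a buffer object
has been pipeline-gutted (purged or swapped out), bo->resource is NULL and the
object must be treated as if it sat in system memory until it is validated
again. Below is a minimal illustrative sketch of that consumer-side pattern.
It is not part of the diff above; the helper name example_bo_mem_type() is
hypothetical, and the includes assume the in-tree TTM declarations
(struct ttm_buffer_object, struct ttm_resource, TTM_PL_SYSTEM) of the kernels
this patch targets.

/*
 * Illustrative sketch only, not part of the patch. example_bo_mem_type() is
 * a hypothetical helper.
 */
#include <linux/types.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static u32 example_bo_mem_type(struct ttm_buffer_object *bo)
{
	/*
	 * A pipeline-gutted BO (purged or swapped out) has no resource;
	 * report TTM_PL_SYSTEM as the closest match, in the same spirit as
	 * i915_ttm_adjust_gem_after_move() falling back to I915_PL_SYSTEM.
	 */
	if (!bo->resource)
		return TTM_PL_SYSTEM;

	return bo->resource->mem_type;
}

Reporting TTM_PL_SYSTEM instead of dereferencing a NULL resource is the kind
of check this audit adds across i915's TTM callbacks (tt_create, vm_fault and
move above).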