OSDN Git Service

drm/ttm: remove bo->moving
author: Christian König <christian.koenig@amd.com>
Tue, 23 Nov 2021 10:30:35 +0000 (11:30 +0100)
committer: Christian König <christian.koenig@amd.com>
Thu, 7 Apr 2022 10:53:54 +0000 (12:53 +0200)
This is now handled by the DMA-buf framework in the dma_resv obj.

Also remove the workaround inside VMWGFX to update the moving fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-14-christian.koenig@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
include/drm/ttm/ttm_bo_api.h

index 5031e26..3dc5ab2 100644 (file)
@@ -2447,6 +2447,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                struct amdgpu_bo *bo = mem->bo;
                uint32_t domain = mem->domain;
                struct kfd_mem_attachment *attachment;
+               struct dma_resv_iter cursor;
+               struct dma_fence *fence;
 
                total_size += amdgpu_bo_size(bo);
 
@@ -2461,10 +2463,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                                goto validate_map_fail;
                        }
                }
-               ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
-               if (ret) {
-                       pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
-                       goto validate_map_fail;
+               dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+                                       DMA_RESV_USAGE_KERNEL, fence) {
+                       ret = amdgpu_sync_fence(&sync_obj, fence);
+                       if (ret) {
+                               pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+                               goto validate_map_fail;
+                       }
                }
                list_for_each_entry(attachment, &mem->attachments, list) {
                        if (!attachment->is_mapped)
index 5832c05..ef93abe 100644 (file)
@@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
                if (unlikely(r))
                        goto fail_unreserve;
 
-               amdgpu_bo_fence(bo, fence, false);
-               dma_fence_put(bo->tbo.moving);
-               bo->tbo.moving = dma_fence_get(fence);
+               dma_resv_add_fence(bo->tbo.base.resv, fence,
+                                  DMA_RESV_USAGE_KERNEL);
                dma_fence_put(fence);
        }
        if (!bp->resv)
index e3fbf0f..31913ae 100644 (file)
@@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 {
        unsigned int i;
        uint64_t value;
-       int r;
+       long r;
 
-       if (vmbo->bo.tbo.moving) {
-               r = dma_fence_wait(vmbo->bo.tbo.moving, true);
-               if (r)
-                       return r;
-       }
+       r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+                                 true, MAX_SCHEDULE_TIMEOUT);
+       if (r < 0)
+               return r;
 
        pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
 
index dbb5517..bdb44ce 100644 (file)
@@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
        struct amdgpu_bo *bo = &vmbo->bo;
        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
                : AMDGPU_IB_POOL_DELAYED;
+       struct dma_resv_iter cursor;
        unsigned int i, ndw, nptes;
+       struct dma_fence *fence;
        uint64_t *pte;
        int r;
 
        /* Wait for PD/PT moves to be completed */
-       r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);
-       if (r)
-               return r;
+       dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+                               DMA_RESV_USAGE_KERNEL, fence) {
+               r = amdgpu_sync_fence(&p->job->sync, fence);
+               if (r)
+                       return r;
+       }
 
        do {
                ndw = p->num_dw_left;
index 360f980..015a94f 100644 (file)
@@ -418,7 +418,6 @@ static void ttm_bo_release(struct kref *kref)
        dma_resv_unlock(bo->base.resv);
 
        atomic_dec(&ttm_glob.bo_count);
-       dma_fence_put(bo->moving);
        bo->destroy(bo);
 }
 
@@ -714,9 +713,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
 EXPORT_SYMBOL(ttm_bo_unpin);
 
 /*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as kernel dependency and reserve a new
+ * fence slot.
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
@@ -746,9 +744,6 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                dma_fence_put(fence);
                return ret;
        }
-
-       dma_fence_put(bo->moving);
-       bo->moving = fence;
        return 0;
 }
 
@@ -951,7 +946,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
        bo->bdev = bdev;
        bo->type = type;
        bo->page_alignment = page_alignment;
-       bo->moving = NULL;
        bo->pin_count = 0;
        bo->sg = sg;
        bo->bulk_move = NULL;
index 99deb45..bc51903 100644 (file)
@@ -228,7 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
        atomic_inc(&ttm_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
-       fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);
 
        kref_init(&fbo->base.kref);
@@ -500,9 +499,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
         * operation has completed.
         */
 
-       dma_fence_put(bo->moving);
-       bo->moving = dma_fence_get(fence);
-
        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
                return ret;
@@ -546,9 +542,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
        spin_unlock(&from->move_lock);
 
        ttm_resource_free(bo, &bo->resource);
-
-       dma_fence_put(bo->moving);
-       bo->moving = dma_fence_get(fence);
 }
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
index 08ba083..5b324f2 100644 (file)
 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
 {
-       vm_fault_t ret = 0;
-       int err = 0;
-
-       if (likely(!bo->moving))
-               goto out_unlock;
+       long err = 0;
 
        /*
         * Quick non-stalling check for idle.
         */
-       if (dma_fence_is_signaled(bo->moving))
-               goto out_clear;
+       if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+               return 0;
 
        /*
         * If possible, avoid waiting for GPU with mmap_lock
@@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
         * is the first attempt.
         */
        if (fault_flag_allow_retry_first(vmf->flags)) {
-               ret = VM_FAULT_RETRY;
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
-                       goto out_unlock;
+                       return VM_FAULT_RETRY;
 
                ttm_bo_get(bo);
                mmap_read_unlock(vmf->vma->vm_mm);
-               (void) dma_fence_wait(bo->moving, true);
+               (void)dma_resv_wait_timeout(bo->base.resv,
+                                           DMA_RESV_USAGE_KERNEL, true,
+                                           MAX_SCHEDULE_TIMEOUT);
                dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
-               goto out_unlock;
+               return VM_FAULT_RETRY;
        }
 
        /*
         * Ordinary wait.
         */
-       err = dma_fence_wait(bo->moving, true);
-       if (unlikely(err != 0)) {
-               ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+       err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+                                   MAX_SCHEDULE_TIMEOUT);
+       if (unlikely(err < 0)) {
+               return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
-               goto out_unlock;
        }
 
-out_clear:
-       dma_fence_put(bo->moving);
-       bo->moving = NULL;
-
-out_unlock:
-       return ret;
+       return 0;
 }
 
 static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
index a84d1d5..a7d62a4 100644 (file)
@@ -1161,12 +1161,6 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
                *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
                                                      PAGE_SIZE);
                vmw_bo_fence_single(bo, NULL);
-               if (bo->moving)
-                       dma_fence_put(bo->moving);
-
-               return dma_resv_get_singleton(bo->base.resv,
-                                             DMA_RESV_USAGE_WRITE,
-                                             &bo->moving);
        }
 
        return 0;
index c76932b..2d524f8 100644 (file)
@@ -94,7 +94,6 @@ struct ttm_tt;
  * @deleted: True if the object is only a zombie and already deleted.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -147,7 +146,6 @@ struct ttm_buffer_object {
         * Members protected by a bo reservation.
         */
 
-       struct dma_fence *moving;
        unsigned priority;
        unsigned pin_count;