
drm/ttm: return -EBUSY on pipelining with no_wait_gpu (v2)
author     Christian König <christian.koenig@amd.com>
           Mon, 16 Sep 2019 15:20:47 +0000 (10:20 -0500)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 16 Sep 2019 15:42:32 +0000 (10:42 -0500)
Setting the no_wait_gpu flag means that the allocated BO must be available
immediately and we can't wait for any GPU operation to finish.
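
A driver opts into this behaviour through the no_wait_gpu field of
struct ttm_operation_ctx, which ttm_bo_validate() hands down to the
placement code touched below. A minimal sketch of such a caller —
ttm_operation_ctx and ttm_bo_validate() are the existing TTM
interfaces, the helper function itself is hypothetical:

#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical helper: place a BO without ever stalling on the GPU. */
static int driver_pin_nowait(struct ttm_buffer_object *bo,
                             struct ttm_placement *placement)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = true,    /* with this patch: -EBUSY instead of waiting */
        };

        /*
         * Placements whose pipelined move is still in flight are now
         * skipped with -EBUSY rather than blocking the caller.
         */
        return ttm_bo_validate(bo, placement, &ctx);
}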

v2: squash in mem leak fix, rebase

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/ttm/ttm_bo.c

index 20ff56f..6394e0c 100644
@@ -925,7 +925,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_mem_type_manager *man,
-                                struct ttm_mem_reg *mem)
+                                struct ttm_mem_reg *mem,
+                                bool no_wait_gpu)
 {
        struct dma_fence *fence;
        int ret;
@@ -934,19 +935,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);
 
-       if (fence) {
-               dma_resv_add_shared_fence(bo->base.resv, fence);
+       if (!fence)
+               return 0;
 
-               ret = dma_resv_reserve_shared(bo->base.resv, 1);
-               if (unlikely(ret)) {
-                       dma_fence_put(fence);
-                       return ret;
-               }
+       if (no_wait_gpu)
+               return -EBUSY;
+
+       dma_resv_add_shared_fence(bo->base.resv, fence);
 
-               dma_fence_put(bo->moving);
-               bo->moving = fence;
+       ret = dma_resv_reserve_shared(bo->base.resv, 1);
+       if (unlikely(ret)) {
+               dma_fence_put(fence);
+               return ret;
        }
 
+       dma_fence_put(bo->moving);
+       bo->moving = fence;
        return 0;
 }
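
For readability, here is how ttm_bo_add_move_fence() reads with this hunk
applied — reconstructed from the diff above; the opening spin_lock() pairs
with the spin_unlock() visible in the context lines:

static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_mem_type_manager *man,
                                 struct ttm_mem_reg *mem,
                                 bool no_wait_gpu)
{
        struct dma_fence *fence;
        int ret;

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        /* No pipelined move pending: the space is usable right away. */
        if (!fence)
                return 0;

        /* The caller cannot wait for the GPU: report the pending move. */
        if (no_wait_gpu)
                return -EBUSY;

        dma_resv_add_shared_fence(bo->base.resv, fence);

        ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret)) {
                dma_fence_put(fence);
                return ret;
        }

        dma_fence_put(bo->moving);
        bo->moving = fence;
        return 0;
}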
 
@@ -977,7 +981,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
        } while (1);
 
-       return ttm_bo_add_move_fence(bo, man, mem);
+       return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1119,14 +1123,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                if (unlikely(ret))
                        goto error;
 
-               if (mem->mm_node) {
-                       ret = ttm_bo_add_move_fence(bo, man, mem);
-                       if (unlikely(ret)) {
-                               (*man->func->put_node)(man, mem);
-                               goto error;
-                       }
-                       return 0;
+               if (!mem->mm_node)
+                       continue;
+
+               ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+               if (unlikely(ret)) {
+                       (*man->func->put_node)(man, mem);
+                       if (ret == -EBUSY)
+                               continue;
+
+                       goto error;
                }
+               return 0;
        }
 
        for (i = 0; i < placement->num_busy_placement; ++i) {
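
Net effect in ttm_bo_mem_space(): -EBUSY from ttm_bo_add_move_fence() now
means "this placement is busy, try the next one" — the just-allocated node
is released via put_node and the loop continues instead of bailing out
(releasing the node here is likely the "mem leak fix" squashed into v2).
Only errors other than -EBUSY still take the error path, and if every
preferred placement is busy, control falls through to the
num_busy_placement loop that begins here.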