OSDN Git Service

drm/i915/gt: Only wait for GPU activity before unbinding a GGTT fence
author: Chris Wilson <chris@chris-wilson.co.uk>
Wed, 1 Apr 2020 21:01:02 +0000 (22:01 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Wed, 1 Apr 2020 22:34:16 +0000 (23:34 +0100)
Only GPU activity via the GGTT fence is asynchronous; since we control
the CPU access directly, we only need to wait for the GPU to stop using
the fence before we relinquish it.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200401210104.15907-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
drivers/gpu/drm/i915/i915_vma.c

index 225970f..d527b11 100644 (file)
@@ -223,6 +223,11 @@ static void fence_write(struct i915_fence_reg *fence,
        fence->dirty = false;
 }
 
+static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
+{
+       return INTEL_GEN(fence_to_i915(fence)) < 4;
+}
+
 static int fence_update(struct i915_fence_reg *fence,
                        struct i915_vma *vma)
 {
@@ -239,15 +244,18 @@ static int fence_update(struct i915_fence_reg *fence,
                if (!i915_vma_is_map_and_fenceable(vma))
                        return -EINVAL;
 
-               ret = i915_vma_sync(vma);
-               if (ret)
-                       return ret;
+               if (gpu_uses_fence_registers(fence)) {
+                       /* implicit 'unfenced' GPU blits */
+                       ret = i915_vma_sync(vma);
+                       if (ret)
+                               return ret;
+               }
        }
 
        old = xchg(&fence->vma, NULL);
        if (old) {
                /* XXX Ideally we would move the waiting to outside the mutex */
-               ret = i915_vma_sync(old);
+               ret = i915_active_wait(&fence->active);
                if (ret) {
                        fence->vma = old;
                        return ret;
@@ -869,6 +877,7 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
        for (i = 0; i < num_fences; i++) {
                struct i915_fence_reg *fence = &ggtt->fence_regs[i];
 
+               i915_active_init(&fence->active, NULL, NULL);
                fence->ggtt = ggtt;
                fence->id = i;
                list_add_tail(&fence->link, &ggtt->fence_list);
@@ -880,6 +889,14 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
 
 void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
 {
+       int i;
+
+       for (i = 0; i < ggtt->num_fences; i++) {
+               struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+               i915_active_fini(&fence->active);
+       }
+
        kfree(ggtt->fence_regs);
 }
 
index 9850f6a..08c6bb6 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+#include "i915_active.h"
+
 struct drm_i915_gem_object;
 struct i915_ggtt;
 struct i915_vma;
@@ -41,6 +43,7 @@ struct i915_fence_reg {
        struct i915_ggtt *ggtt;
        struct i915_vma *vma;
        atomic_t pin_count;
+       struct i915_active active;
        int id;
        /**
         * Whether the tiling parameters for the currently
index 18069df..616ca5a 100644 (file)
@@ -1232,6 +1232,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                dma_resv_add_shared_fence(vma->resv, &rq->fence);
                obj->write_domain = 0;
        }
+
+       if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
+               i915_active_add_request(&vma->fence->active, rq);
+
        obj->read_domains |= I915_GEM_GPU_DOMAINS;
        obj->mm.dirty = true;