drm/nouveau: use the new iterator in nouveau_fence_sync
author     Christian König <christian.koenig@amd.com>
           Wed, 16 Jun 2021 07:20:56 +0000 (09:20 +0200)
committer  Christian König <christian.koenig@amd.com>
           Fri, 22 Oct 2021 12:12:13 +0000 (14:12 +0200)
Simplifying the code a bit.

The new implementation unifies the handling between drivers and thus
results in waiting for all shared fences in all cases.
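
For reference, a minimal sketch of the locked dma_resv_for_each_fence()
iteration pattern this patch adopts; the "resv" object, the "all_fences"
flag and the "intr" parameter are illustrative placeholders here, not
taken from the patch itself:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	/* The non-unlocked iterator variant used below expects the
	 * caller to hold the reservation lock.
	 */
	dma_resv_for_each_fence(&cursor, resv, all_fences, fence) {
		ret = dma_fence_wait(fence, intr);
		if (ret)
			return ret;
	}
	return 0;

In this version of the API the third argument selects whether shared
fences are returned in addition to the exclusive one, which is how the
patch below maps the existing "exclusive" parameter onto the iterator.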

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20211005113742.1101-26-christian.koenig@amd.com
drivers/gpu/drm/nouveau/nouveau_fence.c

index 05d0b3e..26f9299 100644
@@ -339,14 +339,15 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 }
 
 int
-nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+                  bool exclusive, bool intr)
 {
        struct nouveau_fence_chan *fctx = chan->fence;
-       struct dma_fence *fence;
        struct dma_resv *resv = nvbo->bo.base.resv;
-       struct dma_resv_list *fobj;
+       struct dma_resv_iter cursor;
+       struct dma_fence *fence;
        struct nouveau_fence *f;
-       int ret = 0, i;
+       int ret;
 
        if (!exclusive) {
                ret = dma_resv_reserve_shared(resv, 1);
@@ -355,10 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
                        return ret;
        }
 
-       fobj = dma_resv_shared_list(resv);
-       fence = dma_resv_excl_fence(resv);
-
-       if (fence) {
+       dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
                struct nouveau_channel *prev = NULL;
                bool must_wait = true;
 
@@ -366,41 +364,19 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
                if (f) {
                        rcu_read_lock();
                        prev = rcu_dereference(f->channel);
-                       if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+                       if (prev && (prev == chan ||
+                                    fctx->sync(f, prev, chan) == 0))
                                must_wait = false;
                        rcu_read_unlock();
                }
 
-               if (must_wait)
+               if (must_wait) {
                        ret = dma_fence_wait(fence, intr);
-
-               return ret;
-       }
-
-       if (!exclusive || !fobj)
-               return ret;
-
-       for (i = 0; i < fobj->shared_count && !ret; ++i) {
-               struct nouveau_channel *prev = NULL;
-               bool must_wait = true;
-
-               fence = rcu_dereference_protected(fobj->shared[i],
-                                               dma_resv_held(resv));
-
-               f = nouveau_local_fence(fence, chan->drm);
-               if (f) {
-                       rcu_read_lock();
-                       prev = rcu_dereference(f->channel);
-                       if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
-                               must_wait = false;
-                       rcu_read_unlock();
+                       if (ret)
+                               return ret;
                }
-
-               if (must_wait)
-                       ret = dma_fence_wait(fence, intr);
        }
-
-       return ret;
+       return 0;
 }
 
 void