
freedreno: fix race condition
author    Rob Clark <robdclark@gmail.com>    Tue, 19 Feb 2019 14:29:49 +0000 (09:29 -0500)
committer Rob Clark <robdclark@gmail.com>    Fri, 22 Feb 2019 19:05:32 +0000 (14:05 -0500)
rsc->write_batch can be cleared behind our back, so we need to acquire
the lock *before* deref'ing.

Signed-off-by: Rob Clark <robdclark@gmail.com>
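
For context, the race has the classic unlocked-deref shape: the pointer is read and ref'd before the lock that guards it is held. Below is a minimal sketch of the broken and fixed patterns, using C11 threads and simplified stand-in structs (not the real Mesa definitions; the driver uses pipe_reference rather than a bare counter):

#include <threads.h>
#include <stddef.h>

struct fd_batch {
        int refcount;                     /* simplified stand-in */
};

struct fd_screen {
        mtx_t lock;                       /* guards write_batch (among other state) */
};

struct fd_resource {
        struct fd_screen *screen;
        struct fd_batch *write_batch;     /* can be cleared by another thread */
};

/* BROKEN: write_batch is deref'd and ref'd before the lock is taken,
 * so another thread can clear/unref it in between, and the refcount
 * bump can land on freed memory. */
static struct fd_batch *
get_write_batch_racy(struct fd_resource *rsc)
{
        struct fd_batch *b = rsc->write_batch;   /* unguarded read */
        if (b)
                b->refcount++;                    /* too late */
        return b;
}

/* FIXED: acquire the lock *before* deref'ing, mirroring the changes to
 * flush_write_batch() and flush_resource() in this commit. */
static struct fd_batch *
get_write_batch(struct fd_resource *rsc)
{
        struct fd_batch *b = NULL;

        mtx_lock(&rsc->screen->lock);
        b = rsc->write_batch;
        if (b)
                b->refcount++;
        mtx_unlock(&rsc->screen->lock);

        return b;
}
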
src/gallium/drivers/freedreno/freedreno_batch.c
src/gallium/drivers/freedreno/freedreno_batch.h
src/gallium/drivers/freedreno/freedreno_resource.c

diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c
index a157850..84dfa89 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -433,7 +433,7 @@ static void
 flush_write_batch(struct fd_resource *rsc)
 {
        struct fd_batch *b = NULL;
-       fd_batch_reference(&b, rsc->write_batch);
+       fd_batch_reference_locked(&b, rsc->write_batch);
 
        mtx_unlock(&b->ctx->screen->lock);
        fd_batch_flush(b, true, false);
diff --git a/src/gallium/drivers/freedreno/freedreno_batch.h b/src/gallium/drivers/freedreno/freedreno_batch.h
index 7b723db..e771ad6 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.h
+++ b/src/gallium/drivers/freedreno/freedreno_batch.h
@@ -243,6 +243,10 @@ void __fd_batch_destroy(struct fd_batch *batch);
  * WARNING the _locked() version can briefly drop the lock.  Without
  * recursive mutexes, I'm not sure there is much else we can do (since
  * __fd_batch_destroy() needs to unref resources)
+ *
+ * WARNING you must acquire the screen->lock and use the _locked()
+ * version whenever the batch being ref'd can disappear out from
+ * under you.
  */
 
 /* fwd-decl prototypes to untangle header dependency :-/ */
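
The first WARNING above is worth illustrating: unref'ing the old pointer inside the _locked() variant can drop the last reference, and since __fd_batch_destroy() has to unref the batch's resources, the screen lock is briefly released. A hedged sketch of that shape, with simplified stand-ins rather than the real Mesa code:

#include <threads.h>
#include <stddef.h>

struct fd_batch {
        int refcount;              /* simplified stand-in */
};

static mtx_t screen_lock;          /* stands in for screen->lock */

/* Stand-in for __fd_batch_destroy(): destroying a batch has to unref
 * its resources, and without a recursive mutex that forces it to drop
 * and re-take the screen lock. */
static void
batch_destroy(struct fd_batch *batch)
{
        mtx_unlock(&screen_lock);
        /* ... unref the batch's resources, free the batch ... */
        (void)batch;
        mtx_lock(&screen_lock);
}

/* Caller must hold screen_lock.  Note the last-unref path: it calls
 * batch_destroy(), which briefly releases the lock -- so callers
 * cannot assume other lock-protected state is stable across this
 * call. */
static void
batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
{
        struct fd_batch *old = *ptr;

        if (batch)
                batch->refcount++;

        *ptr = batch;

        if (old && --old->refcount == 0)
                batch_destroy(old);
}
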
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c b/src/gallium/drivers/freedreno/freedreno_resource.c
index f2ad2c5..163fa70 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -373,7 +373,9 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
 {
        struct fd_batch *write_batch = NULL;
 
-       fd_batch_reference(&write_batch, rsc->write_batch);
+       mtx_lock(&ctx->screen->lock);
+       fd_batch_reference_locked(&write_batch, rsc->write_batch);
+       mtx_unlock(&ctx->screen->lock);
 
        if (usage & PIPE_TRANSFER_WRITE) {
                struct fd_batch *batch, *batches[32] = {};
@@ -387,7 +389,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
                mtx_lock(&ctx->screen->lock);
                batch_mask = rsc->batch_mask;
                foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
-                       fd_batch_reference(&batches[batch->idx], batch);
+                       fd_batch_reference_locked(&batches[batch->idx], batch);
                mtx_unlock(&ctx->screen->lock);
 
                foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
@@ -501,7 +503,10 @@ fd_resource_transfer_map(struct pipe_context *pctx,
                                fd_blit_to_staging(ctx, trans);
 
                                struct fd_batch *batch = NULL;
-                               fd_batch_reference(&batch, staging_rsc->write_batch);
+
+                               fd_context_lock(ctx);
+                               fd_batch_reference_locked(&batch, staging_rsc->write_batch);
+                               fd_context_unlock(ctx);
 
                                /* we can't fd_bo_cpu_prep() until the blit to staging
                                 * is submitted to kernel.. in that case write_batch
@@ -550,7 +555,9 @@ fd_resource_transfer_map(struct pipe_context *pctx,
                struct fd_batch *write_batch = NULL;
 
                /* hold a reference, so it doesn't disappear under us: */
-               fd_batch_reference(&write_batch, rsc->write_batch);
+               fd_context_lock(ctx);
+               fd_batch_reference_locked(&write_batch, rsc->write_batch);
+               fd_context_unlock(ctx);
 
                if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
                                write_batch->back_blit) {