flush_write_batch(struct fd_resource *rsc)
{
struct fd_batch *b = NULL;
- fd_batch_reference(&b, rsc->write_batch);
+ fd_batch_reference_locked(&b, rsc->write_batch);
mtx_unlock(&b->ctx->screen->lock);
fd_batch_flush(b, true, false);
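For context, the full helper plausibly looks like the sketch below. The re-lock after the flush and the final unref are assumptions about code not shown in this hunk; the hunk only confirms the locked ref, the unlock, and the flush call.

    /* Assumed shape: called with screen->lock held; drops and
     * re-acquires it around the flush, since flushing can't be
     * done under the lock.
     */
    static void
    flush_write_batch(struct fd_resource *rsc)
    {
        struct fd_batch *b = NULL;

        /* take a ref under the lock so the batch can't be destroyed: */
        fd_batch_reference_locked(&b, rsc->write_batch);

        mtx_unlock(&b->ctx->screen->lock);
        fd_batch_flush(b, true, false);
        mtx_lock(&b->ctx->screen->lock);

        fd_batch_reference_locked(&b, NULL);
    }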
* WARNING the _locked() version can briefly drop the lock. Without
* recursive mutexes, I'm not sure there is much else we can do (since
* __fd_batch_destroy() needs to unref resources)
+ *
+ * WARNING you must hold the screen->lock and use the _locked()
+ * version whenever the batch being ref'd could disappear out
+ * from under you.
*/
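In practice the contract reads like the minimal sketch below; take_ref() is a hypothetical caller, not a function from the patch:

    static void
    take_ref(struct fd_context *ctx, struct fd_resource *rsc)
    {
        struct fd_batch *batch = NULL;

        /* rsc->write_batch can be swapped or destroyed by another
         * thread, so the ref must be taken under screen->lock:
         */
        mtx_lock(&ctx->screen->lock);
        fd_batch_reference_locked(&batch, rsc->write_batch);
        mtx_unlock(&ctx->screen->lock);

        /* ... use batch ... */

        /* dropping the ref may destroy the batch; the plain
         * fd_batch_reference() takes the lock itself as needed:
         */
        fd_batch_reference(&batch, NULL);
    }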
/* fwd-decl prototypes to untangle header dependency :-/ */
{
struct fd_batch *write_batch = NULL;
- fd_batch_reference(&write_batch, rsc->write_batch);
+ mtx_lock(&ctx->screen->lock);
+ fd_batch_reference_locked(&write_batch, rsc->write_batch);
+ mtx_unlock(&ctx->screen->lock);
if (usage & PIPE_TRANSFER_WRITE) {
struct fd_batch *batch, *batches[32] = {};
uint32_t batch_mask;
mtx_lock(&ctx->screen->lock);
batch_mask = rsc->batch_mask;
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
- fd_batch_reference(&batches[batch->idx], batch);
+ fd_batch_reference_locked(&batches[batch->idx], batch);
mtx_unlock(&ctx->screen->lock);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
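The hunk breaks off at this second foreach_batch. Presumably the snapshotted references are then flushed, synced, and dropped with the lock released, along these lines (an assumption about the elided body, not shown in the diff):

    /* now we can flush/sync without holding screen->lock: */
    foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
        fd_batch_flush(batch, false, false);

    foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
        fd_batch_sync(batch);
        fd_batch_reference(&batches[batch->idx], NULL);  /* drop our ref */
    }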
fd_blit_to_staging(ctx, trans);
struct fd_batch *batch = NULL;
- fd_batch_reference(&batch, staging_rsc->write_batch);
+
+ fd_context_lock(ctx);
+ fd_batch_reference_locked(&batch, staging_rsc->write_batch);
+ fd_context_unlock(ctx);
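fd_context_lock()/fd_context_unlock() are presumably thin wrappers over the same screen lock, something like the sketch below (an assumption about the helpers, which this diff does not show):

    static inline void
    fd_context_lock(struct fd_context *ctx)
    {
        mtx_lock(&ctx->screen->lock);
    }

    static inline void
    fd_context_unlock(struct fd_context *ctx)
    {
        mtx_unlock(&ctx->screen->lock);
    }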
/* we can't fd_bo_cpu_prep() until the blit to staging
 * is submitted to kernel.. in that case write_batch
 * wouldn't be NULL yet:
 */
struct fd_batch *write_batch = NULL;
/* hold a reference, so it doesn't disappear under us: */
- fd_batch_reference(&write_batch, rsc->write_batch);
+ fd_context_lock(ctx);
+ fd_batch_reference_locked(&write_batch, rsc->write_batch);
+ fd_context_unlock(ctx);
if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
write_batch->back_blit) {
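Whichever path is taken in the body, the reference taken above still has to be dropped once write_batch is no longer needed, presumably outside the lock, e.g.:

    /* done with it; the unlocked variant takes screen->lock
     * internally if the batch is actually destroyed:
     */
    fd_batch_reference(&write_batch, NULL);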