 {
 	int err = 0;
 
-	/* We must always wait for the exclusive fence! */
-	if (rcu_access_pointer(ref->excl.fence)) {
+	if (flags & I915_ACTIVE_AWAIT_EXCL &&
+	    rcu_access_pointer(ref->excl.fence)) {
 		err = __await_active(&ref->excl, fn, arg);
 		if (err)
 			return err;
 	}
 
-	if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+	if (flags & I915_ACTIVE_AWAIT_ACTIVE &&
+	    i915_active_acquire_if_busy(ref)) {
 		struct active_node *it, *n;
 
 		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
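The hunk cuts off inside the tree walk. For readability, here is how the whole helper reads once the patch is applied; the signature, the loop body and the final release are not shown in this diff and are reconstructed from the surrounding context, so treat them as illustrative rather than as part of the patch:

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg)
{
	int err = 0;

	/* The exclusive (write) fence is now opt-in... */
	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			return err;
	}

	/* ...as is walking every per-timeline node in the rbtree. */
	if (flags & I915_ACTIVE_AWAIT_ACTIVE &&
	    i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(it, fn, arg);
			if (err)
				break;
		}
		i915_active_release(ref); /* drop the acquire_if_busy() ref */
		if (err)
			return err;
	}

	return 0;
}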
 int i915_request_await_active(struct i915_request *rq,
 			      struct i915_active *ref,
 			      unsigned int flags);
-#define I915_ACTIVE_AWAIT_ALL BIT(0)
+#define I915_ACTIVE_AWAIT_EXCL BIT(0)
+#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
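With each wait behind its own bit, a caller that wants the previous wait-on-everything semantics of I915_ACTIVE_AWAIT_ALL simply ORs the two flags together. A minimal sketch, assuming rq and ref are an already-constructed request and tracker:

	/* Equivalent of the old I915_ACTIVE_AWAIT_ALL behaviour: wait on
	 * the exclusive fence and on every active timeline.
	 */
	err = i915_request_await_active(rq, ref,
					I915_ACTIVE_AWAIT_EXCL |
					I915_ACTIVE_AWAIT_ACTIVE);
	if (err)
		return err;

The two call sites below are updated accordingly.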
 	if (!IS_ERR_OR_NULL(active)) {
 		/* After all individual context modifications */
 		err = i915_request_await_active(rq, active,
-						I915_ACTIVE_AWAIT_ALL);
+						I915_ACTIVE_AWAIT_ACTIVE);
 		if (err)
 			goto err_add_request;
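For reference, i915_request_await_active() itself is only a thin wrapper that feeds each fence selected by the flags into the request. A sketch of the glue, with the callback name reconstructed rather than taken from this diff, assuming a trampoline over i915_request_await_dma_fence():

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	/* Order the new request after each tracked fence. */
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq);
}

The final hunk updates the vma path, which previously relied on the implicit exclusive wait.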
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
 	/* Wait for the vma to be bound before we start! */
-	err = i915_request_await_active(rq, &vma->active, 0);
+	err = i915_request_await_active(rq, &vma->active,
+					I915_ACTIVE_AWAIT_EXCL);
 	if (err)
 		return err;
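Note the subtlety this hunk guards against: before the change, flags == 0 still waited on the exclusive fence ("We must always wait for the exclusive fence!"), whereas now it waits on nothing, so the bind fence must be requested explicitly. A before/after sketch:

	/* Before: the exclusive (bind) fence was awaited unconditionally. */
	err = i915_request_await_active(rq, &vma->active, 0);

	/* After: flags == 0 awaits nothing; ask for the bind fence. */
	err = i915_request_await_active(rq, &vma->active,
					I915_ACTIVE_AWAIT_EXCL);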