int prio = rq_prio(rq);
/*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ /*
* On unwinding the active request, we give it a priority bump
* if it has completed waiting on any semaphore. If we know that
* the request has already started, we can prevent an unwanted
kernel_context_close(c->ctx);
}
+static int live_nopreempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be being observed and must not be interrupted.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ if (preempt_client_init(i915, &a))
+ goto err_unlock;
+ if (preempt_client_init(i915, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
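+ /* Reset the preemption counter; this test expects it to stay zero */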
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = igt_spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ rq_a->flags |= I915_REQUEST_NOPREEMPT;
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = igt_spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /*
+ * Wait long enough for preemption and timeslicing to have been
+ * attempted; the high priority B must not start while the
+ * unpreemptable A is still spinning.
+ */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
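+ /* Let the unpreemptable spinner finish; only now may B take the engine */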
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+err_unlock:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_client_b;
+}
+
static int live_suppress_self_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+ /*
+ * Requests containing performance queries must not be preempted by
+ * another context. They get scheduled with their default priority and,
+ * once they reach the execlist ports, we ensure that they stick on the
+ * HW until finished by pretending that they have maximum priority,
+ * i.e. nothing can have higher priority and force us to usurp the
+ * active request.
+ */
+ I915_PRIORITY_UNPREEMPTABLE = INT_MAX,
+
I915_PRIORITY_INVALID = INT_MIN
};
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
i915_request_cancel_breadcrumb(rq);
- if (rq->waitboost) {
+ if (i915_request_has_waitboost(rq)) {
GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
}
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
- rq->waitboost = false;
+ rq->flags = 0;
rq->execution_mask = ALL_ENGINES;
INIT_LIST_HEAD(&rq->active_list);
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- bool waitboost;
+ unsigned long flags;
+#define I915_REQUEST_WAITBOOST BIT(0)
+#define I915_REQUEST_NOPREEMPT BIT(1)
/** timeline->request entry for this request */
struct list_head link;
rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
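+/* True if a client waitboost has been applied on behalf of this request */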
+static inline bool i915_request_has_waitboost(const struct i915_request *rq)
+{
+ return rq->flags & I915_REQUEST_WAITBOOST;
+}
+
+static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
+{
+ /* Preemption should only be disabled very rarely */
+ return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+}
+
bool i915_retire_requests(struct drm_i915_private *i915);
#endif /* I915_REQUEST_H */
return rq->sched.attr.priority | __NO_PREEMPTION;
}
+static inline int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
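+ /* An unpreemptable request masquerades as maximum priority */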
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ return prio;
+}
+
static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
trace_i915_request_in(rq, idx);
&engine->i915->guc.preempt_work[engine->id];
int prio = execlists->queue_priority_hint;
- if (i915_scheduler_need_preempt(prio, rq_prio(last))) {
+ if (i915_scheduler_need_preempt(prio,
+ effective_prio(last))) {
intel_write_status_page(engine,
I915_GEM_HWS_PREEMPT,
GUC_PREEMPT_INPROGRESS);
/* Serializes with i915_request_retire() */
boost = false;
spin_lock_irqsave(&rq->lock, flags);
- if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
boost = !atomic_fetch_inc(&rps->num_waiters);
- rq->waitboost = true;
+ rq->flags |= I915_REQUEST_WAITBOOST;
}
spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)