author		Chris Wilson <chris@chris-wilson.co.uk>	2022-09-21 15:52:58 +0200
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>	2022-09-26 10:33:06 -0400
commit		6ef7d362123ecb5bf6d163bb9c7fd6ba2d8c968c (patch)
tree		c64b5691788af3834432479484c3f01622fa6c07
parent		f76349cf41451c5c42a99f18a9163377e4b364ff (diff)
download	linux-trace-6ef7d362123ecb5bf6d163bb9c7fd6ba2d8c968c.tar.gz
drm/i915/gt: Restrict forced preemption to the active context
When we submit a new pair of contexts to ELSP for execution, we start a timer by which point we expect the HW to have switched execution to the pending contexts. If the promotion to the new pair of contexts has not occurred, we declare the executing context to have hung and force the preemption to take place by resetting the engine and resubmitting the new contexts.

This can lead to an unfair situation where almost all of the preemption timeout is consumed by the first context, which just switches into the second context immediately prior to the timer firing and triggering the preemption reset (assuming that the timer interrupts before we process the CS events for the context switch). The second context hasn't yet had a chance to yield to the incoming ELSP (and send the ACK for the promotion) and so ends up being blamed for the reset.

If we see that a context switch has occurred since setting the preemption timeout, but have not yet received the ACK for the ELSP promotion, rearm the preemption timer and check again. This is especially significant if the first context was not schedulable and so we used the shortest timer possible, greatly increasing the chance of accidentally blaming the second, innocent context.

Fixes: 3a7a92aba8fb ("drm/i915/execlists: Force preemption")
Fixes: d12acee84ffb ("drm/i915/execlists: Cancel banned contexts on schedule-out")
Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Tested-by: Andrzej Hajda <andrzej.hajda@intel.com>
Cc: <stable@vger.kernel.org> # v5.5+
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220921135258.1714873-1-andrzej.hajda@intel.com
(cherry picked from commit 107ba1a2c705f4358f2602ec2f2fd821bb651f42)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
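The decision the patch adds can be summarized outside the kernel. Below is a minimal, standalone C sketch of that decision, assuming simplified stand-in types: record which request was active when the preemption timer was armed, and on expiry only reset if that same request is still active, otherwise re-arm. The names i915_request_model, execlists_model, arm_preempt_timer and preempt_timeout_expired are illustrative only, not the i915 API; the real change is in the diff that follows.

/*
 * Standalone model (not kernel code) of the fix: remember the request that
 * was active when the preemption timer was armed, and on expiry reset only
 * if that request is still the active one; otherwise re-arm the timer so
 * the freshly switched-in context gets its own chance to ACK the promotion.
 */
#include <stdbool.h>
#include <stdio.h>

struct i915_request_model { int id; };

struct execlists_model {
	const struct i915_request_model *active;          /* currently executing */
	const struct i915_request_model *preempt_target;  /* active when timer was armed */
	bool timer_armed;
};

/* Arm the preemption timer, recording the request we intend to blame. */
static void arm_preempt_timer(struct execlists_model *el,
			      const struct i915_request_model *rq)
{
	el->preempt_target = rq;
	el->timer_armed = true;
}

/* Called when the preemption timer fires; returns true if we would reset. */
static bool preempt_timeout_expired(struct execlists_model *el)
{
	el->timer_armed = false;

	if (el->active == el->preempt_target)
		return true;			/* same context still running: reset */

	arm_preempt_timer(el, el->active);	/* context switched: re-arm instead */
	return false;
}

int main(void)
{
	struct i915_request_model first = { .id = 1 }, second = { .id = 2 };
	struct execlists_model el = { .active = &first };

	arm_preempt_timer(&el, &first);

	/* The HW switches to the second context just before the timer fires. */
	el.active = &second;

	printf("reset engine? %s\n",
	       preempt_timeout_expired(&el) ? "yes" : "no (timer re-armed)");
	return 0;
}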
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_engine_types.h	15
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_execlists_submission.c	21
2 files changed, 35 insertions, 1 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 633a7e5dba3b4b..6b5d4ea22b673b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -166,6 +166,21 @@ struct intel_engine_execlists {
struct timer_list preempt;
/**
+ * @preempt_target: active request at the time of the preemption request
+ *
+ * We force a preemption to occur if the pending contexts have not
+ * been promoted to active upon receipt of the CS ack event within
+ * the timeout. This timeout may be chosen based on the target,
+ * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so
+ * if a context switch should happen just before the preemption
+ * timeout, we may shoot early at an innocent context. To prevent this,
+ * we record which context was active at the time of the preemption
+ * request and only reset that context upon the timeout.
+ */
+ const struct i915_request *preempt_target;
+
+ /**
* @ccid: identifier for contexts submitted to this engine
*/
u32 ccid;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4b909cb88cdfb7..c718e6dc40b515 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
if (!rq)
return 0;
+ /* Only allow ourselves to force reset the currently active context */
+ engine->execlists.preempt_target = rq;
+
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
@@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
if (unlikely(preempt_timeout(engine))) {
+ const struct i915_request *rq = *engine->execlists.active;
+
+ /*
+ * If after the preempt-timeout expired, we are still on the
+ * same active request/context as before we initiated the
+ * preemption, reset the engine.
+ *
+ * However, if we have processed a CS event to switch contexts,
+ * but not yet processed the CS event for the pending
+ * preemption, reset the timer allowing the new context to
+ * gracefully exit.
+ */
cancel_timer(&engine->execlists.preempt);
- engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ if (rq == engine->execlists.preempt_target)
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ else
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
}
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {