Commit 4413c474 authored by Chris Wilson
Browse files

drm/i915/execlists: Make submission tasklet hardirq safe



Prepare to allow the execlists submission to be run from underneath a
hardirq timer context (and not just the current softirq context) as is
required for fast preemption resets and context switches.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508210318.10274-1-chris@chris-wilson.co.uk
parent b9777c6f
Loading
Loading
Loading
Loading
+29 −13
Original line number Diff line number Diff line
@@ -356,10 +356,13 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
	struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline.lock, flags);

	spin_lock_irq(&engine->timeline.lock);
	__unwind_incomplete_requests(engine);
	spin_unlock_irq(&engine->timeline.lock);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static inline void
@@ -553,7 +556,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
	execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
}

static void execlists_dequeue(struct intel_engine_cs *engine)
static bool __execlists_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
@@ -563,6 +566,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
	struct rb_node *rb;
	bool submit = false;

	lockdep_assert_held(&engine->timeline.lock);

	/* Hardware submission is through 2 ports. Conceptually each port
	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
	 * static for a context, and unique to each, so we only execute
@@ -584,7 +589,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
	 * and context switches) submission.
	 */

	spin_lock_irq(&engine->timeline.lock);
	rb = execlists->first;
	GEM_BUG_ON(rb_first(&execlists->queue) != rb);

@@ -599,7 +603,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
						EXECLISTS_ACTIVE_USER));
		GEM_BUG_ON(!port_count(&port[0]));
		if (port_count(&port[0]) > 1)
			goto unlock;
			return false;

		/*
		 * If we write to ELSP a second time before the HW has had
@@ -609,11 +613,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
		 * the HW to indicate that it has had a chance to respond.
		 */
		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
			goto unlock;
			return false;

		if (need_preempt(engine, last, execlists->queue_priority)) {
			inject_preempt_context(engine);
			goto unlock;
			return false;
		}

		/*
@@ -638,7 +642,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
		 * priorities of the ports haven't been switch.
		 */
		if (port_count(&port[1]))
			goto unlock;
			return false;

		/*
		 * WaIdleLiteRestore:bdw,skl
@@ -743,14 +747,26 @@ done:
	/* We must always keep the beast fed if we have work piled up */
	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));

unlock:
	spin_unlock_irq(&engine->timeline.lock);

	if (submit) {
	/* Re-evaluate the executing context setup after each preemptive kick */
	if (last)
		execlists_user_begin(execlists, execlists->port);
		execlists_submit_ports(engine);

	return submit;
}

static void execlists_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	unsigned long flags;
	bool submit;

	spin_lock_irqsave(&engine->timeline.lock, flags);
	submit = __execlists_dequeue(engine);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);

	if (submit)
		execlists_submit_ports(engine);

	GEM_BUG_ON(port_isset(execlists->port) &&
		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
}