Commit 07bfe6bf authored by Chris Wilson

drm/i915/execlists: Convert recursive defer_request() into iterative

As this engine owns the lock around rq->sched.link (for those waiters
submitted to this engine), we can use that link as an element in a local
list. We can thus replace the recursive algorithm with an iterative walk
over the ordered list of waiters.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-1-chris@chris-wilson.co.uk
parent 9a6a6440
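
Before the diff, a minimal self-contained sketch of the pattern the commit message describes may help. This is not the i915 code: struct req, its fixed-size waiters array, and the pared-down list helpers below are hypothetical stand-ins for i915_request, its waiters_list of i915_dependency nodes, and the kernel's <linux/list.h> primitives. What it demonstrates is the key idea: since the intrusive link is moved rather than copied, the link itself can serve as the element of a local list, turning the recursive descent over waiters into an iterative walk in which a request reachable through several paths is queued only once.

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *prev, *next; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static int list_empty(const struct list_head *h)
	{
		return h->next == h;
	}

	/* Unlink e from wherever it is and append it at the tail of h. */
	static void list_move_tail(struct list_head *e, struct list_head *h)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		e->prev = h->prev;
		e->next = h;
		h->prev->next = e;
		h->prev = e;
	}

	struct req {
		const char *name;
		struct req *waiters[4];	/* hypothetical: requests waiting on this one */
		struct list_head link;	/* intrusive link, doubles as local-list storage */
	};

	/* Defer rq and, transitively, every waiter, without recursion. */
	static void defer_request(struct req *rq, struct list_head *pl)
	{
		struct list_head list = LIST_HEAD_INIT(list);

		do {
			/* Move the request to the back of its priority list. */
			list_move_tail(&rq->link, pl);
			printf("deferred %s\n", rq->name);

			/* Queue each waiter on the local list instead of recursing;
			 * moving the link means a waiter reached twice is queued once. */
			for (int i = 0; i < 4 && rq->waiters[i]; i++)
				list_move_tail(&rq->waiters[i]->link, &list);

			/* Pop the next queued waiter; the walk ends when none remain. */
			rq = list_empty(&list) ? NULL :
				container_of(list.next, struct req, link);
		} while (rq);
	}

	int main(void)
	{
		struct list_head pl = LIST_HEAD_INIT(pl);
		struct req c = { .name = "c", .link = LIST_HEAD_INIT(c.link) };
		struct req b = { .name = "b", .waiters = { &c },
				 .link = LIST_HEAD_INIT(b.link) };
		struct req a = { .name = "a", .waiters = { &b, &c },
				 .link = LIST_HEAD_INIT(a.link) };

		defer_request(&a, &pl);
		return 0;
	}

Running it prints "deferred a", "deferred b", "deferred c" once each: b and c both land behind their signaler a on pl, mirroring the ordering the kernel version preserves.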
+27 −25
@@ -833,10 +833,9 @@ last_active(const struct intel_engine_execlists *execlists)
 	return *last;
 }
 
-static void
-defer_request(struct i915_request * const rq, struct list_head * const pl)
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
-	struct i915_dependency *p;
+	LIST_HEAD(list);
 
 	/*
 	 * We want to move the interrupted request to the back of
@@ -845,6 +844,10 @@ defer_request(struct i915_request * const rq, struct list_head * const pl)
 	 * flight and were waiting for the interrupted request to
 	 * be run after it again.
 	 */
+	do {
+		struct i915_dependency *p;
+
+		GEM_BUG_ON(i915_request_is_active(rq));
 		list_move_tail(&rq->sched.link, pl);
 
 		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
@@ -855,24 +858,23 @@ defer_request(struct i915_request * const rq, struct list_head * const pl)
 			if (w->engine != rq->engine)
 				continue;
 
-		/* No waiter should start before the active request completed */
-		GEM_BUG_ON(i915_request_started(w));
-
-		GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
-		if (rq_prio(w) < rq_prio(rq))
-			continue;
+			/* No waiter should start before its signaler */
+			GEM_BUG_ON(i915_request_started(w) &&
+				   !i915_request_completed(rq));
+
+			GEM_BUG_ON(i915_request_is_active(w));
+			if (list_empty(&w->sched.link))
+				continue; /* Not yet submitted; unready */
 
-		/*
-		 * This should be very shallow as it is limited by the
-		 * number of requests that can fit in a ring (<64) and
-		 * the number of contexts that can be in flight on this
-		 * engine.
-		 */
-		defer_request(w, pl);
+			if (rq_prio(w) < rq_prio(rq))
+				continue;
+
+			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+			list_move_tail(&w->sched.link, &list);
 		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
 }
 
 static void defer_active(struct intel_engine_cs *engine)