Commit 30084b14 authored by Chris Wilson

drm/i915/gt: Flush other retirees inside intel_gt_retire_requests()

Our goal in wait_for_idle (intel_gt_retire_requests) is to retire the
current workload *and* their idle barriers. This requires us to notice
the late arrival of those barriers, which is done by inspecting the
list of active timelines. However, if a concurrent retirer is running,
that new timeline may not be added until after we drop the lock -- so
flush concurrent retirers before we take the lock and inspect the list.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/878
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191223211008.2371613-1-chris@chris-wilson.co.uk
parent b42d3b15
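
The race the message describes can be pictured with a small sketch
(hypothetical helper and simplified locking, not the driver's actual
code): a concurrent retirer only publishes its freshly created timeline
once it finishes, so we must wait for it before walking the list.

/*
 * Hypothetical sketch of the required ordering: flush concurrent
 * retirees first, then take the lock and inspect the active list.
 */
static bool timelines_active(struct intel_gt *gt)
{
	bool active;

	flush_submission(gt); /* wait for concurrent retirees */

	spin_lock(&gt->timelines.lock);
	active = !list_empty(&gt->timelines.active_list);
	spin_unlock(&gt->timelines.lock);

	return active;
}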
drivers/gpu/drm/i915/gt/intel_engine.h +1 −1
@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,

 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engine_flush_submission(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);

drivers/gpu/drm/i915/gt/intel_engine_cs.c +1 −5
@@ -1047,10 +1047,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	return idle;
 }
 
-bool intel_engine_flush_submission(struct intel_engine_cs *engine)
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
-	bool active = tasklet_is_locked(t);
 
 	if (__tasklet_is_scheduled(t)) {
 		local_bh_disable();
@@ -1061,13 +1060,10 @@ bool intel_engine_flush_submission(struct intel_engine_cs *engine)
 			tasklet_unlock(t);
 		}
 		local_bh_enable();
-		active = true;
 	}
 
 	/* Otherwise flush the tasklet if it was running on another cpu */
 	tasklet_unlock_wait(t);
-
-	return active;
 }
 
 /**
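
The body above is the usual tasklet-flush idiom: if the tasklet is
still pending, run it directly with softirqs disabled rather than wait
for ksoftirqd; either way, wait out a run in flight on another CPU. A
sketch of the same idiom against the generic tasklet API of this era,
assuming __tasklet_is_scheduled() merely tests TASKLET_STATE_SCHED (as
the i915 helper does):

#include <linux/interrupt.h>

/* Sketch of the flush idiom, not the driver's exact helper. */
static void flush_tasklet(struct tasklet_struct *t)
{
	if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
		local_bh_disable();
		if (tasklet_trylock(t)) {
			t->func(t->data); /* run the pending callback now */
			tasklet_unlock(t);
		}
		local_bh_enable();
	}

	/* If it is mid-run on another CPU, spin until it completes. */
	tasklet_unlock_wait(t);
}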
drivers/gpu/drm/i915/gt/intel_gt_requests.c +5 −11
@@ -26,21 +26,18 @@ static bool retire_requests(struct intel_timeline *tl)
 	return !i915_active_fence_isset(&tl->last_request);
 }
 
-static bool flush_submission(struct intel_gt *gt)
+static void flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	bool active = false;
 
 	if (!intel_gt_pm_is_awake(gt))
-		return false;
+		return;
 
 	for_each_engine(engine, gt, id) {
-		active |= intel_engine_flush_submission(engine);
-		active |= flush_work(&engine->retire_work);
+		intel_engine_flush_submission(engine);
+		flush_work(&engine->retire_work);
 	}
-
-	return active;
 }
 
 static void engine_retire(struct work_struct *work)
@@ -126,7 +123,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		timeout = -timeout, interruptible = false;
 
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
-
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex)) {
@@ -153,6 +149,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)

 		active_count += !retire_requests(tl);
 
+		flush_submission(gt); /* sync with concurrent retirees */
 		spin_lock(&timelines->lock);
 
 		/* Resume iteration after dropping lock */
@@ -173,9 +170,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	if (flush_submission(gt))
-		active_count++;
-
 	return active_count ? timeout : 0;
 }
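
Taken together, the synchronization now lives inside the retire loop
instead of being sampled once at the end; a simplified skeleton of the
resulting flow (elided steps marked as comments, not the verbatim
function):

	flush_submission(gt); /* kick the ksoftirqd tasklets */

	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		/* ... drop timelines->lock, retire tl's requests ... */

		flush_submission(gt); /* sync with concurrent retirees */
		spin_lock(&timelines->lock);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
	}
	spin_unlock(&timelines->lock);

	return active_count ? timeout : 0;

Because every pass through the loop now waits for concurrent retirees
before re-inspecting the list, the final flush_submission() check (and
with it the helpers' bool return values) became redundant and could be
dropped.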