Commit 4856254d authored by Chris Wilson

drm/i915/gt: Repeat wait_for_idle for retirement workers



Since we may retire timelines from secondary workers,
intel_gt_retire_requests() is not always a reliable indicator that all
pending retirements are complete. If we detect that secondary workers
are still in progress, report busy and recommend that
intel_gt_wait_for_idle() repeat the retirement check.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191221180204.1201217-1-chris@chris-wilson.co.uk
parent e6ba7648
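
The new return value matters because the caller loops on it:
intel_gt_wait_for_idle() keeps calling intel_gt_retire_requests_timeout()
until a pass reports nothing outstanding. A minimal sketch of that retry
loop, assuming the upstream shape of the helper in intel_gt_requests.c
(the power-management check and signal handling are paraphrased for
illustration and are not part of this diff):

int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding. */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	/*
	 * After this patch, a nonzero return also covers "a secondary
	 * retirement worker was still busy", so repeat the retirement
	 * check until a pass completes cleanly.
	 */
	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout;
}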
drivers/gpu/drm/i915/gt/intel_engine.h +1 −1
@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 
 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+bool intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);

drivers/gpu/drm/i915/gt/intel_engine_cs.c +5 −1
@@ -1079,9 +1079,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	return idle;
 }
 
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+bool intel_engine_flush_submission(struct intel_engine_cs *engine)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
+	bool active = tasklet_is_locked(t);
 
 	if (__tasklet_is_scheduled(t)) {
 		local_bh_disable();
@@ -1092,10 +1093,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
 			tasklet_unlock(t);
 		}
 		local_bh_enable();
+		active = true;
 	}
 
 	/* Otherwise flush the tasklet if it was running on another cpu */
 	tasklet_unlock_wait(t);
+
+	return active;
 }
 
 /**
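
In the hunk above, active is seeded with tasklet_is_locked(t) so that a
submission tasklet already running on another CPU is reported as
activity, even though it is only waited on via tasklet_unlock_wait(). A
sketch of the state tests involved, assuming they are i915's thin
wrappers over the generic TASKLET_STATE_* bits from <linux/interrupt.h>
(the wrappers' exact home in the i915 headers is an assumption):

/* True while the tasklet body is executing on some CPU (assumed wrapper). */
static inline bool tasklet_is_locked(const struct tasklet_struct *t)
{
	return test_bit(TASKLET_STATE_RUN, &t->state);
}

/* True while the tasklet is queued awaiting ksoftirqd (assumed wrapper). */
static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
{
	return test_bit(TASKLET_STATE_SCHED, &t->state);
}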
drivers/gpu/drm/i915/gt/intel_gt_requests.c +14 −9
@@ -23,15 +23,18 @@ static void retire_requests(struct intel_timeline *tl)
 			break;
 }
 
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	bool active = false;
 
 	for_each_engine(engine, gt, id) {
-		intel_engine_flush_submission(engine);
-		flush_work(&engine->retire_work);
+		active |= intel_engine_flush_submission(engine);
+		active |= flush_work(&engine->retire_work);
 	}
+
+	return active;
 }
 
 static void engine_retire(struct work_struct *work)
@@ -120,9 +123,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-		if (!mutex_trylock(&tl->mutex)) {
-			active_count++; /* report busy to caller, try again? */
+		if (!mutex_trylock(&tl->mutex))
 			continue;
-		}
 
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));
@@ -147,10 +151,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
-		if (atomic_dec_and_test(&tl->active_count)) {
+		if (atomic_dec_and_test(&tl->active_count))
 			list_del(&tl->link);
-			active_count--;
-		}
+		else
+			active_count += i915_active_fence_isset(&tl->last_request);
 
 		mutex_unlock(&tl->mutex);
 
@@ -165,7 +169,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	flush_submission(gt);
+	if (flush_submission(gt))
+		active_count++;
 
 	return active_count ? timeout : 0;
 }
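
Note the accumulation in flush_submission() above: the kernel's
flush_work() returns true only when it had to wait for the work item to
finish, so active records whether a per-engine retirement worker still
had work outstanding. Combined with the final if (flush_submission(gt))
active_count++, the contract of intel_gt_retire_requests_timeout()
becomes: a nonzero return means "still busy, try again", while zero
means the pass found everything idle.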