Commit 7d6ce558 authored by Chris Wilson
Browse files

drm/i915: Remove has-kernel-context



We can no longer assume execution ordering, and in particular we cannot
assume which context will execute last. One side-effect of this is that
we cannot determine if the kernel-context is resident on the GPU, so
remove the routines that claimed to do so.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308093657.8640-4-chris@chris-wilson.co.uk
parent c6eeb479
Loading
Loading
Loading
Loading
+0 −13
Original line number Diff line number Diff line
@@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active,
	active->retire = fn ?: i915_active_retire_noop;
}

static inline struct i915_request *
__i915_active_request_peek(const struct i915_active_request *active)
{
	/*
	 * Used by the error capture, which runs with the driver in an
	 * unknown state: bend the RCU rules (unconditionally claim the
	 * dereference is protected) so the known issue does not spam
	 * the logs. Work is in progress to make this safer.
	 */
	struct i915_request *rq;

	rq = rcu_dereference_protected(active->request, 1);
	return rq;
}

/**
 * i915_active_request_raw - return the active request
 * @active - the active tracker
+1 −20
Original line number Diff line number Diff line
@@ -2828,23 +2828,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
				   round_jiffies_up_relative(HZ));
}

/*
 * Sanity check that, after idling, the kernel context is the last one
 * seen on every engine: the timeline holds no outstanding request and
 * the last retired context is i915->kernel_context.
 */
static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* After a failed reset the GPU state is unknown; nothing to assert. */
	if (i915_reset_failed(i915))
		return;

	i915_retire_requests(i915);

	for_each_engine(engine, i915, id) {
		const struct intel_context *kctx =
			to_intel_context(i915->kernel_context, engine);

		GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
		GEM_BUG_ON(engine->last_retired_context != kctx);
	}
}

static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
					  unsigned long mask)
{
@@ -2864,9 +2847,7 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
				   I915_GEM_IDLE_TIMEOUT))
		result = false;

	if (result) {
		assert_kernel_context_is_current(i915);
	} else {
	if (!result) {
		/* Forcibly cancel outstanding work and leave the gpu quiet. */
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
+3 −13
Original line number Diff line number Diff line
@@ -38,25 +38,15 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {

/*
 * ggtt_is_idle - report whether the global GTT has no work in flight.
 *
 * With execution ordering no longer guaranteed we cannot tell which
 * context ran last, so "idle" reduces to: no requests outstanding.
 *
 * NOTE: the previous body additionally walked every engine calling
 * intel_engine_has_kernel_context(), which is removed by this change;
 * the merged-diff text also left that old body as unreachable code
 * after the new return — both are dropped here.
 */
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
	return !i915->gt.active_requests;
}

static int ggtt_flush(struct drm_i915_private *i915)
{
	int err;

	/* Not everything in the GGTT is tracked via vma (otherwise we
	/*
	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
+0 −31
Original line number Diff line number Diff line
@@ -1090,37 +1090,6 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine, or has been
 * executed if the engine is already idle, is the kernel context
 * (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct intel_context *kernel_context =
		to_intel_context(engine->i915->kernel_context, engine);
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	if (!engine->context_size)
		return true;

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_active_request_peek(&engine->timeline.last_request);
	if (rq)
		return rq->hw_context == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
+0 −1
Original line number Diff line number Diff line
@@ -935,7 +935,6 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engine_lost_context(struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);