Commit 9f58892e authored by Chris Wilson

drm/i915: Pull all the reset functionality together into i915_reset.c

Currently the code to reset the GPU and our state is spread widely
across a few files. Pull the logic together into a common file.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190116153304.787-1-chris@chris-wilson.co.uk
parent 18bb2bcc
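
Note: the new i915_reset.h itself is not shown in this excerpt. Inferred from the declarations this commit removes from i915_drv.h below, it presumably gathers something like the following (a sketch, not the verbatim header; the i915_gem_reset_* prototypes dropped from i915_drv.h likely move here too, possibly renamed):

/*
 * Sketch of the new i915_reset.h, inferred from the declarations this
 * commit removes from i915_drv.h; the actual header is not part of
 * this excerpt and may differ.
 */
#ifndef I915_RESET_H
#define I915_RESET_H

#include <linux/compiler.h>
#include <linux/types.h>

struct drm_i915_private;
struct intel_engine_cs;
struct intel_guc;

__printf(4, 5)
void i915_handle_error(struct drm_i915_private *i915,
		       u32 engine_mask,
		       unsigned long flags,
		       const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)

void i915_clear_error_registers(struct drm_i915_private *i915);

void i915_reset(struct drm_i915_private *i915,
		unsigned int stalled_mask,
		const char *reason);
int i915_reset_engine(struct intel_engine_cs *engine,
		      const char *reason);

bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);

int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
int intel_reset_guc(struct drm_i915_private *i915);
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

#endif /* I915_RESET_H */
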
drivers/gpu/drm/i915/Makefile  +2 −1
@@ -40,9 +40,10 @@ i915-y := i915_drv.o \
	  i915_mm.o \
	  i915_params.o \
	  i915_pci.o \
	  i915_reset.o \
	  i915_suspend.o \
	  i915_sw_fence.o \
	  i915_syncmap.o \
	  i915_sysfs.o \
	  intel_csr.o \
	  intel_device_info.o \
drivers/gpu/drm/i915/i915_debugfs.c  +2 −0
@@ -32,6 +32,8 @@
#include "intel_drv.h"
#include "intel_guc_submission.h"

#include "i915_reset.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
drivers/gpu/drm/i915/i915_drv.c  +1 −205
@@ -48,6 +48,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
@@ -2205,211 +2206,6 @@ static int i915_resume_switcheroo(struct drm_device *dev)
	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @i915: #drm_i915_private to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Caller must hold the struct_mutex.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void i915_reset(struct drm_i915_private *i915,
		unsigned int stalled_mask,
		const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	int ret;
	int i;

	GEM_TRACE("flags=%lx\n", error->flags);

	might_sleep();
	lockdep_assert_held(&i915->drm.struct_mutex);
	assert_rpm_wakelock_held(i915);
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));

	if (!test_bit(I915_RESET_HANDOFF, &error->flags))
		return;

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!i915_gem_unset_wedged(i915))
		goto wakeup;

	if (reason)
		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
	error->reset_count++;

	ret = i915_gem_reset_prepare(i915);
	if (ret) {
		dev_err(i915->drm.dev, "GPU recovery failed\n");
		goto taint;
	}

	if (!intel_has_gpu_reset(i915)) {
		if (i915_modparams.reset)
			dev_err(i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	for (i = 0; i < 3; i++) {
		ret = intel_gpu_reset(i915, ALL_ENGINES);
		if (ret == 0)
			break;

		msleep(100);
	}
	if (ret) {
		dev_err(i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	ret = i915_ggtt_enable_hw(i915);
	if (ret) {
		DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
			  ret);
		goto error;
	}

	i915_gem_reset(i915, stalled_mask);
	intel_overlay_reset(i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(i915);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto error;
	}

	i915_queue_hangcheck(i915);

finish:
	i915_gem_reset_finish(i915);
wakeup:
	clear_bit(I915_RESET_HANDOFF, &error->flags);
	wake_up_bit(&error->flags, I915_RESET_HANDOFF);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
	i915_gem_set_wedged(i915);
	i915_retire_requests(i915);
	goto finish;
}
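
The asserts at the top of i915_reset() encode its calling convention: the caller owns I915_RESET_BACKOFF, has set I915_RESET_HANDOFF, and holds both struct_mutex and a runtime-pm wakeref. A hedged sketch of such a caller follows (the real path goes through i915_handle_error(), which is not shown in this excerpt and differs in detail):

/*
 * Hedged caller sketch, not part of this commit: names and exact flow
 * are illustrative only; this shows the locking/flag protocol that
 * i915_reset() asserts on entry.
 */
static void example_trigger_reset(struct drm_i915_private *i915,
				  unsigned int stalled_mask,
				  const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;

	/* Claim the reset; only one owner at a time. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags))
		return;

	/* Hand the actual reset work over to i915_reset(). */
	set_bit(I915_RESET_HANDOFF, &error->flags);

	intel_runtime_pm_get(i915);	/* i915_reset() asserts a wakeref */
	mutex_lock(&i915->drm.struct_mutex);
	i915_reset(i915, stalled_mask, reason);
	mutex_unlock(&i915->drm.struct_mutex);
	intel_runtime_pm_put(i915);

	clear_bit(I915_RESET_BACKOFF, &error->flags);
	wake_up_all(&error->reset_queue);
}
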

static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *engine)
{
	return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
}

/**
 * i915_reset_engine - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
{
	struct i915_gpu_error *error = &engine->i915->gpu_error;
	struct i915_request *active_request;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

	active_request = i915_gem_reset_prepare_engine(engine);
	if (IS_ERR_OR_NULL(active_request)) {
		/* Either the previous reset failed, or we pardon the reset. */
		ret = PTR_ERR(active_request);
		goto out;
	}

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	error->reset_engine_count[engine->id]++;

	if (!engine->i915->guc.execbuf_client)
		ret = intel_gt_reset_engine(engine->i915, engine);
	else
		ret = intel_guc_reset_engine(&engine->i915->guc, engine);
	if (ret) {
		/* If we fail here, we expect to fall back to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->i915->guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	i915_gem_reset_engine(engine, active_request, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->init_hw(engine);
	if (ret)
		goto out;

out:
	intel_engine_cancel_stop_cs(engine);
	i915_gem_reset_finish_engine(engine);
	return ret;
}

static int i915_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
drivers/gpu/drm/i915/i915_drv.h  +1 −32
@@ -2615,19 +2615,7 @@ extern const struct dev_pm_ops i915_pm_ops;
extern int i915_driver_load(struct pci_dev *pdev,
			    const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);

extern void i915_reset(struct drm_i915_private *i915,
		       unsigned int stalled_mask,
		       const char *reason);
extern int i915_reset_engine(struct intel_engine_cs *engine,
			     const char *reason);

extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_reset_guc(struct drm_i915_private *dev_priv);
extern int intel_guc_reset_engine(struct intel_guc *guc,
				  struct intel_engine_cs *engine);

extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2670,20 +2658,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
			   &dev_priv->gpu_error.hangcheck_work, delay);
}

__printf(4, 5)
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       unsigned long flags,
		       const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)

extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);

void i915_clear_error_registers(struct drm_i915_private *dev_priv);

static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->gvt;
@@ -3048,18 +3027,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
	return READ_ONCE(error->reset_engine_count[engine->id]);
}

struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv,
		    unsigned int stalled_mask);
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
			   struct i915_request *request,
			   bool stalled);

void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
drivers/gpu/drm/i915/i915_gem.c  +12 −434
@@ -27,15 +27,6 @@

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
@@ -46,6 +37,18 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -2873,61 +2876,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
	return 0;
}

static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
					const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned int score;
	bool banned, bannable;

	atomic_inc(&ctx->guilty_count);

	bannable = i915_gem_context_is_bannable(ctx);
	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
	banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;

	/* Cool contexts don't accumulate client ban score */
	if (!bannable)
		return;

	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count),
				 score);
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		i915_gem_client_mark_guilty(ctx->file_priv, ctx);
}

static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
@@ -2958,376 +2906,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
	return active;
}

/*
 * Ensure the irq handler finishes and does not run again.
 * Also return the active request so that we only search for it once.
 */
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
	struct i915_request *request;

	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);

	request = engine->reset.prepare(engine);
	if (request && request->fence.error == -EIO)
		request = ERR_PTR(-EIO); /* Previous reset failed! */

	return request;
}

int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_request *request;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		request = i915_gem_reset_prepare_engine(engine);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			continue;
		}

		engine->hangcheck.active_request = request;
	}

	i915_gem_revoke_fences(dev_priv);
	intel_uc_sanitize(dev_priv);

	return err;
}

static void engine_skip_context(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_context *hung_ctx = request->gem_context;
	struct i915_timeline *timeline = request->timeline;
	unsigned long flags;

	GEM_BUG_ON(timeline == &engine->timeline);

	spin_lock_irqsave(&engine->timeline.lock, flags);
	spin_lock(&timeline->lock);

	list_for_each_entry_continue(request, &engine->timeline.requests, link)
		if (request->gem_context == hung_ctx)
			i915_request_skip(request, -EIO);

	list_for_each_entry(request, &timeline->requests, link)
		i915_request_skip(request, -EIO);

	spin_unlock(&timeline->lock);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

/* Returns the request if it was guilty of the hang */
static struct i915_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
		       struct i915_request *request,
		       bool stalled)
{
	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches, so it is safe to execute
	 * queued requests following the hang. Non-default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it must be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a
	 * random number of batches it has prepared for execution.
	 * Sophisticated clients can use gem_reset_stats_ioctl and dma
	 * fence status (exported via sync_file info ioctl on explicit
	 * fences) to observe when they lose the context state and
	 * should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms
	 * are safety valves if client submission ends up resulting in
	 * nothing more than subsequent hangs.
	 */

	if (i915_request_completed(request)) {
		GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
			  engine->name, request->global_seqno,
			  request->fence.context, request->fence.seqno,
			  intel_engine_get_seqno(engine));
		stalled = false;
	}

	if (stalled) {
		i915_gem_context_mark_guilty(request->gem_context);
		i915_request_skip(request, -EIO);

		/* If this context is now banned, skip all pending requests. */
		if (i915_gem_context_is_banned(request->gem_context))
			engine_skip_context(request);
	} else {
		/*
		 * Since this is not the hung engine, it may have advanced
		 * since the hang declaration. Double check by refinding
		 * the active request at the time of the reset.
		 */
		request = i915_gem_find_active_request(engine);
		if (request) {
			unsigned long flags;

			i915_gem_context_mark_innocent(request->gem_context);
			dma_fence_set_error(&request->fence, -EAGAIN);

			/* Rewind the engine to replay the incomplete rq */
			spin_lock_irqsave(&engine->timeline.lock, flags);
			request = list_prev_entry(request, link);
			if (&request->link == &engine->timeline.requests)
				request = NULL;
			spin_unlock_irqrestore(&engine->timeline.lock, flags);
		}
	}

	return request;
}
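
The policy comment above directs clients to the reset-stats ioctl to detect lost context state. A hedged userspace sketch, assuming the struct drm_i915_reset_stats layout from the i915 uapi headers:

/* Hedged userspace sketch: query per-context reset stats so a client
 * can notice lost context state after a hang and rebuild accordingly,
 * as the comment above suggests. Compile against libdrm's headers. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int check_reset_stats(int drm_fd, unsigned int ctx_id)
{
	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
		return -1;

	/* batch_active: our batches found guilty of a hang. */
	printf("resets=%u active=%u pending=%u\n",
	       stats.reset_count, stats.batch_active, stats.batch_pending);
	return stats.batch_active ? 1 : 0;
}
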

void i915_gem_reset_engine(struct intel_engine_cs *engine,
			   struct i915_request *request,
			   bool stalled)
{
	if (request)
		request = i915_gem_reset_request(engine, request, stalled);

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset.reset(engine, request);
}

void i915_gem_reset(struct drm_i915_private *dev_priv,
		    unsigned int stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct intel_context *ce;

		i915_gem_reset_engine(engine,
				      engine->hangcheck.active_request,
				      stalled_mask & ENGINE_MASK(id));
		ce = fetch_and_zero(&engine->last_retired_context);
		if (ce)
			intel_context_unpin(ce);

		/*
		 * Ostensibly, we always want a context loaded for powersaving,
		 * so if the engine is idle after the reset, send a request
		 * to load our scratch kernel_context.
		 *
		 * More mysteriously, if we leave the engine idle after a reset,
		 * the next userspace batch may hang, with what appears to be
		 * an incoherent read by the CS (presumably stale TLB). An
		 * empty request appears sufficient to paper over the glitch.
		 */
		if (intel_engine_is_idle(engine)) {
			struct i915_request *rq;

			rq = i915_request_alloc(engine,
						dev_priv->kernel_context);
			if (!IS_ERR(rq))
				i915_request_add(rq);
		}
	}

	i915_gem_restore_fences(dev_priv);
}

void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);

	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}

void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->hangcheck.active_request = NULL;
		i915_gem_reset_finish_engine(engine);
	}
}

static void nop_submit_request(struct i915_request *request)
{
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&request->engine->timeline.lock, flags);
	__i915_request_submit(request);
	intel_engine_write_global_seqno(request->engine, request->global_seqno);
	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
}

void i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	mutex_lock(&error->wedge_mutex);
	if (test_bit(I915_WEDGED, &error->flags)) {
		mutex_unlock(&error->wedge_mutex);
		return;
	}

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	for_each_engine(engine, i915, id)
		i915_gem_reset_prepare_engine(engine);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (INTEL_GEN(i915) >= 5)
		intel_gpu_reset(i915, ALL_ENGINES);

	for_each_engine(engine, i915, id) {
		engine->submit_request = nop_submit_request;
		engine->schedule = NULL;
	}
	i915->caps.scheduler = 0;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu();

	/* Mark all executing requests as skipped */
	for_each_engine(engine, i915, id)
		engine->cancel_requests(engine);

	for_each_engine(engine, i915, id) {
		i915_gem_reset_finish_engine(engine);
		intel_engine_wakeup(engine);
	}

	smp_mb__before_atomic();
	set_bit(I915_WEDGED, &error->flags);

	GEM_TRACE("end\n");
	mutex_unlock(&error->wedge_mutex);

	wake_up_all(&error->reset_queue);
}

bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct i915_timeline *tl;
	bool ret = false;

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!test_bit(I915_WEDGED, &error->flags))
		return true;

	if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
		return false;

	mutex_lock(&error->wedge_mutex);

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since then has returned EIO; for consistency we want
	 * all the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	list_for_each_entry(tl, &i915->gt.timelines, link) {
		struct i915_request *rq;

		rq = i915_gem_active_peek(&tl->last_request,
					  &i915->drm.struct_mutex);
		if (!rq)
			continue;

		/*
		 * We can't use our normal waiter as we want to
		 * avoid recursively trying to handle the current
		 * reset. The basic dma_fence_default_wait() installs
		 * a callback for dma_fence_signal(), which is
		 * triggered by our nop handler (indirectly: the
		 * callback enables the signaler thread, which is
		 * woken as nop_submit_request() advances the seqno;
		 * once the seqno passes the fence, the signaler
		 * signals the fence, waking us up).
		 */
		if (dma_fence_default_wait(&rq->fence, true,
					   MAX_SCHEDULE_TIMEOUT) < 0)
			goto unlock;
	}
	i915_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests);

	intel_engines_sanitize(i915, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);
	i915_gem_contexts_lost(i915);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);
	ret = true;
unlock:
	mutex_unlock(&i915->gpu_error.wedge_mutex);

	return ret;
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{