Commit 23aae183 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'drm-intel-next-fixes-2019-11-07' of...

Merge tag 'drm-intel-next-fixes-2019-11-07' of git://anongit.freedesktop.org/drm/drm-intel

 into drm-next

One RCU fix and fix for suspend GEM_BUG_ON (with dependencies).

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191107145058.GA17401@jlahtine-desk.ger.corp.intel.com
parents 393fdfdb d9dace94
Loading
Loading
Loading
Loading
+3 −58
Original line number Diff line number Diff line
@@ -11,50 +11,6 @@

#include "i915_drv.h"

/*
 * Flush outstanding work by waiting for the GT to go idle.
 *
 * If the GT fails to idle within I915_GEM_IDLE_TIMEOUT, outstanding
 * requests are forcibly cancelled by wedging the GT so the GPU is left
 * quiet. Returns true only if the GT was not (and did not become)
 * wedged and all powermanagement wakerefs dropped cleanly.
 */
static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
	bool result = !intel_gt_is_wedged(gt);

	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		/* XXX hide warning from gem_eio */
		if (i915_modparams.reset) {
			dev_err(gt->i915->drm.dev,
				"Failed to idle engines, declaring wedged!\n");
			GEM_TRACE_DUMP();
		}

		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		result = false;
	}

	/* Any residual PM wakeref failing to drop also counts as failure. */
	if (intel_gt_pm_wait_for_idle(gt))
		result = false;

	return result;
}

/*
 * Park (suspend=true) or restore (suspend=false) the wakerefs held on
 * behalf of userspace: on suspend the user-held count is subtracted
 * from the GT's wakeref count, and on resume it is added back.
 * The intel_gt_pm_get/put pair keeps the GT awake while the count is
 * being manipulated.
 *
 * NOTE(review): presumably this stops user-held wakerefs from blocking
 * the GT from idling across suspend — confirm against gt->user_wakeref
 * producers (e.g. debugfs forcewake handling).
 */
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
	int count = atomic_read(&gt->user_wakeref);

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (likely(!count))
		return;

	intel_gt_pm_get(gt);
	if (suspend) {
		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
		atomic_sub(count, &gt->wakeref.count);
	} else {
		atomic_add(count, &gt->wakeref.count);
	}
	intel_gt_pm_put(gt);
}

void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");
@@ -62,8 +18,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	user_forcewake(&i915->gt, true);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
@@ -73,8 +27,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	intel_gt_suspend(&i915->gt);
	intel_uc_suspend(&i915->gt.uc);
	intel_gt_suspend_prepare(&i915->gt);

	i915_gem_drain_freed_objects(i915);
}
@@ -116,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
	 * machine in an unusable condition.
	 */

	intel_gt_suspend_late(&i915->gt);

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);
@@ -140,8 +95,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	i915_gem_sanitize(i915);
}

void i915_gem_resume(struct drm_i915_private *i915)
@@ -161,14 +114,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
	if (intel_gt_resume(&i915->gt))
		goto err_wedged;

	intel_uc_resume(&i915->gt.uc);

	/* Always reload a context for powersaving. */
	if (!switch_to_kernel_context_sync(&i915->gt))
		goto err_wedged;

	user_forcewake(&i915->gt, false);

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	return;
+4 −2
Original line number Diff line number Diff line
@@ -31,9 +31,11 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
	intel_uc_init_early(&gt->uc);
}

void intel_gt_init_hw_early(struct drm_i915_private *i915)
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
	i915->gt.ggtt = &i915->ggtt;
	gt->ggtt = ggtt;

	intel_gt_sanitize(gt, false);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
+1 −1
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
}

void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);
+91 −4
Original line number Diff line number Diff line
@@ -4,6 +4,8 @@
 * Copyright © 2019 Intel Corporation
 */

#include <linux/suspend.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_params.h"
@@ -18,6 +20,24 @@
#include "intel_rps.h"
#include "intel_wakeref.h"

/*
 * Park (suspend=true) or restore (suspend=false) the wakerefs held on
 * behalf of userspace: on suspend the user-held count is subtracted
 * from the GT's wakeref count, and on resume it is added back.
 * The intel_gt_pm_get/put pair keeps the GT awake while the count is
 * being manipulated.
 *
 * NOTE(review): presumably this stops user-held wakerefs from blocking
 * the GT from idling across suspend — confirm against gt->user_wakeref
 * producers (e.g. debugfs forcewake handling).
 */
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
	int count = atomic_read(&gt->user_wakeref);

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (likely(!count))
		return;

	intel_gt_pm_get(gt);
	if (suspend) {
		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
		atomic_sub(count, &gt->wakeref.count);
	} else {
		atomic_add(count, &gt->wakeref.count);
	}
	intel_gt_pm_put(gt);
}

static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -118,8 +138,22 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");
	GEM_TRACE("force:%s\n", yesno(force));

	/* Use a raw wakeref to avoid calling intel_display_power_get early */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	intel_uc_sanitize(&gt->uc);

@@ -127,6 +161,8 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
		if (engine->reset.prepare)
			engine->reset.prepare(engine);

	intel_uc_reset_prepare(&gt->uc);

	if (reset_engines(gt) || force) {
		for_each_engine(engine, gt, id)
			__intel_engine_reset(engine, false);
@@ -135,6 +171,9 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
	for_each_engine(engine, gt, id)
		if (engine->reset.finish)
			engine->reset.finish(engine);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

void intel_gt_pm_fini(struct intel_gt *gt)
@@ -148,6 +187,8 @@ int intel_gt_resume(struct intel_gt *gt)
	enum intel_engine_id id;
	int err = 0;

	GEM_TRACE("\n");

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
@@ -186,14 +227,22 @@ int intel_gt_resume(struct intel_gt *gt)
	}

	intel_rc6_enable(&gt->rc6);

	intel_uc_resume(&gt->uc);

	user_forcewake(gt, false);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);

	return err;
}

static void wait_for_idle(struct intel_gt *gt)
static void wait_for_suspend(struct intel_gt *gt)
{
	if (!intel_gt_pm_is_awake(gt))
		return;

	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		/*
		 * Forcibly cancel outstanding work and leave
@@ -205,27 +254,65 @@ static void wait_for_idle(struct intel_gt *gt)
	intel_gt_pm_wait_for_idle(gt);
}

void intel_gt_suspend(struct intel_gt *gt)
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
	user_forcewake(gt, true);
	wait_for_suspend(gt);

	intel_uc_suspend(&gt->uc);
}

/*
 * Report which system sleep state is being entered. Without
 * CONFIG_PM_SLEEP there is no global pm_suspend_target_state, so fall
 * back to reporting suspend-to-idle.
 */
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_PM_SLEEP)
	return pm_suspend_target_state;
#else
	return PM_SUSPEND_TO_IDLE;
#endif
}

void intel_gt_suspend_late(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* We expect to be idle already; but also want to be independent */
	wait_for_idle(gt);
	wait_for_suspend(gt);

	/*
	 * On disabling the device, we want to turn off HW access to memory
	 * that we no longer own.
	 *
	 * However, not all suspend-states disable the device. S0 (s2idle)
	 * is effectively runtime-suspend, the device is left powered on
	 * but needs to be put into a low power state. We need to keep
	 * powermanagement enabled, but we also retain system state and so
	 * it remains safe to keep on using our allocated memory.
	 */
	if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
		return;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		intel_rps_disable(&gt->rps);
		intel_rc6_disable(&gt->rc6);
		intel_llc_disable(&gt->llc);
	}

	intel_gt_sanitize(gt, false);

	GEM_TRACE("\n");
}

/*
 * Runtime-suspend hook for the GT: only the microcontrollers (uC) need
 * explicit handling here; the GEM_TRACE leaves a marker in the trace
 * log for debugging suspend flows.
 */
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);

	GEM_TRACE("\n");
}

int intel_gt_runtime_resume(struct intel_gt *gt)
{
	GEM_TRACE("\n");

	intel_gt_init_swizzling(gt);

	return intel_uc_runtime_resume(&gt->uc);
+2 −1
Original line number Diff line number Diff line
@@ -43,8 +43,9 @@ void intel_gt_pm_fini(struct intel_gt *gt);

void intel_gt_sanitize(struct intel_gt *gt, bool force);

void intel_gt_suspend_prepare(struct intel_gt *gt);
void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
void intel_gt_suspend(struct intel_gt *gt);

void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
Loading