Commit 93b0e8fe authored by Chris Wilson
Browse files

drm/i915: Mark intel_wakeref_get() as a sleeper



Assume that intel_wakeref_get() may take the mutex, and perform other
sleeping actions in the course of its callbacks and so use might_sleep()
to ensure that all callers abide. Anything that cannot sleep has to use
e.g. intel_wakeref_get_if_active() to guarantee its avoidance of the
non-atomic paths.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191121130528.309474-1-chris@chris-wilson.co.uk
parent c95d31c3
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -22,6 +22,11 @@ static inline void intel_gt_pm_get(struct intel_gt *gt)
	intel_wakeref_get(&gt->wakeref);
	intel_wakeref_get(&gt->wakeref);
}
}


/*
 * __intel_gt_pm_get: take an extra GT wakeref without the slow path.
 *
 * Thin wrapper around __intel_wakeref_get(); per that helper's contract,
 * only valid while the caller already holds a wakeref on @gt, since no
 * power-up (and hence no sleeping) is performed here.
 */
static inline void __intel_gt_pm_get(struct intel_gt *gt)
{
	__intel_wakeref_get(&gt->wakeref);
}

static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
{
	return intel_wakeref_get_if_active(&gt->wakeref);
	return intel_wakeref_get_if_active(&gt->wakeref);
+1 −1
Original line number Original line Diff line number Diff line
@@ -1121,7 +1121,7 @@ __execlists_schedule_in(struct i915_request *rq)
		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
	}
	}


	intel_gt_pm_get(engine->gt);
	__intel_gt_pm_get(engine->gt);
	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
	intel_engine_context_in(engine);
	intel_engine_context_in(engine);


+1 −1
Original line number Original line Diff line number Diff line
@@ -529,7 +529,7 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
	 * required if we generalise the inflight tracking.
	 * required if we generalise the inflight tracking.
	 */
	 */


	intel_gt_pm_get(rq->engine->gt);
	__intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
	return i915_request_get(rq);
}
}


+17 −2
Original line number Original line Diff line number Diff line
@@ -59,9 +59,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);


/**
/**
 * intel_wakeref_get: Acquire the wakeref
 * intel_wakeref_get: Acquire the wakeref
 * @i915: the drm_i915_private device
 * @wf: the wakeref
 * @wf: the wakeref
 * @fn: callback for acquiring the wakeref, called only on first acquire.
 *
 *
 * Acquire a hold on the wakeref. The first user to do so, will acquire
 * Acquire a hold on the wakeref. The first user to do so, will acquire
 * the runtime pm wakeref and then call the @fn underneath the wakeref
 * the runtime pm wakeref and then call the @fn underneath the wakeref
@@ -76,12 +74,29 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
static inline int
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
intel_wakeref_get(struct intel_wakeref *wf)
{
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);
		return __intel_wakeref_get_first(wf);


	return 0;
	return 0;
}
}


/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * Unlike intel_wakeref_get(), this performs no power-up and never
 * sleeps -- it is a bare atomic increment, relying on the existing
 * holder to keep the device awake.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	/* A zero count here means no wakeref is held -- a caller bug. */
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
/**
 * intel_wakeref_get_if_in_use: Acquire the wakeref
 * intel_wakeref_get_if_in_use: Acquire the wakeref
 * @wf: the wakeref
 * @wf: the wakeref