Commit 6a5d1db9 authored by Chris Wilson
Browse files

drm/i915: Spin until breadcrumb threads are complete



When we need to reset the global seqno on wraparound, we have to wait
until the current rbtrees are drained (or otherwise the next waiter will
be out of sequence). The current mechanism to kick and spin until
complete may exit too early, as it would break if the target thread was
currently running. Instead, we must wake up the threads, but keep
spinning until the trees have been deleted.

In order to appease Tvrtko, busy spin rather than yield().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161108143719.32215-1-chris@chris-wilson.co.uk


Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
parent 677100ce
+2 −3
Original line number Original line Diff line number Diff line
@@ -241,9 +241,8 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)


	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
		while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
		while (intel_breadcrumbs_busy(i915))
			yield();
			cond_resched(); /* spin until threads are complete */
		yield();
	}
	}
	atomic_set(&timeline->next_seqno, seqno);
	atomic_set(&timeline->next_seqno, seqno);


+12 −19
Original line number Original line Diff line number Diff line
@@ -629,35 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
	cancel_fake_irq(engine);
	cancel_fake_irq(engine);
}
}


unsigned int intel_kick_waiters(struct drm_i915_private *i915)
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
{
	struct intel_engine_cs *engine;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int mask = 0;


	/* To avoid the task_struct disappearing beneath us as we wake up
	for_each_engine(engine, i915, id) {
	 * the process, we must first inspect the task_struct->state under the
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
	 * rcu_read_lock().
	 */
	for_each_engine(engine, i915, id)
		if (unlikely(intel_engine_wakeup(engine)))
			mask |= intel_engine_flag(engine);


	return mask;
		spin_lock_irq(&b->lock);
}


unsigned int intel_kick_signalers(struct drm_i915_private *i915)
		if (b->first_wait) {
{
			wake_up_process(b->first_wait->tsk);
	struct intel_engine_cs *engine;
			mask |= intel_engine_flag(engine);
	enum intel_engine_id id;
		}
	unsigned int mask = 0;


	for_each_engine(engine, i915, id) {
		if (b->first_signal) {
		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
			wake_up_process(b->signaler);
			wake_up_process(engine->breadcrumbs.signaler);
			mask |= intel_engine_flag(engine);
			mask |= intel_engine_flag(engine);
		}
		}

		spin_unlock_irq(&b->lock);
	}
	}


	return mask;
	return mask;
+1 −2
Original line number Original line Diff line number Diff line
@@ -578,7 +578,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)


void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);


#endif /* _INTEL_RINGBUFFER_H_ */
#endif /* _INTEL_RINGBUFFER_H_ */