Commit 88cec497 authored by Chris Wilson

drm/i915/gt: Declare timeline.lock to be irq-free



Now that we never allow the intel_wakeref callbacks to be invoked from
interrupt context, we do not need the irqsafe spinlock for the timeline.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191120170858.3965380-1-chris@chris-wilson.co.uk
parent 5cba2884
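For readers less familiar with the two locking idioms being exchanged in this patch: spin_lock_irqsave() saves and disables local interrupt state in `flags`, which is mandatory whenever the same lock may also be taken from interrupt context; once that possibility is ruled out, the plain spin_lock() suffices and the flags bookkeeping disappears. A minimal sketch of the two forms (hypothetical lock and functions, not code from this commit):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock for illustration */

/* Required form if example_lock may also be taken from an IRQ handler. */
static void update_irqsafe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* disables local IRQs */
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);	/* restores IRQ state */
}

/* Sufficient once the lock is never acquired from interrupt context. */
static void update_process_context(void)
{
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
}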
drivers/gpu/drm/i915/gt/intel_gt_requests.c +4 −5
@@ -33,7 +33,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
-	unsigned long flags;
 	bool interruptible;
 	LIST_HEAD(free);
 
@@ -43,7 +42,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
 
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex))
 			continue;
@@ -51,7 +50,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));
 		atomic_inc(&tl->active_count); /* pin the list element */
-		spin_unlock_irqrestore(&timelines->lock, flags);
+		spin_unlock(&timelines->lock);
 
 		if (timeout > 0) {
 			struct dma_fence *fence;
@@ -67,7 +66,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 		retire_requests(tl);
 
-		spin_lock_irqsave(&timelines->lock, flags);
+		spin_lock(&timelines->lock);
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
@@ -82,7 +81,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 			list_add(&tl->link, &free);
 		}
 	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
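The hunk above also shows the pattern that makes dropping the lock mid-walk safe: the current element is pinned before the unlock, and list_safe_reset_next() recomputes the stale next pointer once the lock is retaken. A condensed sketch of that shape, using hypothetical item/work names rather than the driver's own types:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct kref ref;
	struct list_head link;
};

static void release_item(struct kref *ref);	/* hypothetical destructor */
static void do_blocking_work(struct item *it);	/* hypothetical; may sleep */

static void walk_and_work(spinlock_t *lock, struct list_head *head)
{
	struct item *it, *next;

	spin_lock(lock);
	list_for_each_entry_safe(it, next, head, link) {
		kref_get(&it->ref);	/* pin: element must outlive the unlock */
		spin_unlock(lock);

		do_blocking_work(it);	/* lock dropped, sleeping is allowed */

		spin_lock(lock);
		/* 'next' may have been freed meanwhile; recompute it. */
		list_safe_reset_next(it, next, link);
		kref_put(&it->ref, release_item);
	}
	spin_unlock(lock);
}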
drivers/gpu/drm/i915/gt/intel_reset.c +4 −5
@@ -831,7 +831,6 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl;
-	unsigned long flags;
 	bool ok;
 
 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
@@ -853,7 +852,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 *
 	 * No more can be submitted until we reset the wedged bit.
 	 */
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	list_for_each_entry(tl, &timelines->active_list, link) {
 		struct dma_fence *fence;
 
@@ -861,7 +860,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		if (!fence)
 			continue;
 
-		spin_unlock_irqrestore(&timelines->lock, flags);
+		spin_unlock(&timelines->lock);
 
 		/*
 		 * All internal dependencies (i915_requests) will have
@@ -874,10 +873,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		dma_fence_put(fence);
 
 		/* Restart iteration after droping lock */
-		spin_lock_irqsave(&timelines->lock, flags);
+		spin_lock(&timelines->lock);
 		tl = list_entry(&timelines->active_list, typeof(*tl), link);
 	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 
 	/* We must reset pending GPU events before restoring our submission */
 	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
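A subtlety worth noting in the hunk above: after retaking the lock, tl is pointed at the list head itself rather than at a real element, so the iterator's next step advances to head->next, i.e. the first entry, and the walk restarts from the beginning. A minimal sketch of the same restart idiom (hypothetical names):

#include <linux/list.h>

struct item {
	struct list_head link;
};

static bool needs_work(struct item *it);		/* hypothetical */
static void work_outside_list(struct item *it);		/* hypothetical */

static void walk_with_restart(struct list_head *head)
{
	struct item *it;

	list_for_each_entry(it, head, link) {
		if (!needs_work(it))
			continue;

		work_outside_list(it);

		/*
		 * Point 'it' at the head sentinel: the loop increment then
		 * moves to head->next, the first element, restarting the
		 * iteration from scratch.
		 */
		it = list_entry(head, typeof(*it), link);
	}
}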
drivers/gpu/drm/i915/gt/intel_timeline.c +4 −6
@@ -332,7 +332,6 @@ int intel_timeline_pin(struct intel_timeline *tl)
 void intel_timeline_enter(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
-	unsigned long flags;
 
 	/*
 	 * Pretend we are serialised by the timeline->mutex.
@@ -358,16 +357,15 @@ void intel_timeline_enter(struct intel_timeline *tl)
 	if (atomic_add_unless(&tl->active_count, 1, 0))
 		return;
 
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	if (!atomic_fetch_inc(&tl->active_count))
 		list_add_tail(&tl->link, &timelines->active_list);
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 }
 
 void intel_timeline_exit(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
-	unsigned long flags;
 
 	/* See intel_timeline_enter() */
 	lockdep_assert_held(&tl->mutex);
@@ -376,10 +374,10 @@ void intel_timeline_exit(struct intel_timeline *tl)
 	if (atomic_add_unless(&tl->active_count, -1, 1))
 		return;
 
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	if (atomic_dec_and_test(&tl->active_count))
 		list_del(&tl->link);
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 
 	/*
 	 * Since this timeline is idle, all bariers upon which we were waiting
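Finally, intel_timeline_enter()/intel_timeline_exit() above are an instance of the lockless fast-path pattern: the counter is adjusted without the lock whenever the 0 <-> 1 edge is not involved, and the spinlock only serialises the list_add()/list_del() on that edge. A condensed sketch of the shape, with hypothetical names:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct tracked {
	atomic_t active;
	struct list_head link;
};

static void tracked_enter(struct tracked *t, spinlock_t *lock,
			  struct list_head *active_list)
{
	/* Fast path: count already non-zero, no list change required. */
	if (atomic_add_unless(&t->active, 1, 0))
		return;

	/* Slow path: the 0 -> 1 edge must be serialised with the list. */
	spin_lock(lock);
	if (!atomic_fetch_inc(&t->active))
		list_add_tail(&t->link, active_list);
	spin_unlock(lock);
}

static void tracked_exit(struct tracked *t, spinlock_t *lock)
{
	/* Fast path: count stays above zero after the decrement. */
	if (atomic_add_unless(&t->active, -1, 1))
		return;

	/* Slow path: the 1 -> 0 edge removes the element from the list. */
	spin_lock(lock);
	if (atomic_dec_and_test(&t->active))
		list_del(&t->link);
	spin_unlock(lock);
}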