Commit d19d71fc authored by Chris Wilson's avatar Chris Wilson
Browse files

drm/i915: Mark i915_request.timeline as a volatile, rcu pointer



The request->timeline is only valid until the request is retired (i.e.
before it is completed). Upon retiring the request, the context may be
unpinned and freed, and along with it the timeline may be freed. We
therefore need to be very careful when chasing rq->timeline that the
pointer does not disappear beneath us. The vast majority of users are in
a protected context, either during request construction or retirement,
where the timeline->mutex is held and the timeline cannot disappear. It
is those few off the beaten path (where we access a second timeline) that
need extra scrutiny -- to be added in the next patch after first adding
the warnings about dangerous access.

One complication, where we cannot use the timeline->mutex itself, is
during request submission onto hardware (under spinlocks). Here, we want
to check on the timeline to finalize the breadcrumb, and so we need to
impose a second rule to ensure that the request->timeline is indeed
valid. As we are submitting the request, its context and timeline must
be pinned, as it will be used by the hardware. Since it is pinned, we
know the request->timeline must still be valid, and we cannot submit the
idle barrier until after we release the engine->active.lock, ergo while
submitting and holding that spinlock, a second thread cannot release the
timeline.

v2: Don't be lazy inside selftests; hold the timeline->mutex for as long
as we need it, and tidy up acquiring the timeline with a bit of
refactoring (i915_active_add_request)

Signed-off-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: default avatarTvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-1-chris@chris-wilson.co.uk
parent c45e788d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
	if (IS_ERR(rq))
		return rq;

	err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
	err = i915_active_add_request(&overlay->last_flip, rq);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
+1 −1
Original line number Diff line number Diff line
@@ -211,7 +211,7 @@ static void clear_pages_worker(struct work_struct *work)
	 * keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	err = i915_active_ref(&vma->active, rq->timeline, rq);
	err = i915_active_add_request(&vma->active, rq);
	if (err)
		goto out_request;

+1 −1
Original line number Diff line number Diff line
@@ -910,7 +910,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
		if (emit)
			err = emit(rq, data);
		if (err == 0)
			err = i915_active_ref(&cb->base, rq->timeline, rq);
			err = i915_active_add_request(&cb->base, rq);

		i915_request_add(rq);
		if (err)
+2 −2
Original line number Diff line number Diff line
@@ -298,7 +298,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->hw_context == ce);

	if (rq->timeline != tl) { /* beware timeline sharing */
	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      SINGLE_DEPTH_NESTING);
		if (err)
@@ -319,7 +319,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
	 * words transfer the pinned ce object to tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_ref(&ce->active, rq->timeline, rq);
	return i915_active_add_request(&ce->active, rq);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
+50 −7
Original line number Diff line number Diff line
@@ -680,6 +680,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
				engine->status_page.vma))
		goto out_frame;

	mutex_lock(&frame->timeline.mutex);

	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.effective_size = frame->ring.size;
@@ -688,18 +690,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
	frame->rq.i915 = engine->i915;
	frame->rq.engine = engine;
	frame->rq.ring = &frame->ring;
	frame->rq.timeline = &frame->timeline;
	rcu_assign_pointer(frame->rq.timeline, &frame->timeline);

	dw = intel_timeline_pin(&frame->timeline);
	if (dw < 0)
		goto out_timeline;

	spin_lock_irq(&engine->active.lock);
	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
	spin_unlock_irq(&engine->active.lock);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	intel_timeline_unpin(&frame->timeline);

out_timeline:
	mutex_unlock(&frame->timeline.mutex);
	intel_timeline_fini(&frame->timeline);
out_frame:
	kfree(frame);
@@ -1196,6 +1202,27 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
	}
}

/*
 * Take a reference on rq->timeline for safe use outside the request's
 * locked lifetime. Returns NULL if the timeline is already being freed;
 * on success the caller must release the reference with intel_timeline_put().
 */
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->active.lock here, there
	 * is no control over the submission queue per-se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue. Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL; /* refcount hit zero: timeline is mid-destruction */
	rcu_read_unlock();

	return tl;
}

static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
@@ -1290,27 +1317,37 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
			int len;

			len = snprintf(hdr, sizeof(hdr),
				       "\t\tActive[%d: ",
				       "\t\tActive[%d]: ",
				       (int)(port - execlists->active));
			if (!i915_request_signaled(rq))
			if (!i915_request_signaled(rq)) {
				struct intel_timeline *tl = get_timeline(rq);

				len += snprintf(hdr + len, sizeof(hdr) - len,
						"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
						i915_ggtt_offset(rq->ring->vma),
						rq->timeline->hwsp_offset,
						tl ? tl->hwsp_offset : 0,
						hwsp_seqno(rq));

				if (tl)
					intel_timeline_put(tl);
			}
			snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			print_request(m, rq, hdr);
		}
		for (port = execlists->pending; (rq = *port); port++) {
			struct intel_timeline *tl = get_timeline(rq);
			char hdr[80];

			snprintf(hdr, sizeof(hdr),
				 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
				 (int)(port - execlists->pending),
				 i915_ggtt_offset(rq->ring->vma),
				 rq->timeline->hwsp_offset,
				 tl ? tl->hwsp_offset : 0,
				 hwsp_seqno(rq));
			print_request(m, rq, hdr);

			if (tl)
				intel_timeline_put(tl);
		}
		spin_unlock_irqrestore(&engine->active.lock, flags);
	} else if (INTEL_GEN(dev_priv) > 6) {
@@ -1388,6 +1425,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
	spin_lock_irqsave(&engine->active.lock, flags);
	rq = intel_engine_find_active_request(engine);
	if (rq) {
		struct intel_timeline *tl = get_timeline(rq);

		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start:  0x%08x\n",
@@ -1400,8 +1439,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space:  0x%08x\n",
			   rq->ring->space);

		if (tl) {
			drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
			   rq->timeline->hwsp_offset);
				   tl->hwsp_offset);
			intel_timeline_put(tl);
		}

		print_request_ring(m, rq);

Loading