Commit b51c2c67 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2020-11-25' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix Perf/OA workaround register corruption (Lionel)
- Correct a comment in GVT (Yan)
- Fix GT interrupt enabling/disabling, including a race condition that prevented the GPU from going idle (Chris)
- Free stale request on destroying the virtual engine (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201126010623.GA827684@intel.com
parents 5ead67bd 280ffdb6
+92 −51
@@ -30,18 +30,21 @@
#include "i915_trace.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"

static void irq_enable(struct intel_engine_cs *engine)
static bool irq_enable(struct intel_engine_cs *engine)
{
	if (!engine->irq_enable)
		return;
		return false;

	/* Caller disables interrupts */
	spin_lock(&engine->gt->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->gt->irq_lock);

	return true;
}

static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)

static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
	lockdep_assert_held(&b->irq_lock);

	if (!b->irq_engine || b->irq_armed)
		return;

	if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
	/*
	 * Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference.
	 */
	if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
		return;

	/*
@@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
	 */
	WRITE_ONCE(b->irq_armed, true);

	/*
	 * Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	if (!b->irq_enabled++)
		irq_enable(b->irq_engine);
	/* Requests may have completed before we could enable the interrupt. */
	if (!b->irq_enabled++ && irq_enable(b->irq_engine))
		irq_work_queue(&b->irq_work);
}

static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
	lockdep_assert_held(&b->irq_lock);

	if (!b->irq_engine || !b->irq_armed)
	if (!b->irq_engine)
		return;

	spin_lock(&b->irq_lock);
	if (!b->irq_armed)
		__intel_breadcrumbs_arm_irq(b);
	spin_unlock(&b->irq_lock);
}

static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(b->irq_engine);
@@ -105,8 +106,6 @@ static void add_signaling_context(struct intel_breadcrumbs *b,
{
	intel_context_get(ce);
	list_add_tail(&ce->signal_link, &b->signalers);
	if (list_is_first(&ce->signal_link, &b->signalers))
		__intel_breadcrumbs_arm_irq(b);
}

static void remove_signaling_context(struct intel_breadcrumbs *b,
@@ -174,34 +173,65 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
		intel_engine_add_retire(b->irq_engine, tl);
}

static bool __signal_request(struct i915_request *rq, struct list_head *signals)
static bool __signal_request(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);

	if (!__dma_fence_signal(&rq->fence)) {
		i915_request_put(rq);
		return false;
	}

	list_add_tail(&rq->signal_link, signals);
	return true;
}

static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
	node->next = head;
	return node;
}

static void signal_irq_work(struct irq_work *work)
{
	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
	const ktime_t timestamp = ktime_get();
	struct llist_node *signal, *sn;
	struct intel_context *ce, *cn;
	struct list_head *pos, *next;
	LIST_HEAD(signal);

	signal = NULL;
	if (unlikely(!llist_empty(&b->signaled_requests)))
		signal = llist_del_all(&b->signaled_requests);

	spin_lock(&b->irq_lock);

	if (list_empty(&b->signalers))
	/*
	 * Keep the irq armed until the interrupt after all listeners are gone.
	 *
	 * Enabling/disabling the interrupt is rather costly, roughly a couple
	 * of hundred microseconds. If we are proactive and enable/disable
	 * the interrupt around every request that wants a breadcrumb, we
	 * quickly drown in the extra orders of magnitude of latency imposed
	 * on request submission.
	 *
	 * So we try to be lazy, and keep the interrupts enabled until no
	 * more listeners appear within a breadcrumb interrupt interval (that
	 * is until a request completes that no one cares about). The
	 * observation is that listeners come in batches, and will often
	 * listen to a bunch of requests in succession. Note that on icl+,
	 * interrupts are always enabled due to concerns with rc6 being
	 * dysfunctional with per-engine interrupt masking.
	 *
	 * We also try to avoid raising too many interrupts, as they may
	 * be generated by userspace batches and it is unfortunately rather
	 * too easy to drown the CPU under a flood of GPU interrupts. Thus
	 * whenever no one appears to be listening, we turn off the interrupts.
	 * Fewer interrupts should conserve power -- at the very least, fewer
	 * interrupts draw less ire from other users of the system and tools
	 * like powertop.
	 */
	if (!signal && b->irq_armed && list_empty(&b->signalers))
		__intel_breadcrumbs_disarm_irq(b);

	list_splice_init(&b->signaled_requests, &signal);

	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
		GEM_BUG_ON(list_empty(&ce->signals));

@@ -218,7 +248,10 @@ static void signal_irq_work(struct irq_work *work)
			 * spinlock as the callback chain may end up adding
			 * more signalers to the same context or engine.
			 */
			__signal_request(rq, &signal);
			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
			if (__signal_request(rq))
				/* We own signal_node now, xfer to local list */
				signal = slist_add(&rq->signal_node, signal);
		}

		/*
@@ -238,9 +271,9 @@ static void signal_irq_work(struct irq_work *work)

	spin_unlock(&b->irq_lock);

	list_for_each_safe(pos, next, &signal) {
	llist_for_each_safe(signal, sn, signal) {
		struct i915_request *rq =
			list_entry(pos, typeof(*rq), signal_link);
			llist_entry(signal, typeof(*rq), signal_node);
		struct list_head cb_list;

		spin_lock(&rq->lock);
@@ -251,6 +284,9 @@ static void signal_irq_work(struct irq_work *work)

		i915_request_put(rq);
	}

	if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
		intel_breadcrumbs_arm_irq(b);
}

struct intel_breadcrumbs *
@@ -264,7 +300,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)

	spin_lock_init(&b->irq_lock);
	INIT_LIST_HEAD(&b->signalers);
	INIT_LIST_HEAD(&b->signaled_requests);
	init_llist_head(&b->signaled_requests);

	init_irq_work(&b->irq_work, signal_irq_work);

@@ -292,21 +328,22 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)

void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
	unsigned long flags;

	if (!READ_ONCE(b->irq_armed))
		return;

	spin_lock_irqsave(&b->irq_lock, flags);
	__intel_breadcrumbs_disarm_irq(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	if (!list_empty(&b->signalers))
		irq_work_queue(&b->irq_work);
	/* Kick the work once more to drain the signalers */
	irq_work_sync(&b->irq_work);
	while (unlikely(READ_ONCE(b->irq_armed))) {
		local_irq_disable();
		signal_irq_work(&b->irq_work);
		local_irq_enable();
		cond_resched();
	}
	GEM_BUG_ON(!list_empty(&b->signalers));
}

void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
{
	irq_work_sync(&b->irq_work);
	GEM_BUG_ON(!list_empty(&b->signalers));
	GEM_BUG_ON(b->irq_armed);
	kfree(b);
}

@@ -327,7 +364,8 @@ static void insert_breadcrumb(struct i915_request *rq,
	 * its signal completion.
	 */
	if (__request_completed(rq)) {
		if (__signal_request(rq, &b->signaled_requests))
		if (__signal_request(rq) &&
		    llist_add(&rq->signal_node, &b->signaled_requests))
			irq_work_queue(&b->irq_work);
		return;
	}
@@ -362,8 +400,11 @@ static void insert_breadcrumb(struct i915_request *rq,
	GEM_BUG_ON(!check_signal_order(ce, rq));
	set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);

	/* Check after attaching to irq, interrupt may have already fired. */
	if (__request_completed(rq))
	/*
	 * Defer enabling the interrupt to after HW submission and recheck
	 * the request as it may have completed and raised the interrupt as
	 * we were attaching it into the lists.
	 */
	irq_work_queue(&b->irq_work);
}
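
The hunk above also encodes an ordering fix: the interrupt is now enabled only after HW submission, and the explicit irq_work_queue() afterwards rechecks for requests that completed in the window before the interrupt was live. A hedged userspace sketch of that lost-wakeup pattern follows (gpu_thread, completed and irq_enabled are invented names, pthreads stand in for the real interrupt; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool completed;   /* the request's seqno has passed */
static atomic_bool irq_enabled; /* the user interrupt is unmasked */

static void *gpu_thread(void *arg)
{
	(void)arg;
	atomic_store(&completed, true);
	/* Hardware only raises the interrupt if it is unmasked right now. */
	if (atomic_load(&irq_enabled))
		puts("irq: breadcrumb signalled");
	return NULL;
}

int main(void)
{
	pthread_t gpu;

	pthread_create(&gpu, NULL, gpu_thread, NULL);

	/* ...attach the breadcrumb, submit... then enable the interrupt. */
	atomic_store(&irq_enabled, true);

	/*
	 * The recheck that closes the race: a completion that landed
	 * before irq_enabled was set raised no interrupt, so scan once
	 * ourselves. This mirrors queueing the irq_work right after
	 * irq_enable() returns true. (Both messages may print; an
	 * extra scan is harmless.)
	 */
	if (atomic_load(&completed))
		puts("recheck: request already completed, signal it now");

	pthread_join(gpu, NULL);
	return 0;
}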

+1 −1
@@ -35,7 +35,7 @@ struct intel_breadcrumbs {
	struct intel_engine_cs *irq_engine;

	struct list_head signalers;
	struct list_head signaled_requests;
	struct llist_head signaled_requests;

	struct irq_work irq_work; /* for use from inside irq_lock */
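
The list_head to llist_head switch is what allows signaled requests to be queued from any context without taking b->irq_lock: producers push entries one at a time and signal_irq_work() detaches the whole batch atomically. A minimal userspace sketch of the lock-free push-one/take-all pattern that <linux/llist.h> implements (illustrative only, not the kernel code; node, push and take_all are invented names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int seqno;
};

static _Atomic(struct node *) head;

/* llist_add(): lock-free LIFO push, usable from IRQ or process context. */
static void push(struct node *n)
{
	struct node *first = atomic_load(&head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&head, &first, n));
}

/* llist_del_all(): the consumer detaches the entire chain in one swap. */
static struct node *take_all(void)
{
	return atomic_exchange(&head, NULL);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->seqno = i;
		push(n);
	}

	/* Entries come back newest-first, as with the kernel's llist. */
	for (struct node *n = take_all(); n;) {
		struct node *next = n->next;

		printf("signal seqno %d\n", n->seqno);
		free(n);
		n = next;
	}
	return 0;
}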

+53 −7
@@ -182,6 +182,7 @@
struct virtual_engine {
	struct intel_engine_cs base;
	struct intel_context context;
	struct rcu_work rcu;

	/*
	 * We allow only a single request through the virtual engine at a time
@@ -5425,33 +5426,57 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
	return &ve->base.execlists.default_priolist.requests[0];
}

static void virtual_context_destroy(struct kref *kref)
static void rcu_virtual_context_destroy(struct work_struct *wrk)
{
	struct virtual_engine *ve =
		container_of(kref, typeof(*ve), context.ref);
		container_of(wrk, typeof(*ve), rcu.work);
	unsigned int n;

	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
	GEM_BUG_ON(ve->request);
	GEM_BUG_ON(ve->context.inflight);

	/* Preempt-to-busy may leave a stale request behind. */
	if (unlikely(ve->request)) {
		struct i915_request *old;

		spin_lock_irq(&ve->base.active.lock);

		old = fetch_and_zero(&ve->request);
		if (old) {
			GEM_BUG_ON(!i915_request_completed(old));
			__i915_request_submit(old);
			i915_request_put(old);
		}

		spin_unlock_irq(&ve->base.active.lock);
	}

	/*
	 * Flush the tasklet in case it is still running on another core.
	 *
	 * This needs to be done before we remove ourselves from the siblings'
	 * rbtrees as in the case it is running in parallel, it may reinsert
	 * the rb_node into a sibling.
	 */
	tasklet_kill(&ve->base.execlists.tasklet);

	/* Decouple ourselves from the siblings, no more access allowed. */
	for (n = 0; n < ve->num_siblings; n++) {
		struct intel_engine_cs *sibling = ve->siblings[n];
		struct rb_node *node = &ve->nodes[sibling->id].rb;
		unsigned long flags;

		if (RB_EMPTY_NODE(node))
			continue;

		spin_lock_irqsave(&sibling->active.lock, flags);
		spin_lock_irq(&sibling->active.lock);

		/* Detachment is lazily performed in the execlists tasklet */
		if (!RB_EMPTY_NODE(node))
			rb_erase_cached(node, &sibling->execlists.virtual);

		spin_unlock_irqrestore(&sibling->active.lock, flags);
		spin_unlock_irq(&sibling->active.lock);
	}
	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
	GEM_BUG_ON(!list_empty(virtual_queue(ve)));

	if (ve->context.state)
		__execlists_context_fini(&ve->context);
@@ -5464,6 +5489,27 @@ static void virtual_context_destroy(struct kref *kref)
	kfree(ve);
}

static void virtual_context_destroy(struct kref *kref)
{
	struct virtual_engine *ve =
		container_of(kref, typeof(*ve), context.ref);

	GEM_BUG_ON(!list_empty(&ve->context.signals));

	/*
	 * When destroying the virtual engine, we have to be aware that
	 * it may still be in use from a hardirq/softirq context causing
	 * the resubmission of a completed request (background completion
	 * due to preempt-to-busy). Before we can free the engine, we need
	 * to flush the submission code and tasklets that are still potentially
	 * accessing the engine. Flushing the tasklets requires process context,
	 * and since we can guard the resubmit onto the engine with an RCU read
	 * lock, we can delegate the free of the engine to an RCU worker.
	 */
	INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
	queue_rcu_work(system_wq, &ve->rcu);
}

static void virtual_engine_initial_hint(struct virtual_engine *ve)
{
	int swp;
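
The comment block above is the heart of this fix: the final kref put can happen in hardirq/softirq context while an RCU read-side section may still be resubmitting a completed request to the engine, so the real teardown is bounced to process context after an RCU grace period. A hedged kernel-style sketch of the idiom (struct obj and its functions are invented names; INIT_RCU_WORK(), queue_rcu_work() and to_rcu_work() are the real workqueue API):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct rcu_work rcu;
	/* ... state that readers may still touch under rcu_read_lock() ... */
};

static void obj_destroy_worker(struct work_struct *wrk)
{
	struct obj *o = container_of(to_rcu_work(wrk), struct obj, rcu);

	/*
	 * Process context, and all pre-existing RCU readers are done:
	 * safe to tasklet_kill(), take locks, and finally free.
	 */
	kfree(o);
}

static void obj_release(struct obj *o) /* e.g. the kref release */
{
	INIT_RCU_WORK(&o->rcu, obj_destroy_worker);
	queue_rcu_work(system_wq, &o->rcu); /* runs after a grace period */
}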
+1 −1
@@ -255,7 +255,7 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is in GVT's mmio save-restore list and in hardware
 * logical context image
+7 −2
@@ -909,8 +909,13 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		intel_uncore_write(uncore, oastatus_reg,
				   oastatus & ~GEN8_OASTATUS_REPORT_LOST);

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GEN_RANGE(uncore->i915, 8, 10) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
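
For reference, the replaced intel_uncore_write() rewrote the whole OASTATUS register, clobbering bits unrelated to the ones being acknowledged; intel_uncore_rmw(uncore, reg, clear, set) preserves everything outside the named bits. Its effective semantics as a one-line sketch (an illustration of the read-modify-write, not the driver's implementation):

/* Drop the bits in @clear, raise the bits in @set, keep the rest.
 * In the hunk above, OVERFLOW and REPORT_LOST are acknowledged, and
 * on gen8-10 only the HEAD/TAIL pointer-wrap bits are written back
 * as set -- the workaround for the Perf/OA register corruption named
 * in the commit summary.
 */
static inline unsigned int rmw(unsigned int old,
			       unsigned int clear, unsigned int set)
{
	return (old & ~clear) | set;
}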