Commit 43acd651 authored by Chris Wilson

drm/i915: Keep a per-engine request pool



Add a tiny per-engine request pool so that we should always have a
request available for power-management allocations from tricky
contexts. This reserve is expected to be used only for kernel
contexts when barriers must be emitted [almost] without fail.

The main consumer of this reserved request is expected to be engine-pm,
for which we know that there will always be at least the previous pm
request that we can reuse under memory pressure (so there should always
be a spare request available for engine_park()).
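
As a rough sketch of the mechanism (toy names only, not the actual i915
types): the pool is a single pointer per engine; freeing a request parks
it in that pointer if the slot is empty, and an allocation that cannot
block may atomically steal it back.

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct toy_request {
		int payload; /* stand-in for the real request state */
	};

	struct toy_engine {
		struct toy_request *request_pool; /* one spare, or NULL */
	};

	/* On release: if the reserve slot is empty, park the request there. */
	static void toy_request_free(struct toy_engine *e, struct toy_request *rq)
	{
		if (!cmpxchg(&e->request_pool, NULL, rq))
			return; /* old value was NULL, so rq is now the reserve */

		kfree(rq); /* a spare was already parked; really free this one */
	}

	/* On atomic-context allocation failure: claim the reserve, if any. */
	static struct toy_request *toy_request_alloc_reserved(struct toy_engine *e)
	{
		return xchg(&e->request_pool, NULL); /* may return NULL */
	}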

This is an alternative to using a comparatively bulky mempool, which
would require custom handling both for our reserved allocation
requirement and for protecting our TYPESAFE_BY_RCU slab cache. The
advantage of a mempool would be that it would let us keep a larger
per-engine request pool; should the need arise, converting over to
mempool is straightforward.
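
For contrast, a sketch of what the rejected mempool route could look like
(the helper below is hypothetical; mempool_create_slab_pool() and
i915_request_slab_cache() are real, and the custom TYPESAFE_BY_RCU
handling a mempool would require is deliberately elided):

	#include <linux/mempool.h>

	/*
	 * Hypothetical: a deeper per-engine reserve drawing from the request
	 * slab; min_nr elements are preallocated and guaranteed to
	 * mempool_alloc() callers even under memory pressure.
	 */
	static mempool_t *engine_create_request_pool(void)
	{
		return mempool_create_slab_pool(2 /* min_nr */,
						i915_request_slab_cache());
	}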

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-and-tested-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200402184037.21630-1-chris@chris-wilson.co.uk
parent 63d0f3ea
drivers/gpu/drm/i915/gt/intel_engine_cs.c +7 −0
@@ -431,7 +431,14 @@ void intel_engines_free(struct intel_gt *gt)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	/* Free the requests! dma-resv keeps fences around for an eternity */
+	rcu_barrier();
+
 	for_each_engine(engine, gt, id) {
+		if (engine->request_pool)
+			kmem_cache_free(i915_request_slab_cache(),
+					engine->request_pool);
+
 		kfree(engine);
 		gt->engine[id] = NULL;
 	}
drivers/gpu/drm/i915/gt/intel_engine_types.h +3 −0
@@ -308,6 +308,9 @@ struct intel_engine_cs {
 		struct list_head hold; /* ready requests, but on hold */
 	} active;
 
+	/* keep a request in reserve for a [pm] barrier under oom */
+	struct i915_request *request_pool;
+
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
drivers/gpu/drm/i915/i915_request.c +22 −5
@@ -101,6 +101,11 @@ static signed long i915_fence_wait(struct dma_fence *fence,
 				 timeout);
 }
 
+struct kmem_cache *i915_request_slab_cache(void)
+{
+	return global.slab_requests;
+}
+
 static void i915_fence_release(struct dma_fence *fence)
 {
 	struct i915_request *rq = to_request(fence);
@@ -115,6 +120,10 @@ static void i915_fence_release(struct dma_fence *fence)
 	i915_sw_fence_fini(&rq->submit);
 	i915_sw_fence_fini(&rq->semaphore);
 
+	/* Keep one request on each engine for reserved use under mempressure */
+	if (!cmpxchg(&rq->engine->request_pool, NULL, rq))
+		return;
+
 	kmem_cache_free(global.slab_requests, rq);
 }

@@ -629,14 +638,22 @@ static void retire_requests(struct intel_timeline *tl)
 }
 
 static noinline struct i915_request *
-request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl,
+		   struct i915_request **rsvd,
+		   gfp_t gfp)
 {
 	struct i915_request *rq;
 
-	if (list_empty(&tl->requests))
-		goto out;
+	/* If we cannot wait, dip into our reserves */
+	if (!gfpflags_allow_blocking(gfp)) {
+		rq = xchg(rsvd, NULL);
+		if (!rq) /* Use the normal failure path for one final WARN */
+			goto out;
 
-	if (!gfpflags_allow_blocking(gfp))
+		return rq;
+	}
+
+	if (list_empty(&tl->requests))
 		goto out;
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
@@ -721,7 +738,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq = kmem_cache_alloc(global.slab_requests,
 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		rq = request_alloc_slow(tl, gfp);
+		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
drivers/gpu/drm/i915/i915_request.h +2 −0
@@ -300,6 +300,8 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
 	return fence->ops == &i915_fence_ops;
 }
 
+struct kmem_cache *i915_request_slab_cache(void);
+
 struct i915_request * __must_check
 __i915_request_create(struct intel_context *ce, gfp_t gfp);
 struct i915_request * __must_check