Commit 266a240b authored by Chris Wilson
Browse files

drm/i915: Use engine->context_pin() to report the intel_ring



Since unifying ringbuffer/execlist submission to use
engine->pin_context, we ensure that the intel_ring is available before
we start constructing the request. We can therefore move the assignment
of the request->ring to the central i915_gem_request_alloc() and not
require it in every engine->request_alloc() callback. Another small step
towards simplification (of the core, but at a cost of handling error
pointers in less important callers of engine->pin_context).

v2: Rearrange a few branches to reduce impact of PTR_ERR() on gcc's code
generation.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170504093308.4137-1-chris@chris-wilson.co.uk
parent c944a308
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -180,6 +180,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_ring *ring;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -198,8 +199,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
	 * the guest context, gvt can unpin the shadow_ctx safely.
	 */
	ret = engine->context_pin(engine, shadow_ctx);
	if (ret) {
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		workload->status = ret;
		mutex_unlock(&dev_priv->drm.struct_mutex);
+6 −3
Original line number Diff line number Diff line
@@ -551,6 +551,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	struct intel_ring *ring;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -565,9 +566,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ret = engine->context_pin(engine, ctx);
	if (ret)
		return ERR_PTR(ret);
	ring = engine->context_pin(engine, ctx);
	if (IS_ERR(ring))
		return ERR_CAST(ring);
	GEM_BUG_ON(!ring);

	ret = reserve_seqno(engine);
	if (ret)
@@ -633,6 +635,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = ctx;
	req->ring = ring;

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
+6 −7
Original line number Diff line number Diff line
@@ -744,6 +744,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[RCS];
	struct intel_ring *ring;
	int ret;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -755,9 +756,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
	 *
	 * NB: implied RCS engine...
	 */
	ret = engine->context_pin(engine, stream->ctx);
	if (ret)
		goto unlock;
	ring = engine->context_pin(engine, stream->ctx);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/* Explicitly track the ID (instead of calling i915_ggtt_offset()
	 * on the fly) considering the difference with gen8+ and
@@ -766,10 +768,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
	dev_priv->perf.oa.specific_ctx_id =
		i915_ggtt_offset(stream->ctx->engine[engine->id].state);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
	return 0;
}

/**
+4 −3
Original line number Diff line number Diff line
@@ -469,6 +469,7 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);
@@ -480,9 +481,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = engine->context_pin(engine, engine->i915->kernel_context);
	if (ret)
		return ret;
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
+8 −9
Original line number Diff line number Diff line
@@ -740,7 +740,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
	/* XXX Do we need to preempt to make room for us and our deps? */
}

static int execlists_context_pin(struct intel_engine_cs *engine,
static struct intel_ring *
execlists_context_pin(struct intel_engine_cs *engine,
		      struct i915_gem_context *ctx)
{
	struct intel_context *ce = &ctx->engine[engine->id];
@@ -750,8 +751,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;
	if (likely(ce->pin_count++))
		goto out;
	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */

	if (!ce->state) {
@@ -788,7 +789,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
	ce->state->obj->mm.dirty = true;

	i915_gem_context_get(ctx);
	return 0;
out:
	return ce->ring;

unpin_map:
	i915_gem_object_unpin_map(ce->state->obj);
@@ -796,7 +798,7 @@ unpin_vma:
	__i915_vma_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
	return ERR_PTR(ret);
}

static void execlists_context_unpin(struct intel_engine_cs *engine,
@@ -833,9 +835,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	GEM_BUG_ON(!ce->ring);
	request->ring = ce->ring;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
Loading