Commit d6376374 authored by Chris Wilson's avatar Chris Wilson
Browse files

drm/i915: Only allocate preempt context when required



If we remove some hardcoded assumptions about the preempt context having
a fixed id, reserved from use by normal user contexts, we may only
allocate the i915_gem_context when required. Then the subsequent
decisions on using preemption reduce to having the preempt context
available.

v2: Include an assert that we don't allocate the preempt context twice.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180207210544.26351-3-chris@chris-wilson.co.uk


Reviewed-by: Michel Thierry <michel.thierry@intel.com>
parent 3fed1808
Loading
Loading
Loading
Loading
+16 −15
Original line number Diff line number Diff line
@@ -449,12 +449,18 @@ destroy_kernel_context(struct i915_gem_context **ctxp)
	i915_gem_context_free(ctx);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
	return HAS_LOGICAL_RING_PREEMPTION(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;
	int err;

	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(dev_priv->kernel_context);
	GEM_BUG_ON(dev_priv->preempt_context);

	INIT_LIST_HEAD(&dev_priv->contexts.list);
	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -468,8 +474,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		err = PTR_ERR(ctx);
		goto err;
		return PTR_ERR(ctx);
	}
	/*
	 * For easy recognisablity, we want the kernel context to be 0 and then
@@ -479,23 +484,18 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	if (needs_preempt_context(dev_priv)) {
		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default preempt context\n");
		err = PTR_ERR(ctx);
		goto err_kernel_context;
	}
		if (!IS_ERR(ctx))
			dev_priv->preempt_context = ctx;
		else
			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->engine[RCS]->context_size ? "logical" :
			 "fake");
	return 0;

err_kernel_context:
	destroy_kernel_context(&dev_priv->kernel_context);
err:
	return err;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -521,6 +521,7 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	if (i915->preempt_context)
		destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

+3 −3
Original line number Diff line number Diff line
@@ -631,7 +631,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
	if (engine->i915->preempt_context) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
		if (IS_ERR(ring)) {
@@ -656,7 +656,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
	engine->context_unpin(engine, engine->i915->kernel_context);
@@ -686,7 +686,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
}
+13 −11
Original line number Diff line number Diff line
@@ -688,7 +688,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
		goto unlock;

	if (port_isset(port)) {
		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
		if (engine->i915->preempt_context) {
			struct guc_preempt_work *preempt_work =
				&engine->i915->guc.preempt_work[engine->id];

@@ -984,6 +984,7 @@ static int guc_clients_create(struct intel_guc *guc)
	}
	guc->execbuf_client = client;

	if (dev_priv->preempt_context) {
		client = guc_client_alloc(dev_priv,
					  INTEL_INFO(dev_priv)->ring_mask,
					  GUC_CLIENT_PRIORITY_KMD_HIGH,
@@ -995,6 +996,7 @@ static int guc_clients_create(struct intel_guc *guc)
			return PTR_ERR(client);
		}
		guc->preempt_client = client;
	}

	return 0;
}
+11 −6
Original line number Diff line number Diff line
@@ -161,7 +161,6 @@
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
#define PREEMPT_ID 0x1

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
@@ -448,7 +447,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
		&engine->i915->preempt_context->engine[engine->id];
	unsigned int n;

	GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
	GEM_BUG_ON(engine->execlists.preempt_complete_status !=
		   upper_32_bits(ce->lrc_desc));
	GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));

	memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
@@ -528,7 +528,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
			goto unlock;

		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
		if (engine->i915->preempt_context &&
		    rb_entry(rb, struct i915_priolist, node)->priority >
		    max(last->priotree.priority, 0)) {
			/*
@@ -844,7 +844,7 @@ static void execlists_submission_tasklet(unsigned long data)
			GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);

			if (status & GEN8_CTX_STATUS_COMPLETE &&
			    buf[2*head + 1] == PREEMPT_ID) {
			    buf[2*head + 1] == execlists->preempt_complete_status) {
				GEM_TRACE("%s preempt-idle\n", engine->name);

				execlists_cancel_port_requests(execlists);
@@ -1967,7 +1967,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
	engine->i915->caps.scheduler =
		I915_SCHEDULER_CAP_ENABLED |
		I915_SCHEDULER_CAP_PRIORITY;
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
	if (engine->i915->preempt_context)
		engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}

@@ -2045,6 +2045,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
	engine->execlists.elsp =
		engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));

	engine->execlists.preempt_complete_status = ~0u;
	if (engine->i915->preempt_context)
		engine->execlists.preempt_complete_status =
			upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);

	return 0;

error:
@@ -2307,7 +2312,7 @@ populate_lr_context(struct i915_gem_context *ctx,
	if (!engine->default_state)
		regs[CTX_CONTEXT_CONTROL + 1] |=
			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
	if (ctx->hw_id == PREEMPT_ID)
	if (ctx == ctx->i915->preempt_context)
		regs[CTX_CONTEXT_CONTROL + 1] |=
			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+5 −0
Original line number Diff line number Diff line
@@ -279,6 +279,11 @@ struct intel_engine_execlists {
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;
};

#define INTEL_ENGINE_CS_MAX_NAME 8
Loading