Commit 2e49520e authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2020-10-02' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Propagated from drm-intel-next-queued:
- Fix CRTC state checker (Ville)

Propagated from drm-intel-gt-next:
- Avoid implicit vmap for highmem on 32b (Chris)
- Prevent PAT attributes for writecombine if CPU doesn't support PAT (Chris)
- Clear the buffer pool age before use (Chris)
- Fix error code (Dan)
- Break up error capture compression loops (Chris)
- Fix uninitialized variable in context_create_request (Maarten)
- Check for errors on i915_vm_alloc_pt_stash to avoid NULL dereference (Matt)
- Serialize debugfs i915_gem_objects with ctx->mutex (Chris)
- Fix a rebase mistake caused during drm-intel-gt-next creation (Chris)
- Hold request reference for canceling an active context (Chris)
- Heartbeat fixes (Chris)
- Use unsigned during batch copies (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201002182610.GA2204465@intel.com
parents 083320eb c60b93cd
+9 −6
@@ -14304,7 +14304,6 @@ verify_crtc_state(struct intel_crtc *crtc,
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
-	bool active;
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
@@ -14314,16 +14313,19 @@ verify_crtc_state(struct intel_crtc *crtc,
	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);
-	active = dev_priv->display.get_pipe_config(crtc, pipe_config);
+	pipe_config->hw.enable = new_crtc_state->hw.enable;
+	pipe_config->hw.active =
+		dev_priv->display.get_pipe_config(crtc, pipe_config);
	/* we keep both pipes enabled on 830 */
-	if (IS_I830(dev_priv))
-		active = new_crtc_state->hw.active;
+	if (IS_I830(dev_priv) && pipe_config->hw.active)
+		pipe_config->hw.active = new_crtc_state->hw.active;
-	I915_STATE_WARN(new_crtc_state->hw.active != active,
+	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
-			new_crtc_state->hw.active, active);
+			new_crtc_state->hw.active, pipe_config->hw.active);
	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
@@ -14332,6 +14334,7 @@ verify_crtc_state(struct intel_crtc *crtc,
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;
+		bool active;
		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
+29 −44
@@ -390,24 +390,6 @@ __context_engines_static(const struct i915_gem_context *ctx)
	return rcu_dereference_protected(ctx->engines, true);
}

-static bool __reset_engine(struct intel_engine_cs *engine)
-{
-	struct intel_gt *gt = engine->gt;
-	bool success = false;
-
-	if (!intel_has_reset_engine(gt))
-		return false;
-
-	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
-			      &gt->reset.flags)) {
-		success = intel_engine_reset(engine, NULL) == 0;
-		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
-				      &gt->reset.flags);
-	}
-
-	return success;
-}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
@@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
	 * kill the banned context, we fallback to doing a local reset
	 * instead.
	 */
-	if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
-	    !intel_engine_pulse(engine))
-		return true;
-
-	/* If we are unable to send a pulse, try resetting this engine. */
-	return __reset_engine(engine);
+	return intel_engine_pulse(engine) == 0;
}

static bool
@@ -460,8 +437,8 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
		spin_lock(&locked->active.lock);
	}

-	if (!i915_request_completed(rq)) {
-		if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+	if (i915_request_is_active(rq)) {
+		if (!i915_request_completed(rq))
			*active = locked;
		ret = true;
	}
@@ -479,13 +456,26 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
	if (!ce->timeline)
		return NULL;

+	/*
+	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
+	 * to the request to prevent it being transferred to a new timeline
+	 * (and onto a new timeline->requests list).
+	 */
	rcu_read_lock();
-	list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
-		if (i915_request_is_active(rq) && i915_request_completed(rq))
-			continue;
+	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+		bool found;
+
+		/* timeline is already completed upto this point? */
+		if (!i915_request_get_rcu(rq))
+			break;

		/* Check with the backend if the request is inflight */
-		if (__active_engine(rq, &engine))
+		found = true;
+		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
+			found = __active_engine(rq, &engine);
+
+		i915_request_put(rq);
+		if (found)
			break;
	}
	rcu_read_unlock();
@@ -493,7 +483,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
	return engine;
}

-static void kill_engines(struct i915_gem_engines *engines)
+static void kill_engines(struct i915_gem_engines *engines, bool ban)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
@@ -508,7 +498,7 @@ static void kill_engines(struct i915_gem_engines *engines)
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

-		if (intel_context_set_banned(ce))
+		if (ban && intel_context_set_banned(ce))
			continue;

		/*
@@ -521,7 +511,7 @@ static void kill_engines(struct i915_gem_engines *engines)
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
-		if (engine && !__cancel_engine(engine))
+		if (engine && !__cancel_engine(engine) && ban)
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
@@ -531,8 +521,10 @@ static void kill_engines(struct i915_gem_engines *engines)
	}
}

-static void kill_stale_engines(struct i915_gem_context *ctx)
+static void kill_context(struct i915_gem_context *ctx)
{
+	bool ban = (!i915_gem_context_is_persistent(ctx) ||
+		    !ctx->i915->params.enable_hangcheck);
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
@@ -545,7 +537,7 @@ static void kill_stale_engines(struct i915_gem_context *ctx)

		spin_unlock_irq(&ctx->stale.lock);

-		kill_engines(pos);
+		kill_engines(pos, ban);

		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
@@ -557,11 +549,6 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
	spin_unlock_irq(&ctx->stale.lock);
}

-static void kill_context(struct i915_gem_context *ctx)
-{
-	kill_stale_engines(ctx);
-}

static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
@@ -596,7 +583,7 @@ static void engines_idle_release(struct i915_gem_context *ctx,

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
-		kill_engines(engines);
+		kill_engines(engines, true);

	i915_sw_fence_commit(&engines->fence);
}
@@ -654,8 +641,6 @@ static void context_close(struct i915_gem_context *ctx)
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
-	if (!i915_gem_context_is_persistent(ctx) ||
-	    !ctx->i915->params.enable_hangcheck)
	kill_context(ctx);

	i915_gem_context_put(ctx);
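The active_engine() rework above leans on the SLAB_TYPESAFE_BY_RCU rule spelled out in its new comment: a request found by walking the timeline under RCU may be freed and recycled at any moment, so it has to be pinned with a reference and then re-validated before it is trusted. A standalone sketch of that ordering, using illustrative names rather than the i915 API:

```c
/*
 * Illustrative sketch only: "struct item" and item_get_if_owned() are
 * made-up names, not part of this commit. The point is the order of
 * operations for SLAB_TYPESAFE_BY_RCU objects: pin first, re-check second.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct owner;

struct item {
	struct kref ref;
	struct owner __rcu *owner;	/* retargeted if the item is recycled */
	struct list_head link;
};

static void item_free(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

/* Called under rcu_read_lock(); returns a referenced item or NULL. */
static struct item *item_get_if_owned(struct item *it, struct owner *expected)
{
	if (!kref_get_unless_zero(&it->ref))	/* already on its way out */
		return NULL;

	if (rcu_access_pointer(it->owner) != expected) {
		kref_put(&it->ref, item_free);	/* recycled: no longer ours */
		return NULL;
	}

	return it;
}
```

The new active_engine() follows the same order: i915_request_get_rcu() pins the request, the rq->timeline comparison confirms it still belongs to this context's timeline, and only then is __active_engine() consulted.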
+5 −2
@@ -2267,8 +2267,8 @@ struct eb_parse_work {
	struct i915_vma *batch;
	struct i915_vma *shadow;
	struct i915_vma *trampoline;
-	unsigned int batch_offset;
-	unsigned int batch_length;
+	unsigned long batch_offset;
+	unsigned long batch_length;
};

static int __eb_parse(struct dma_fence_work *work)
@@ -2338,6 +2338,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
	struct eb_parse_work *pw;
	int err;

+	GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
+	GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));

	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
	if (!pw)
		return -ENOMEM;
+1 −1
@@ -364,7 +364,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,

	vma[1] = i915_vma_instance(dst, vm, NULL);
	if (IS_ERR(vma[1]))
-		return PTR_ERR(vma);
+		return PTR_ERR(vma[1]);

	i915_gem_ww_ctx_init(&ww, true);
	intel_engine_pm_get(ce->engine);
+28 −2
@@ -254,9 +254,35 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

+	if (GEM_WARN_ON(type == I915_MAP_WC &&
+			!static_cpu_has(X86_FEATURE_PAT)))
+		return NULL;

	/* A single page can always be kmapped */
-	if (n_pte == 1 && type == I915_MAP_WB)
-		return kmap(sg_page(sgt->sgl));
+	if (n_pte == 1 && type == I915_MAP_WB) {
+		struct page *page = sg_page(sgt->sgl);

+		/*
+		 * On 32b, highmem using a finite set of indirect PTE (i.e.
+		 * vmap) to provide virtual mappings of the high pages.
+		 * As these are finite, map_new_virtual() must wait for some
+		 * other kmap() to finish when it runs out. If we map a large
+		 * number of objects, there is no method for it to tell us
+		 * to release the mappings, and we deadlock.
+		 *
+		 * However, if we make an explicit vmap of the page, that
+		 * uses a larger vmalloc arena, and also has the ability
+		 * to tell us to release unwanted mappings. Most importantly,
+		 * it will fail and propagate an error instead of waiting
+		 * forever.
+		 *
+		 * So if the page is beyond the 32b boundary, make an explicit
+		 * vmap. On 64b, this check will be optimised away as we can
+		 * directly kmap any page on the system.
+		 */
+		if (!PageHighMem(page))
+			return kmap(page);
+	}

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
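The long comment in the final hunk explains why a single highmem page gets an explicit vmap() rather than kmap() on 32-bit builds. For reference, a minimal standalone sketch of that decision, assuming a kernel context; map_one_page() is a hypothetical helper, not code from this commit:

```c
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical helper (not part of this commit): map one page for CPU
 * access. Lowmem pages are permanently mapped, so kmap() cannot block;
 * highmem pages go through vmap(), which draws on the larger vmalloc
 * arena and returns NULL on exhaustion instead of sleeping forever on
 * a free kmap slot.
 */
static void *map_one_page(struct page *page)
{
	if (!PageHighMem(page))
		return kmap(page);

	return vmap(&page, 1, 0, PAGE_KERNEL);
}
```

Either path must be undone with its counterpart (kunmap() vs vunmap()), so a caller has to remember which mapping type it used alongside the returned pointer.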