Commit e8e61f10 authored by Chris Wilson
Browse files

drm/i915/selftests: Flush the active callbacks



Before checking the current i915_active state for the asynchronous work
we submitted, flush any ongoing callback. This ensures that our sampling
is robust and does not sporadically fail due to bad timing as the work
is running on another cpu.

v2: Drop the fence callback sync, retiring under the lock should be good
enough to synchronize with engine_retire() and the
intel_gt_retire_requests() background worker.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191122132404.690440-1-chris@chris-wilson.co.uk
parent cfd821b2
Loading
Loading
Loading
Loading
+11 −6
Original line number Diff line number Diff line
@@ -48,20 +48,22 @@ static int context_sync(struct intel_context *ce)

	mutex_lock(&tl->mutex);
	do {
		struct dma_fence *fence;
		struct i915_request *rq;
		long timeout;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
		if (list_empty(&tl->requests))
			break;

		timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
		rq = list_last_entry(&tl->requests, typeof(*rq), link);
		i915_request_get(rq);

		timeout = i915_request_wait(rq, 0, HZ / 10);
		if (timeout < 0)
			err = timeout;
		else
			i915_request_retire_upto(to_request(fence));
			i915_request_retire_upto(rq);

		dma_fence_put(fence);
		i915_request_put(rq);
	} while (!err);
	mutex_unlock(&tl->mutex);

@@ -273,6 +275,7 @@ out_engine:
	if (err)
		goto err;

	/* Wait for the barrier and in the process wait for engine to park */
	err = context_sync(engine->kernel_context);
	if (err)
		goto err;
@@ -282,6 +285,8 @@ out_engine:
		err = -EINVAL;
	}

	intel_engine_pm_flush(engine);

	if (intel_engine_pm_is_awake(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);