Commit 5918045c authored by Christian König's avatar Christian König Committed by Alex Deucher
Browse files

drm/scheduler: rework job destruction

We now destroy finished jobs from the worker thread to make sure that
we never destroy a job currently in timeout processing.
By this we avoid holding the lock around the ring mirror list in drm_sched_stop,
which should solve a deadlock reported by a user.

v2: Remove unused variable.
v4: Move guilty job free into sched code.
v5:
Move sched->hw_rq_count to drm_sched_start to account for the counter
decrement in drm_sched_stop even when we don't call resubmit jobs
because the guilty job did signal.
v6: Remove unused variable.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109692



Acked-by: default avatarChunming Zhou <david1.zhou@amd.com>
Signed-off-by: default avatarChristian König <christian.koenig@amd.com>
Signed-off-by: default avatarAndrey Grodzovsky <andrey.grodzovsky@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1555599624-12285-3-git-send-email-andrey.grodzovsky@amd.com
parent b3198c38
Loading
Loading
Loading
Loading
+3 −6
Original line number Diff line number Diff line
@@ -3334,7 +3334,7 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_stop(&ring->sched);
		drm_sched_stop(&ring->sched, &job->base);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
@@ -3343,8 +3343,6 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
	if(job)
		drm_sched_increase_karma(&job->base);



	if (!amdgpu_sriov_vf(adev)) {

		if (!need_full_reset)
@@ -3482,8 +3480,7 @@ end:
	return r;
}

static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
					  struct amdgpu_job *job)
static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev)
{
	int i;

@@ -3623,7 +3620,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */

	/* Post ASIC reset for all devs .*/
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
		amdgpu_device_post_asic_reset(tmp_adev);

		if (r) {
			/* bad news, how to tell it to userspace ? */
+0 −5
Original line number Diff line number Diff line
@@ -118,7 +118,6 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	unsigned long flags;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
@@ -135,13 +134,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
		    mmu_size + gpu->buffer.size;

	/* Add in the active command buffers */
	spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		file_size += submit->cmdbuf.size;
		n_obj++;
	}
	spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);

	/* Add in the active buffer objects */
	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -183,14 +180,12 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer));

	spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
				      submit->cmdbuf.vaddr, submit->cmdbuf.size,
				      etnaviv_cmdbuf_get_va(&submit->cmdbuf));
	}
	spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
+1 −1
Original line number Diff line number Diff line
@@ -109,7 +109,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
	}

	/* block scheduler */
	drm_sched_stop(&gpu->sched);
	drm_sched_stop(&gpu->sched, sched_job);

	if(sched_job)
		drm_sched_increase_karma(sched_job);
+1 −1
Original line number Diff line number Diff line
@@ -258,7 +258,7 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
					 struct lima_sched_task *task)
{
	drm_sched_stop(&pipe->base);
	drm_sched_stop(&pipe->base, &task->base);

	if (task)
		drm_sched_increase_karma(&task->base);
+1 −1
Original line number Diff line number Diff line
@@ -387,7 +387,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
	mutex_lock(&pfdev->reset_lock);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_stop(&pfdev->js->queue[i].sched);
		drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);
Loading