Commit 2a85e816 authored by Alex Deucher

drm/amdgpu/sdma4: APUs do not have a page queue

Don't use the paging queue on APUs.

Tested-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 161d0711
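
The shape of the fix is a single capability flag: has_page_queue is decided once at early-init time from the ASIC type, and every page-queue touch is gated on it. A minimal, self-contained sketch of that pattern in plain C, using hypothetical stand-in types rather than the real amdgpu structs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the amdgpu types, only to illustrate the pattern. */
enum asic_type { CHIP_RAVEN, CHIP_VEGA10 };

struct sdma_state {
	int num_instances;
	bool has_page_queue;	/* mirrors adev->sdma.has_page_queue */
};

/* Mirrors the early_init logic below: the Raven APU gets no page queue. */
static void sdma_early_init(struct sdma_state *sdma, enum asic_type asic)
{
	if (asic == CHIP_RAVEN) {
		sdma->num_instances = 1;
		sdma->has_page_queue = false;
	} else {
		sdma->num_instances = 2;
		sdma->has_page_queue = true;
	}
}

/* Every page-queue operation checks the flag first, as in the patch. */
static void sdma_start(const struct sdma_state *sdma)
{
	int i;

	for (i = 0; i < sdma->num_instances; i++) {
		printf("instance %d: gfx ring resumed\n", i);
		if (sdma->has_page_queue)
			printf("instance %d: page queue resumed\n", i);
	}
}

int main(void)
{
	struct sdma_state sdma = { 0 };

	sdma_early_init(&sdma, CHIP_RAVEN);
	sdma_start(&sdma);	/* prints only the gfx line: no page queue on the APU */
	return 0;
}

Built with any C compiler, this prints only the gfx-ring line for CHIP_RAVEN, mirroring how the hunks below skip sdma_v4_0_page_stop()/sdma_v4_0_page_resume() when the flag is clear.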
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h  +1 −0
@@ -51,6 +51,7 @@ struct amdgpu_sdma {
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
 	uint32_t                    srbm_soft_reset;
+	bool			has_page_queue;
 };
 
 /*
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c  +33 −20
@@ -746,6 +746,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
 	if (enable == false) {
 		sdma_v4_0_gfx_stop(adev);
 		sdma_v4_0_rlc_stop(adev);
+		if (adev->sdma.has_page_queue)
 			sdma_v4_0_page_stop(adev);
 	}

@@ -1115,6 +1116,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)

 		WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 		sdma_v4_0_gfx_resume(adev, i);
+		if (adev->sdma.has_page_queue)
 			sdma_v4_0_page_resume(adev, i);
 
 		/* set utc l1 enable flag always to 1 */
@@ -1457,10 +1459,13 @@ static int sdma_v4_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->asic_type == CHIP_RAVEN)
+	if (adev->asic_type == CHIP_RAVEN) {
 		adev->sdma.num_instances = 1;
-	else
+		adev->sdma.has_page_queue = false;
+	} else {
 		adev->sdma.num_instances = 2;
+		adev->sdma.has_page_queue = true;
+	}
 
 	sdma_v4_0_set_ring_funcs(adev);
 	sdma_v4_0_set_buffer_funcs(adev);
@@ -1522,6 +1527,7 @@ static int sdma_v4_0_sw_init(void *handle)
 		if (r)
 			return r;
 
+		if (adev->sdma.has_page_queue) {
 			ring = &adev->sdma.instance[i].page;
 			ring->ring_obj = NULL;
 			ring->use_doorbell = false;
@@ -1535,6 +1541,7 @@ static int sdma_v4_0_sw_init(void *handle)
 			if (r)
 				return r;
+		}
 	}
 
 	return r;
 }
@@ -1546,6 +1553,7 @@ static int sdma_v4_0_sw_fini(void *handle)

 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+		if (adev->sdma.has_page_queue)
 			amdgpu_ring_fini(&adev->sdma.instance[i].page);
 	}

@@ -1955,10 +1963,12 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
 		adev->sdma.instance[i].ring.me = i;
+		if (adev->sdma.has_page_queue) {
 			adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
 			adev->sdma.instance[i].page.me = i;
+		}
 	}
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
 	.set = sdma_v4_0_set_trap_irq_state,
@@ -2056,7 +2066,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)

 	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue)
+			sched = &adev->sdma.instance[i].page.sched;
+		else
 			sched = &adev->sdma.instance[i].ring.sched;
 		adev->vm_manager.vm_pte_rqs[i] =
 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}