Commit 4cd4c5c0 authored by Monk Liu, committed by Alex Deucher
Browse files

drm/amdgpu: cleanup vega10 SRIOV code path



We can remove all of these now-unnecessary functions under
SRIOV for vega10 since:
1) the PSP L1 policy is force-enabled under SRIOV
2) the original logic always set all of the flags, which made it
   a dummy step

Besides that,
1) the ih_doorbell_range setting should also be skipped
for VEGA10 SRIOV.
2) the gfx_common registers should also be skipped
for VEGA10 SRIOV.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 67194518
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -1643,9 +1643,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;

		/* query the reg access mode at the very beginning */
		amdgpu_virt_init_reg_access_mode(adev);
	}

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
+0 −45
Original line number Diff line number Diff line
@@ -430,48 +430,3 @@ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)

	return clk;
}

/* Let the virtualization backend choose the register access mode, if it
 * provides a hook for that; otherwise this is a no-op. */
void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->init_reg_access_mode)
		return;

	virt->ops->init_reg_access_mode(adev);
}

/* True when running as an SRIOV VF whose negotiated register access mode
 * allows programming IH registers through the PSP. */
bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
{
	return amdgpu_sriov_vf(adev) &&
	       (adev->virt.reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH);
}

/* True when an SRIOV VF may program registers through the RLC; register
 * programming via RLC is not allowed while SRIOV runtime mode is active. */
bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_sriov_runtime(adev))
		return false;

	return (adev->virt.reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC) != 0;
}

/* True when an SRIOV VF should skip programming certain registers (the
 * host has already set them up).  Note: AMDGPU_VIRT_REG_SKIP_SEETING is
 * the flag's actual (typo'd) name in the header. */
bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
{
	return amdgpu_sriov_vf(adev) &&
	       (adev->virt.reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING);
}
+0 −13
Original line number Diff line number Diff line
@@ -48,12 +48,6 @@ struct amdgpu_vf_error_buffer {
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};

/* According to the fw feature, some new reg access modes are supported */
#define AMDGPU_VIRT_REG_ACCESS_LEGACY          (1 << 0) /* directly mmio */
#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH      (1 << 1) /* by PSP */
#define AMDGPU_VIRT_REG_ACCESS_RLC             (1 << 2) /* by RLC */
#define AMDGPU_VIRT_REG_SKIP_SEETING           (1 << 3) /* Skip setting reg */

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 */
@@ -65,7 +59,6 @@ struct amdgpu_virt_ops {
	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
	void (*init_reg_access_mode)(struct amdgpu_device *adev);
};

/*
@@ -315,10 +308,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);

void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);

#endif
+8 −9
Original line number Diff line number Diff line
@@ -715,14 +715,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (!amdgpu_virt_support_skip_setting(adev)) {
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		}
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
@@ -3801,6 +3799,7 @@ static int gfx_v9_0_hw_init(void *handle)
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		gfx_v9_0_init_golden_registers(adev);

	gfx_v9_0_constants_init(adev);
+1 −1
Original line number Diff line number Diff line
@@ -1201,7 +1201,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_virt_support_skip_setting(adev))
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
Loading