Commit bb5a2bdf authored by Yintian Tao's avatar Yintian Tao Committed by Alex Deucher
Browse files

drm/amdgpu: support dpm level modification under virtualization v3



Under vega10 virtualization, the smu ip block will not be added.
Therefore, we need to add a pp clk query and force dpm level function
at amdgpu_virt_ops to support the feature.

v2: add get_pp_clk existence check and use kzalloc to allocate buf

v3: return -ENOMEM for allocation failure and correct the coding style

Signed-off-by: Yintian Tao <yttao@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b0960c35
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -2471,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->lock_reset);
	mutex_init(&adev->virt.dpm_mutex);

	amdgpu_device_check_arguments(adev);

+4 −0
Original line number Diff line number Diff line
@@ -696,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
			   adev->virt.ops->get_pp_clk) {
			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
+16 −0
Original line number Diff line number Diff line
@@ -327,6 +327,18 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
		goto fail;
	}

        if (amdgpu_sriov_vf(adev)) {
                if (amdgim_is_hwperf(adev) &&
                    adev->virt.ops->force_dpm_level) {
                        mutex_lock(&adev->pm.mutex);
                        adev->virt.ops->force_dpm_level(adev, level);
                        mutex_unlock(&adev->pm.mutex);
                        return count;
                } else {
                        return -EINVAL;
		}
        }

	if (current_level == level)
		return count;

@@ -790,6 +802,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
	    adev->virt.ops->get_pp_clk)
		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);

	if (is_support_sw_smu(adev))
		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
+49 −0
Original line number Diff line number Diff line
@@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
	}
}

/*
 * parse_clk - pull one clock value out of a pp_dpm_* style level listing.
 *
 * @buf: NUL-terminated text in "N: <clk>..." form per line, as filled in
 *       by the host's get_pp_clk callback (see amdgpu_virt_get_sclk()).
 * @min: true  - stop at the first "N:" entry (lowest DPM level);
 *       false - keep scanning and keep the last entry (highest level).
 *
 * Returns the parsed value scaled by 100 (the listing is presumably in
 * MHz, giving 10 kHz units -- confirm against the GIM format), or 0 when
 * no "N:" entry is present in @buf.
 */
static uint32_t parse_clk(char *buf, bool min)
{
	char *ptr = buf;
	uint32_t clk = 0;

	do {
		ptr = strchr(ptr, ':');
		if (!ptr)
			break;
		/*
		 * Skip the ':' and the following separator character, but
		 * never step past the terminator on a malformed line
		 * (the old "ptr += 2" could read one byte out of bounds
		 * when ':' was the last character of the buffer).
		 */
		ptr++;
		if (*ptr)
			ptr++;
		clk = simple_strtoul(ptr, NULL, 10);
	} while (!min);

	return clk * 100;
}

/*
 * amdgpu_virt_get_sclk - query an SCLK DPM level from the host (GIM).
 *
 * @adev:   amdgpu device handle
 * @lowest: true for the lowest DPM level, false for the highest
 *
 * Fetches the pp_dpm_sclk level listing from the hypervisor through the
 * virt ops and parses the requested level out of the returned text.
 * The get_pp_clk return code is deliberately ignored: the buffer is
 * zero-filled by kzalloc, so on failure parse_clk() simply yields 0.
 * Callers must ensure adev->virt.ops->get_pp_clk is non-NULL.
 *
 * Returns the parsed clock value, or -ENOMEM when the temporary buffer
 * cannot be allocated.
 * FIXME(review): -ENOMEM is truncated into the unsigned return type, so
 * callers cannot tell the error apart from a (huge) clock value.
 */
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
{
	uint32_t clk;
	char *buf;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
	clk = parse_clk(buf, lowest);

	kfree(buf);

	return clk;
}

/*
 * amdgpu_virt_get_mclk - query an MCLK DPM level from the host (GIM).
 *
 * @adev:   amdgpu device handle
 * @lowest: true for the lowest DPM level, false for the highest
 *
 * Fetches the pp_dpm_mclk level listing from the hypervisor through the
 * virt ops and parses the requested level out of the returned text.
 * The get_pp_clk return code is deliberately ignored: the buffer is
 * zero-filled by kzalloc, so on failure parse_clk() simply yields 0.
 * Callers must ensure adev->virt.ops->get_pp_clk is non-NULL.
 *
 * Returns the parsed clock value, or -ENOMEM when the temporary buffer
 * cannot be allocated.
 * FIXME(review): -ENOMEM is truncated into the unsigned return type, so
 * callers cannot tell the error apart from a (huge) clock value.
 */
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
{
	uint32_t clk;
	char *buf;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
	clk = parse_clk(buf, lowest);

	kfree(buf);

	return clk;
}
+11 −0
Original line number Diff line number Diff line
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
	int (*reset_gpu)(struct amdgpu_device *adev);
	int (*wait_reset)(struct amdgpu_device *adev);
	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};

/*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
	/* VRAM LOST by GIM */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
	/* HW PERF SIM in GIM */
	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};

struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
	struct amdgpu_vf_error_buffer   vf_errors;
	struct amdgpu_virt_fw_reserve	fw_reserve;
	uint32_t gim_feature;
	/* protect DPM events to GIM */
	struct mutex                    dpm_mutex;
};

#define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
#endif
}

#define amdgim_is_hwperf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
					unsigned int key,
					unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);

#endif
Loading