Commit 10782166 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'drm-next-2020-06-08' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "These are the fixes from last week for the stuff merged in the merge
  window. It got a bunch of nouveau fixes for HDA audio on some new
  GPUs, some i915 and some amdgpu fixes.

  i915:
   - gvt: Fix one clang warning on debug only function
   - Use ARRAY_SIZE for coccicheck warning
   - Use after free fix for display global state.
   - Whitelisting context-local timestamp on Gen9 and two scheduler
     fixes with deps (Cc: stable)
   - Removal of write flag from sysfs files where ineffective

  nouveau:
   - HDMI/DP audio HDA fixes
   - display hang fix for Volta/Turing
   - GK20A regression fix.

  amdgpu:
   - Prevent hwmon accesses while GPU is in reset
   - CTF interrupt fix
   - Backlight fix for renoir
   - Fix for display sync groups
   - Display bandwidth validation workaround"

* tag 'drm-next-2020-06-08' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  drm/nouveau/kms/nv50-: clear SW state of disabled windows harder
  drm/nouveau: gr/gk20a: Use firmware version 0
  drm/nouveau/disp/gm200-: detect and potentially disable HDA support on some SORs
  drm/nouveau/disp/gp100: split SOR implementation from gm200
  drm/nouveau/disp: modify OR allocation policy to account for HDA requirements
  drm/nouveau/disp: split part of OR allocation logic into a function
  drm/nouveau/disp: provide hint to OR allocation about HDA requirements
  drm/amd/display: Revalidate bandwidth before commiting DC updates
  drm/amdgpu/display: use blanked rather than plane state for sync groups
  drm/i915/params: fix i915.fake_lmem_start module param sysfs permissions
  drm/i915/params: don't expose inject_probe_failure in debugfs
  drm/i915: Whitelist context-local timestamp in the gen9 cmdparser
  drm/i915: Fix global state use-after-frees with a refcount
  drm/i915: Check for awaits on still currently executing requests
  drm/i915/gt: Do not schedule normal requests immediately along virtual
  drm/i915: Reorder await_execution before await_request
  drm/nouveau/kms/gt215-: fix race with audio driver runpm
  drm/nouveau/disp/gm200-: fix NV_PDISP_SOR_HDMI2_CTRL(n) selection
  Revert "drm/amd/display: disable dcn20 abm feature for bring up"
  drm/amd/powerplay: ack the SMUToHost interrupt on receive V2
  ...
parents 20b0d067 8d286e2f
Loading
Loading
Loading
Loading
+171 −0
Original line number Diff line number Diff line
@@ -163,6 +163,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
	enum amd_pm_state_type pm;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -196,6 +199,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
	enum amd_pm_state_type  state;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -297,6 +303,9 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -334,6 +343,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -433,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
	struct pp_states_info data;
	int i, buf_len, ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -472,6 +487,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -508,6 +526,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
@@ -525,6 +546,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
	unsigned long idx;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
@@ -580,6 +604,9 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
	char *table = NULL;
	int size, ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -619,6 +646,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -721,6 +751,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (count > 127)
		return -EINVAL;

@@ -810,6 +843,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -859,6 +895,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
	uint64_t featuremask;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;
@@ -899,6 +938,9 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -955,6 +997,9 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1018,6 +1063,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1049,6 +1097,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1076,6 +1127,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
	uint32_t mask = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1107,6 +1161,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1134,6 +1191,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1167,6 +1227,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1194,6 +1257,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1227,6 +1293,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1254,6 +1323,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1287,6 +1359,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1314,6 +1389,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1347,6 +1425,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
	uint32_t value = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1372,6 +1453,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
	int ret;
	long int value;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
@@ -1410,6 +1494,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
	uint32_t value = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1435,6 +1522,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
	int ret;
	long int value;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
@@ -1493,6 +1583,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1528,6 +1621,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (adev->in_gpu_reset)
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
@@ -1587,6 +1683,9 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;
@@ -1620,6 +1719,9 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;
@@ -1658,6 +1760,9 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

@@ -1694,6 +1799,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

@@ -1888,6 +1996,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (adev->in_gpu_reset)
		return -EPERM;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

@@ -2019,6 +2130,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
	u32 pwm_mode = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;
@@ -2050,6 +2164,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
	int err, ret;
	int value;

	if (adev->in_gpu_reset)
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;
@@ -2099,6 +2216,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
	u32 value;
	u32 pwm_mode;

	if (adev->in_gpu_reset)
		return -EPERM;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;
@@ -2148,6 +2268,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
	int err;
	u32 speed = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;
@@ -2178,6 +2301,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
	int err;
	u32 speed = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;
@@ -2207,6 +2333,9 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
	u32 size = sizeof(min_rpm);
	int r;

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -2232,6 +2361,9 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
	u32 size = sizeof(max_rpm);
	int r;

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -2256,6 +2388,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
	int err;
	u32 rpm = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;
@@ -2285,6 +2420,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
	u32 value;
	u32 pwm_mode;

	if (adev->in_gpu_reset)
		return -EPERM;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;
@@ -2331,6 +2469,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
	u32 pwm_mode = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;
@@ -2363,6 +2504,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
	int value;
	u32 pwm_mode;

	if (adev->in_gpu_reset)
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;
@@ -2403,6 +2547,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
	u32 vddgfx;
	int r, size = sizeof(vddgfx);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -2435,6 +2582,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
	u32 vddnb;
	int r, size = sizeof(vddnb);

	if (adev->in_gpu_reset)
		return -EPERM;

	/* only APUs have vddnb */
	if  (!(adev->flags & AMD_IS_APU))
		return -EINVAL;
@@ -2472,6 +2622,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
	int r, size = sizeof(u32);
	unsigned uw;

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -2508,6 +2661,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
	ssize_t size;
	int r;

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -2537,6 +2693,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
	ssize_t size;
	int r;

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -2567,6 +2726,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
	int err;
	u32 value;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

@@ -2605,6 +2767,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
	uint32_t sclk;
	int r, size = sizeof(sclk);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -2637,6 +2802,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
	uint32_t mclk;
	int r, size = sizeof(mclk);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;
@@ -3497,6 +3665,9 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
	u32 flags = 0;
	int r;

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;
+4 −7
Original line number Diff line number Diff line
@@ -1356,7 +1356,7 @@ static int dm_late_init(void *handle)
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = false;
	bool ret;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);
@@ -1377,13 +1377,10 @@ static int dm_late_init(void *handle)
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
+26 −4
Original line number Diff line number Diff line
@@ -1016,9 +1016,17 @@ static void program_timing_sync(
			}
		}

		/* set first pipe with plane as master */
		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

@@ -1039,9 +1047,17 @@ static void program_timing_sync(
				status->timing_sync_info.master = false;

		}
		/* remove any other pipes with plane as they have already been synced */
		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
@@ -2522,6 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
		DC_ERROR("Mode validation failed for stream update!\n");
		dc_release_state(context);
		return;
	}

	commit_planes_for_stream(
				dc,
				srf_updates,
+6 −0
Original line number Diff line number Diff line
@@ -1561,6 +1561,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
@@ -1590,6 +1591,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == 0xfe) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case 0x3:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
+39 −6
Original line number Diff line number Diff line
@@ -10,6 +10,28 @@
#include "intel_display_types.h"
#include "intel_global_state.h"

/*
 * kref release callback for a global state object: recovers the state from
 * its embedded refcount and destroys it through the owning object's vfunc.
 * Called only once the final reference is dropped via kref_put().
 */
static void __intel_atomic_global_state_free(struct kref *kref)
{
	struct intel_global_state *obj_state =
		container_of(kref, struct intel_global_state, ref);
	struct intel_global_obj *obj = obj_state->obj;

	obj->funcs->atomic_destroy_state(obj, obj_state);
}

/*
 * Drop one reference on @obj_state; the state is destroyed via
 * __intel_atomic_global_state_free() when the count reaches zero.
 */
static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
{
	kref_put(&obj_state->ref, __intel_atomic_global_state_free);
}

/*
 * Acquire an additional reference on @obj_state and return it, so callers
 * can take a reference inline (e.g. while assigning old/new state pointers).
 */
static struct intel_global_state *
intel_atomic_global_state_get(struct intel_global_state *obj_state)
{
	kref_get(&obj_state->ref);

	return obj_state;
}

void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
				  struct intel_global_obj *obj,
				  struct intel_global_state *state,
@@ -17,6 +39,10 @@ void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
{
	memset(obj, 0, sizeof(*obj));

	state->obj = obj;

	kref_init(&state->ref);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &dev_priv->global_obj_list);
@@ -28,7 +54,9 @@ void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)

	list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
		list_del(&obj->head);
		obj->funcs->atomic_destroy_state(obj, obj->state);

		drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
		intel_atomic_global_state_put(obj->state);
	}
}

@@ -97,10 +125,14 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	obj_state->obj = obj;
	obj_state->changed = false;

	kref_init(&obj_state->ref);

	state->global_objs[index].state = obj_state;
	state->global_objs[index].old_state = obj->state;
	state->global_objs[index].old_state =
		intel_atomic_global_state_get(obj->state);
	state->global_objs[index].new_state = obj_state;
	state->global_objs[index].ptr = obj;
	obj_state->state = state;
@@ -163,7 +195,9 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
		new_obj_state->state = NULL;

		state->global_objs[i].state = old_obj_state;
		obj->state = new_obj_state;

		intel_atomic_global_state_put(obj->state);
		obj->state = intel_atomic_global_state_get(new_obj_state);
	}
}

@@ -172,10 +206,9 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state)
	int i;

	for (i = 0; i < state->num_global_objs; i++) {
		struct intel_global_obj *obj = state->global_objs[i].ptr;
		intel_atomic_global_state_put(state->global_objs[i].old_state);
		intel_atomic_global_state_put(state->global_objs[i].new_state);

		obj->funcs->atomic_destroy_state(obj,
						 state->global_objs[i].state);
		state->global_objs[i].ptr = NULL;
		state->global_objs[i].state = NULL;
		state->global_objs[i].old_state = NULL;
Loading