Commit 3cf43cdc authored by Ville Syrjälä
Browse files

drm/i915: Introduce proper dbuf state



Add a global state to track the dbuf slices. Gets rid of all the nasty
coupling between state->modeset and dbuf recomputation. Also we can now
totally nuke state->active_pipe_changes.

dev_priv->wm.distrust_bios_wm still remains, but that too will get
nuked soon.

Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200225171125.28885-9-ville.syrjala@linux.intel.com


Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
parent 56f48c1d
Loading
Loading
Loading
Loading
+46 −21
Original line number Diff line number Diff line
@@ -7579,6 +7579,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
@@ -7652,6 +7654,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);
	dbuf_state->active_pipes &= ~BIT(pipe);
	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
@@ -14009,10 +14013,10 @@ static void verify_wm_state(struct intel_crtc *crtc,
	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->enabled_dbuf_slices_mask,
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);
	/* planes */
@@ -14553,9 +14557,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
	state->modeset = true;
	state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
	state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
	if (state->active_pipe_changes) {
	if (state->active_pipes != dev_priv->active_pipes) {
		ret = _intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
@@ -15209,22 +15211,38 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
	u8 required_slices = state->enabled_dbuf_slices_mask;
	u8 slices_union = hw_enabled_slices | required_slices;
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);
	if (!new_dbuf_state ||
	    new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
		return;
	if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
		gen9_dbuf_slices_update(dev_priv, slices_union);
	WARN_ON(!new_dbuf_state->base.changed);
	gen9_dbuf_slices_update(dev_priv,
				old_dbuf_state->enabled_slices |
				new_dbuf_state->enabled_slices);
}
static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
	u8 required_slices = state->enabled_dbuf_slices_mask;
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);
	if (!new_dbuf_state ||
	    new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
		return;
	if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
		gen9_dbuf_slices_update(dev_priv, required_slices);
	WARN_ON(!new_dbuf_state->base.changed);
	gen9_dbuf_slices_update(dev_priv,
				new_dbuf_state->enabled_slices);
}
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
@@ -15467,8 +15485,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
	if (state->modeset)
		intel_encoders_update_prepare(state);
	/* Enable all new slices, we might need */
	if (state->modeset)
	icl_dbuf_slice_pre_update(state);
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -15524,8 +15540,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
			dev_priv->display.optimize_watermarks(state, crtc);
	}
	/* Disable all slices, we don't need */
	if (state->modeset)
	icl_dbuf_slice_post_update(state);
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -17420,10 +17434,14 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->dbuf.obj.state);
	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
	dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -17712,6 +17730,10 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
	if (ret)
		return ret;
	ret = intel_dbuf_init(i915);
	if (ret)
		return ret;
	ret = intel_bw_init(i915);
	if (ret)
		return ret;
@@ -18228,6 +18250,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
@@ -18258,7 +18282,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
			    enableddisabled(crtc_state->hw.active));
	}
	dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;
	readout_plane_state(dev_priv);
+4 −4
Original line number Diff line number Diff line
@@ -1161,7 +1161,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
@@ -4539,14 +4539,14 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
	for (slice = DBUF_S1; slice < num_slices; slice++)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->enabled_dbuf_slices_mask = req_slices;
	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->enabled_dbuf_slices_mask =
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
@@ -4554,7 +4554,7 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->enabled_dbuf_slices_mask);
				dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+0 −13
Original line number Diff line number Diff line
@@ -479,16 +479,6 @@ struct intel_atomic_state {

	bool dpll_set, modeset;

	/*
	 * Does this transaction change the pipes that are active?  This mask
	 * tracks which CRTC's have changed their active state at the end of
	 * the transaction (not counting the temporary disable during modesets).
	 * This mask should only be non-zero when intel_state->modeset is true,
	 * but the converse is not necessarily true; simply changing a mode may
	 * not flip the final active status of any CRTC's
	 */
	u8 active_pipe_changes;

	u8 active_pipes;

	struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -506,9 +496,6 @@ struct intel_atomic_state {
	 */
	bool global_state_changed;

	/* Number of enabled DBuf slices */
	u8 enabled_dbuf_slices_mask;

	struct i915_sw_fence commit_ready;

	struct llist_node freed;
+9 −2
Original line number Diff line number Diff line
@@ -947,6 +947,13 @@ struct drm_i915_private {
		struct intel_global_obj obj;
	} cdclk;

	struct {
		/* The current hardware dbuf configuration */
		u8 enabled_slices;

		struct intel_global_obj obj;
	} dbuf;

	/**
	 * wq - Driver workqueue for GEM.
	 *
@@ -1123,12 +1130,12 @@ struct drm_i915_private {
		 * Set during HW readout of watermarks/DDB.  Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 *
		 * FIXME get rid of this.
		 */
		bool distrust_bios_wm;
	} wm;

	u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */

	struct dram_info {
		bool valid;
		bool is_16gb_dimm;
+126 −59
Original line number Diff line number Diff line
@@ -4040,7 +4040,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
				  u8 active_pipes);

static void
static int
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
				   const struct intel_crtc_state *crtc_state,
				   const u64 total_data_rate,
@@ -4053,30 +4053,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
	const struct intel_crtc *crtc;
	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
	struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(intel_state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(intel_state);
	u8 active_pipes = new_dbuf_state->active_pipes;
	u16 ddb_size;
	u32 ddb_range_size;
	u32 i;
	u32 dbuf_slice_mask;
	u32 active_pipes;
	u32 offset;
	u32 slice_size;
	u32 total_slice_mask;
	u32 start, end;
	int ret;

	*num_active = hweight8(active_pipes);

	if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
	if (!crtc_state->hw.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight8(dev_priv->active_pipes);
		return;
		return 0;
	}

	if (intel_state->active_pipe_changes)
		active_pipes = intel_state->active_pipes;
	else
		active_pipes = dev_priv->active_pipes;

	*num_active = hweight8(active_pipes);

	ddb_size = intel_get_ddb_size(dev_priv);

	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
@@ -4089,13 +4088,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
	 * that changes the active CRTC list or do modeset would need to
	 * grab _all_ crtc locks, including the one we currently hold.
	 */
	if (!intel_state->active_pipe_changes && !intel_state->modeset) {
	if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
	    !dev_priv->wm.distrust_bios_wm) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 *
		 * FIXME get rid of this mess
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
		return 0;
	}

	/*
@@ -4174,7 +4176,13 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
	 * FIXME: For now we always enable slice S1 as per
	 * the Bspec display initialization sequence.
	 */
	intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
	new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);

	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
	end = ddb_range_size *
@@ -4185,9 +4193,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,

	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
		      alloc->start, alloc->end);
	DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
		      intel_state->enabled_dbuf_slices_mask,
		      INTEL_INFO(dev_priv)->num_supported_dbuf_slices);

	return 0;
}

static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4310,7 +4317,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
{
	dev_priv->enabled_dbuf_slices_mask =
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);
}

@@ -4758,6 +4765,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
	u32 blocks;
	int level;
	int ret;

	/* Clear the partitioning for disabled planes. */
	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
@@ -4778,8 +4786,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
							 plane_data_rate,
							 uv_plane_data_rate);

	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
	ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
						 total_data_rate,
						 alloc, &num_active);
	if (ret)
		return ret;

	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0)
		return 0;
@@ -5700,14 +5712,11 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_allocate_pipe_ddb(new_crtc_state);
@@ -5855,7 +5864,8 @@ skl_print_wm_changes(struct intel_atomic_state *state)
	}
}

static int intel_add_all_pipes(struct intel_atomic_state *state)
static int intel_add_affected_pipes(struct intel_atomic_state *state,
				    u8 pipe_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
@@ -5863,6 +5873,9 @@ static int intel_add_all_pipes(struct intel_atomic_state *state)
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;

		if ((pipe_mask & BIT(crtc->pipe)) == 0)
			continue;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
@@ -5875,49 +5888,54 @@ static int
skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int ret;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i, ret;

	if (dev_priv->wm.distrust_bios_wm) {
		/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us.  Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
		 * skl_ddb_get_pipe_allocation_limits() currently requires
		 * all active pipes to be included in the state so that
		 * it can redistribute the dbuf among them, and it really
		 * wants to recompute things when distrust_bios_wm is set
		 * so we add all the pipes to the state.
		 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
				       state->base.acquire_ctx);
		ret = intel_add_affected_pipes(state, ~0);
		if (ret)
			return ret;
	}

		state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_dbuf_state *new_dbuf_state;
		const struct intel_dbuf_state *old_dbuf_state;

		/*
		 * We usually only initialize state->active_pipes if we
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!state->modeset)
			state->active_pipes = dev_priv->active_pipes;
	}
		new_dbuf_state = intel_atomic_get_dbuf_state(state);
		if (IS_ERR(new_dbuf_state))
			return ret;

		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);

		new_dbuf_state->active_pipes =
			intel_calc_active_pipes(state, old_dbuf_state->active_pipes);

		if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
			break;

		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;

		/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceeds its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
		 * skl_ddb_get_pipe_allocation_limits() currently requires
		 * all active pipes to be included in the state so that
		 * it can redistribute the dbuf among them.
		 */
	if (state->active_pipe_changes || state->modeset) {
		ret = intel_add_all_pipes(state);
		ret = intel_add_affected_pipes(state,
					       new_dbuf_state->active_pipes);
		if (ret)
			return ret;

		break;
	}

	return 0;
@@ -7746,3 +7764,52 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}

/*
 * Duplicate the current dbuf global state for a new atomic transaction.
 * Returns the embedded base state, or NULL on allocation failure.
 */
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_dbuf_state *state =
		kmemdup(obj->state, sizeof(*state), GFP_KERNEL);

	return state ? &state->base : NULL;
}

/* Counterpart of intel_dbuf_duplicate_state(): free a duplicated dbuf state. */
static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
				     struct intel_global_state *state)
{
	/* The state is a single kmemdup()'d chunk; nothing else to release. */
	kfree(state);
}

/* Hooks wiring the dbuf state into the intel global atomic state machinery. */
static const struct intel_global_state_funcs intel_dbuf_funcs = {
	.atomic_duplicate_state = intel_dbuf_duplicate_state,
	.atomic_destroy_state = intel_dbuf_destroy_state,
};

/*
 * Get the dbuf global state for @state, via the generic global-obj
 * helper. Returns an ERR_PTR on failure.
 */
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *obj_state =
		intel_atomic_get_global_obj_state(state, &i915->dbuf.obj);

	if (IS_ERR(obj_state))
		return ERR_CAST(obj_state);

	return to_intel_dbuf_state(obj_state);
}

/*
 * Allocate the initial (zeroed) dbuf global state and register it
 * with the global atomic state framework. Returns 0 or -ENOMEM.
 */
int intel_dbuf_init(struct drm_i915_private *dev_priv)
{
	struct intel_dbuf_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
				     &state->base, &intel_dbuf_funcs);

	return 0;
}
Loading