Commit 148fb2e2 authored by Dave Airlie
Browse files

Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm-next



ttm:
- Replace ref/unref naming with get/put

amdgpu:
- Revert DC clang fix, causes a segfault with some compiler versions
- SR-IOV fix
- PCIE fix for vega20
- Misc DC fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201062345.7304-1-alexander.deucher@amd.com
parents 37fdaa33 47dd8048
Loading
Loading
Loading
Loading
+3 −4
Original line number Diff line number Diff line
@@ -91,10 +91,6 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);
	else
		dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id,
				 adev->gmc.xgmi.hive_id);

	return ret;
}
@@ -160,6 +156,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
			break;
	}

	dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
		 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);

	mutex_unlock(&hive->hive_lock);
exit:
	return ret;
+5 −1
Original line number Diff line number Diff line
@@ -965,6 +965,10 @@ static int gmc_v9_0_sw_init(void *handle)
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
+27 −2
Original line number Diff line number Diff line
@@ -4658,8 +4658,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
	flip = kzalloc(sizeof(*flip), GFP_KERNEL);
	full = kzalloc(sizeof(*full), GFP_KERNEL);

	if (!flip || !full)
	if (!flip || !full) {
		dm_error("Failed to allocate update bundles\n");
		goto cleanup;
	}

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4883,6 +4885,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
						     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

cleanup:
	kfree(flip);
	kfree(full);
}

/*
@@ -4917,11 +4923,26 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
		if (drm_atomic_crtc_needs_modeset(new_crtc_state)
		    && dm_old_crtc_state->stream) {
			/*
			 * If the stream is removed and CRC capture was
			 * enabled on the CRTC the extra vblank reference
			 * needs to be dropped since CRC capture will be
			 * disabled.
			 */
			if (!dm_new_crtc_state->stream
			    && dm_new_crtc_state->crc_enabled) {
				drm_crtc_vblank_put(crtc);
				dm_new_crtc_state->crc_enabled = false;
			}

			manage_dm_interrupts(adev, acrtc, false);
		}
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
@@ -5152,6 +5173,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
			continue;

		manage_dm_interrupts(adev, acrtc, true);

		/* The stream has changed so CRC capture needs to re-enabled. */
		if (dm_new_crtc_state->crc_enabled)
			amdgpu_dm_crtc_set_crc_source(crtc, "auto");
	}

	/* update planes when needed per crtc*/
+27 −21
Original line number Diff line number Diff line
@@ -64,8 +64,10 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,

int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
	struct dc_stream_state *stream_state = crtc_state->stream;
	bool enable;

	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

@@ -80,29 +82,33 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
		return -EINVAL;
	}

	/* When enabling CRC, we should also disable dithering. */
	if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
		if (dc_stream_configure_crc(stream_state->ctx->dc,
					    stream_state,
					    true, true)) {
			crtc_state->crc_enabled = true;
			dc_stream_set_dither_option(stream_state,
						    DITHER_OPTION_TRUN8);
		}
		else
			return -EINVAL;
	} else {
		if (dc_stream_configure_crc(stream_state->ctx->dc,
					    stream_state,
					    false, false)) {
			crtc_state->crc_enabled = false;
			dc_stream_set_dither_option(stream_state,
						    DITHER_OPTION_DEFAULT);
		}
		else
	enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);

	mutex_lock(&adev->dm.dc_lock);
	if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
				     enable, enable)) {
		mutex_unlock(&adev->dm.dc_lock);
		return -EINVAL;
	}

	/* When enabling CRC, we should also disable dithering. */
	dc_stream_set_dither_option(stream_state,
				    enable ? DITHER_OPTION_TRUN8
					   : DITHER_OPTION_DEFAULT);

	mutex_unlock(&adev->dm.dc_lock);

	/*
	 * Reading the CRC requires the vblank interrupt handler to be
	 * enabled. Keep a reference until CRC capture stops.
	 */
	if (!crtc_state->crc_enabled && enable)
		drm_crtc_vblank_get(crtc);
	else if (crtc_state->crc_enabled && !enable)
		drm_crtc_vblank_put(crtc);

	crtc_state->crc_enabled = enable;

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;
	return 0;
+7 −0
Original line number Diff line number Diff line
@@ -263,6 +263,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
	return true;
}

/*
 * poll pending down reply before clear payload allocation table
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enable MST DP link.
Loading