From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: stylon.wang@amd.com, Jun Lei <Jun.Lei@amd.com>,
	solomon.chiu@amd.com, Sunpeng.Li@amd.com, Harry.Wentland@amd.com,
	qingqing.zhuo@amd.com, George Shen <George.Shen@amd.com>,
	Rodrigo.Siqueira@amd.com, roman.li@amd.com,
	Wenjing Liu <Wenjing.Liu@amd.com>,
	Anson.Jacob@amd.com, Aurabindo.Pillai@amd.com, wayne.lin@amd.com,
	mikita.lipski@amd.com, Bhawanpreet.Lakha@amd.com,
	agustin.gutierrez@amd.com, pavle.kotarac@amd.com
Subject: [PATCH 11/16] drm/amd/display: Add vendor specific LTTPR workarounds for DCN31
Date: Fri, 26 Nov 2021 14:49:17 -0500
Message-ID: <20211126194922.816835-12-Bhawanpreet.Lakha@amd.com>
In-Reply-To: <20211126194922.816835-1-Bhawanpreet.Lakha@amd.com>

From: George Shen <George.Shen@amd.com>

[Why]
Certain LTTPRs require special workarounds in order to comply
with the DP specification.

[How]
Implement vendor-specific sequences via DPCD writes to
vendor-specific LTTPR registers.
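
For illustration only (not part of this patch): every workaround below
targets a vendor-specific register at DPCD address 0xF004F, bumped up by
one DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE block per repeater before
the target one. A minimal sketch of that shared addressing, where
vendor_lttpr_reg() is a hypothetical helper name and the block size is
assumed to be the 0x50 value from the DP spec; dp_convert_to_count() and
the lttpr_caps field are the ones the patch itself uses:

  static uint32_t vendor_lttpr_reg(struct dc_link *link)
  {
  	/* Repeater count as reported in the LTTPR capability field */
  	const uint8_t offset = dp_convert_to_count(
  			link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
  	/* Vendor-specific register for the repeater closest to DPRX */
  	uint32_t addr = 0xF004F;

  	/*
  	 * Step past one register block per intervening repeater; the
  	 * 0xFF sentinel leaves the base address unchanged, matching
  	 * the checks in the workaround functions below.
  	 */
  	if (offset != 0xFF)
  		addr += DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE *
  				(offset - 1);

  	return addr;
  }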

Reviewed-by: Jun Lei <Jun.Lei@amd.com>
Reviewed-by: Wenjing Liu <Wenjing.Liu@amd.com>
Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Signed-off-by: George Shen <George.Shen@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 196 +++++++++++++++++-
 drivers/gpu/drm/amd/display/dc/dc.h           |   1 +
 drivers/gpu/drm/amd/display/dc/dc_link.h      |   3 +
 .../drm/amd/display/dc/dcn31/dcn31_resource.c |   1 +
 4 files changed, 198 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index ee9b80809fc5..5eea4ec82f99 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -398,6 +398,170 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
 }
 #endif
 
+static void vendor_specific_lttpr_wa_one_start(struct dc_link *link)
+{
+	const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff};
+	const uint8_t offset = dp_convert_to_count(
+			link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+	uint32_t vendor_lttpr_write_address = 0xF004F;
+
+	if (offset != 0xFF)
+		vendor_lttpr_write_address +=
+				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+	/* W/A for certain LTTPR to reset their lane settings, part one of two */
+	core_link_write_dpcd(
+			link,
+			vendor_lttpr_write_address,
+			&vendor_lttpr_write_data[0],
+			sizeof(vendor_lttpr_write_data));
+}
+
+static void vendor_specific_lttpr_wa_one_end(
+	struct dc_link *link,
+	uint8_t retry_count)
+{
+	const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0};
+	const uint8_t offset = dp_convert_to_count(
+			link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+	uint32_t vendor_lttpr_write_address = 0xF004F;
+
+	if (!retry_count) {
+		if (offset != 0xFF)
+			vendor_lttpr_write_address +=
+					((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+		/* W/A for certain LTTPR to reset their lane settings, part two of two */
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data[0],
+				sizeof(vendor_lttpr_write_data));
+	}
+}
+
+static void vendor_specific_lttpr_wa_one_two(
+	struct dc_link *link,
+	const uint8_t rate)
+{
+	uint8_t toggle_rate = 0x0;
+
+	if (rate == 0x6)
+		toggle_rate = 0xA;
+	else
+		toggle_rate = 0x6;
+
+	if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+		/* W/A for certain LTTPR to reset internal state for link training */
+		core_link_write_dpcd(
+				link,
+				DP_LINK_BW_SET,
+				&toggle_rate,
+				1);
+	}
+
+	/* Store the last attempted link rate for this link */
+	link->vendor_specific_lttpr_link_rate_wa = rate;
+}
+
+static void vendor_specific_lttpr_wa_three(
+	struct dc_link *link,
+	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX])
+{
+	const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
+	const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
+	const uint8_t offset = dp_convert_to_count(
+			link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+	uint32_t vendor_lttpr_write_address = 0xF004F;
+	uint32_t vendor_lttpr_read_address = 0xF0053;
+	uint8_t dprx_vs = 0;
+	uint8_t dprx_pe = 0;
+	uint8_t lane;
+
+	if (offset != 0xFF) {
+		vendor_lttpr_write_address +=
+				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+		vendor_lttpr_read_address +=
+				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+	}
+
+	/* W/A to read lane settings requested by DPRX */
+	core_link_write_dpcd(
+			link,
+			vendor_lttpr_write_address,
+			&vendor_lttpr_write_data_vs[0],
+			sizeof(vendor_lttpr_write_data_vs));
+	core_link_read_dpcd(
+			link,
+			vendor_lttpr_read_address,
+			&dprx_vs,
+			1);
+	core_link_write_dpcd(
+			link,
+			vendor_lttpr_write_address,
+			&vendor_lttpr_write_data_pe[0],
+			sizeof(vendor_lttpr_write_data_pe));
+	core_link_read_dpcd(
+			link,
+			vendor_lttpr_read_address,
+			&dprx_pe,
+			1);
+
+	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+		dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3;
+		dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3;
+	}
+}
+
+static void vendor_specific_lttpr_wa_four(
+	struct dc_link *link,
+	bool apply_wa)
+{
+	const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8};
+	const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0};
+	const uint8_t offset = dp_convert_to_count(
+			link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+	uint32_t vendor_lttpr_write_address = 0xF004F;
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+	uint8_t sink_status = 0;
+	uint8_t i;
+#endif
+
+	if (offset != 0xFF)
+		vendor_lttpr_write_address +=
+				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+	/* W/A to pass through DPCD write of TPS=0 to DPRX */
+	if (apply_wa) {
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_one[0],
+				sizeof(vendor_lttpr_write_data_one));
+	}
+
+	/* clear training pattern set */
+	dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+	if (apply_wa) {
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_two[0],
+				sizeof(vendor_lttpr_write_data_two));
+	}
+
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+	/* poll for intra-hop disable */
+	for (i = 0; i < 10; i++) {
+		if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+				(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+			break;
+		udelay(1000);
+	}
+#endif
+}
+
 enum dc_status dpcd_set_link_settings(
 	struct dc_link *link,
 	const struct link_training_settings *lt_settings)
@@ -452,6 +616,13 @@ enum dc_status dpcd_set_link_settings(
 #else
 		rate = (uint8_t) (lt_settings->link_settings.link_rate);
 #endif
+		if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+					(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+					link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+			vendor_specific_lttpr_wa_one_start(link);
+			vendor_specific_lttpr_wa_one_two(link, rate);
+		}
+
 		status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
 	}
 
@@ -1329,6 +1500,13 @@ static enum link_training_result perform_clock_recovery_sequence(
 				dpcd_lane_adjust,
 				offset);
 
+		if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+				(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+				link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+			vendor_specific_lttpr_wa_one_end(link, retry_count);
+			vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust);
+		}
+
 		/* 5. check CR done*/
 		if (dp_is_cr_done(lane_count, dpcd_lane_status))
 			return LINK_TRAINING_SUCCESS;
@@ -2203,7 +2381,12 @@ enum link_training_result dc_link_dp_perform_link_training(
 			&lt_settings);
 
 	/* reset previous training states */
-	dpcd_exit_training_mode(link);
+	if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+			link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+		vendor_specific_lttpr_wa_four(link, true);
+	else
+		dpcd_exit_training_mode(link);
 
 	/* configure link prior to entering training mode */
 	dpcd_configure_lttpr_mode(link, &lt_settings);
@@ -2223,8 +2406,15 @@ enum link_training_result dc_link_dp_perform_link_training(
 	else
 		ASSERT(0);
 
-	/* exit training mode and switch to video idle */
-	dpcd_exit_training_mode(link);
+	/* exit training mode */
+	if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+			link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+		vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS));
+	else
+		dpcd_exit_training_mode(link);
+
+	/* switch to video idle */
 	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
 		status = dp_transition_to_video_idle(link,
 				&lt_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1738556c9b59..9f5745313e48 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -696,6 +696,7 @@ struct dc_debug_options {
 	bool enable_sw_cntl_psr;
 	union dpia_debug_options dpia_debug;
 #endif
+	bool apply_vendor_specific_lttpr_wa;
 };
 
 struct gpu_info_soc_bounding_box_v1_0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 30822aa33931..a99950883e85 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -186,6 +186,9 @@ struct dc_link {
 	/* Drive settings read from integrated info table */
 	struct dc_lane_settings bios_forced_drive_settings;
 
+	/* Vendor specific LTTPR workaround variables */
+	uint8_t vendor_specific_lttpr_link_rate_wa;
+
 	/* MST record stream using this link */
 	struct link_flags {
 		bool dp_keep_receiver_powered;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 70cbd89bdcb8..6187c55ac82c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1023,6 +1023,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	},
 	.optimize_edp_link_rate = true,
 	.enable_sw_cntl_psr = true,
+	.apply_vendor_specific_lttpr_wa = true,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
-- 
2.25.1

