All of lore.kernel.org
* [PATCH 00/12] DC Patches March 03, 2020
@ 2020-03-03 23:27 Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 01/12] drm/amd/display: update soc bb for nv14 Rodrigo Siqueira
                   ` (11 more replies)
  0 siblings, 12 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx; +Cc: Sunpeng.Li, Bhawanpreet.Lakha, Rodrigo.Siqueira, Harry.Wentland

This DC patchset brings improvements in multiple areas. In summary, we
highlight:
 
* Fix i2c_write issue
* Fix HDCP issues
* Update nv14 soc

Aric Cyr (1):
  drm/amd/display: 3.2.76

Isabel Zhang (1):
  drm/amd/display: Add stay count and bstatus to HDCP log

Martin Leung (2):
  drm/amd/display: update soc bb for nv14
  drm/amd/display: writing stereo polarity register if swapped

Nikola Cornij (1):
  drm/amd/display: Program DSC during timing programming

Rodrigo Siqueira (1):
  drm/amd/display: Stop if retimer is not available

Wenjing Liu (4):
  drm/amd/display: determine is mst hdcp based on stream instead of sink
    signal
  drm/amd/display: determine rx id list bytes to read based on device
    count
  drm/amd/display: fix a minor HDCP logging error
  drm/amd/display: separate FEC capability from fec debug flag

Yongqiang Sun (1):
  drm/amd/display: Not check wm and clk change flag in optimized
    bandwidth.

bradenbakker (1):
  drm/amd/display: Add registry for mem pwr control

 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c    |   1 +
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  16 +--
 drivers/gpu/drm/amd/display/dc/core/dc.c      |   4 +
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  83 +++++++------
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |   4 +-
 .../drm/amd/display/dc/core/dc_link_hwss.c    |   2 +-
 drivers/gpu/drm/amd/display/dc/dc.h           |  11 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  28 ++---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_optc.c |   2 +-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c    |  31 ++---
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 113 +++++++++++++++++-
 drivers/gpu/drm/amd/display/dc/dm_cp_psp.h    |   1 +
 .../gpu/drm/amd/display/dc/inc/dc_link_dp.h   |   1 +
 .../gpu/drm/amd/display/modules/hdcp/hdcp.c   |   7 +-
 .../gpu/drm/amd/display/modules/hdcp/hdcp.h   |   6 +-
 .../drm/amd/display/modules/hdcp/hdcp_ddc.c   |  24 +++-
 .../drm/amd/display/modules/hdcp/hdcp_log.h   |   8 +-
 .../drm/amd/display/modules/inc/mod_hdcp.h    |   4 +-
 18 files changed, 240 insertions(+), 106 deletions(-)

-- 
2.25.1

* [PATCH 01/12] drm/amd/display: update soc bb for nv14
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-04 14:14   ` Kazlauskas, Nicholas
  2020-03-03 23:27 ` [PATCH 02/12] drm/amd/display: Add stay count and bstatus to HDCP log Rodrigo Siqueira
                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira, Martin Leung,
	Jun Lei, Bhawanpreet.Lakha

From: Martin Leung <martin.leung@amd.com>

[why]
nv14 previously inherited its soc bounding box from generic dcn 2.0,
which did not match the watermark values provided by the memory team.

[how]
Add an nv14-specific soc bounding box: copy the generic nv2 values it
was using before, but change the number of memory channels to 8.

Signed-off-by: Martin Leung <martin.leung@amd.com>
Reviewed-by: Jun Lei <Jun.Lei@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 113 +++++++++++++++++-
 1 file changed, 112 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index c629a7b45f56..c8b85f62ae95 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -337,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
 	.use_urgent_burst_bw = 0
 };
 
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+	.clock_limits = {
+			{
+				.state = 0,
+				.dcfclk_mhz = 560.0,
+				.fabricclk_mhz = 560.0,
+				.dispclk_mhz = 513.0,
+				.dppclk_mhz = 513.0,
+				.phyclk_mhz = 540.0,
+				.socclk_mhz = 560.0,
+				.dscclk_mhz = 171.0,
+				.dram_speed_mts = 8960.0,
+			},
+			{
+				.state = 1,
+				.dcfclk_mhz = 694.0,
+				.fabricclk_mhz = 694.0,
+				.dispclk_mhz = 642.0,
+				.dppclk_mhz = 642.0,
+				.phyclk_mhz = 600.0,
+				.socclk_mhz = 694.0,
+				.dscclk_mhz = 214.0,
+				.dram_speed_mts = 11104.0,
+			},
+			{
+				.state = 2,
+				.dcfclk_mhz = 875.0,
+				.fabricclk_mhz = 875.0,
+				.dispclk_mhz = 734.0,
+				.dppclk_mhz = 734.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 875.0,
+				.dscclk_mhz = 245.0,
+				.dram_speed_mts = 14000.0,
+			},
+			{
+				.state = 3,
+				.dcfclk_mhz = 1000.0,
+				.fabricclk_mhz = 1000.0,
+				.dispclk_mhz = 1100.0,
+				.dppclk_mhz = 1100.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1000.0,
+				.dscclk_mhz = 367.0,
+				.dram_speed_mts = 16000.0,
+			},
+			{
+				.state = 4,
+				.dcfclk_mhz = 1200.0,
+				.fabricclk_mhz = 1200.0,
+				.dispclk_mhz = 1284.0,
+				.dppclk_mhz = 1284.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1200.0,
+				.dscclk_mhz = 428.0,
+				.dram_speed_mts = 16000.0,
+			},
+			/*Extra state, no dispclk ramping*/
+			{
+				.state = 5,
+				.dcfclk_mhz = 1200.0,
+				.fabricclk_mhz = 1200.0,
+				.dispclk_mhz = 1284.0,
+				.dppclk_mhz = 1284.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1200.0,
+				.dscclk_mhz = 428.0,
+				.dram_speed_mts = 16000.0,
+			},
+		},
+	.num_states = 5,
+	.sr_exit_time_us = 8.6,
+	.sr_enter_plus_exit_time_us = 10.9,
+	.urgent_latency_us = 4.0,
+	.urgent_latency_pixel_data_only_us = 4.0,
+	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+	.urgent_latency_vm_data_only_us = 4.0,
+	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+	.max_avg_sdp_bw_use_normal_percent = 40.0,
+	.max_avg_dram_bw_use_normal_percent = 40.0,
+	.writeback_latency_us = 12.0,
+	.ideal_dram_bw_after_urgent_percent = 40.0,
+	.max_request_size_bytes = 256,
+	.dram_channel_width_bytes = 2,
+	.fabric_datapath_to_dcn_data_return_bytes = 64,
+	.dcn_downspread_percent = 0.5,
+	.downspread_percent = 0.38,
+	.dram_page_open_time_ns = 50.0,
+	.dram_rw_turnaround_time_ns = 17.5,
+	.dram_return_buffer_per_channel_bytes = 8192,
+	.round_trip_ping_latency_dcfclk_cycles = 131,
+	.urgent_out_of_order_return_per_channel_bytes = 256,
+	.channel_interleave_bytes = 256,
+	.num_banks = 8,
+	.num_chans = 8,
+	.vmm_page_size_bytes = 4096,
+	.dram_clock_change_latency_us = 404.0,
+	.dummy_pstate_latency_us = 5.0,
+	.writeback_dram_clock_change_latency_us = 23.0,
+	.return_bus_width_bytes = 64,
+	.dispclk_dppclk_vco_speed_mhz = 3850,
+	.xfc_bus_transport_time_us = 20,
+	.xfc_xbuf_latency_tolerance_us = 4,
+	.use_urgent_burst_bw = 0
+};
+
 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
 
 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -3298,7 +3409,7 @@ static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
 	uint32_t hw_internal_rev)
 {
 	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
-		return &dcn2_0_nv12_soc;
+		return &dcn2_0_nv14_soc;
 
 	return &dcn2_0_soc;
 }
-- 
2.25.1

* [PATCH 02/12] drm/amd/display: Add stay count and bstatus to HDCP log
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 01/12] drm/amd/display: update soc bb for nv14 Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 03/12] drm/amd/display: determine is mst hdcp based on stream instead of sink signal Rodrigo Siqueira
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Isabel Zhang, Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira,
	Wenjing Liu, Bhawanpreet.Lakha

From: Isabel Zhang <isabel.zhang@amd.com>

[Why]
So the values of stay count and bstatus can be easily viewed during
debugging.

[How]
Add the stay count and bstatus values to the HDCP log output.

Signed-off-by: Isabel Zhang <isabel.zhang@amd.com>
Reviewed-by: Wenjing Liu <Wenjing.Liu@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 6e844825ad23..d3192b9d0c3d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -37,10 +37,11 @@
 /* default logs */
 #define HDCP_ERROR_TRACE(hdcp, status) \
 		HDCP_LOG_ERR(hdcp, \
-			"[Link %d] WARNING %s IN STATE %s", \
+			"[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \
 			hdcp->config.index, \
 			mod_hdcp_status_to_str(status), \
-			mod_hdcp_state_id_to_str(hdcp->state.id))
+			mod_hdcp_state_id_to_str(hdcp->state.id), \
+			hdcp->state.stay_count)
 #define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
 		HDCP_LOG_VER(hdcp, \
 			"[Link %d] HDCP 1.4 enabled on display %d", \
@@ -111,6 +112,9 @@
 				sizeof(hdcp->auth.msg.hdcp1.bksv)); \
 		HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
 				sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+		HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
+				(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
+				sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
 		HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
 				sizeof(hdcp->auth.msg.hdcp1.an)); \
 		HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
-- 
2.25.1

* [PATCH 03/12] drm/amd/display: determine is mst hdcp based on stream instead of sink signal
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 01/12] drm/amd/display: update soc bb for nv14 Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 02/12] drm/amd/display: Add stay count and bstatus to HDCP log Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 04/12] drm/amd/display: Add registry for mem pwr control Rodrigo Siqueira
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Ashley Thomas, Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira,
	Wenjing Liu, Bhawanpreet.Lakha

From: Wenjing Liu <Wenjing.Liu@amd.com>

[why]
Even if the sink signal is MST, the driver may enable an SST stream.
We should not decide whether to do MST authentication based on the
sink's capability; instead, we should decide based on what we have
actually enabled in the stream.
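
In short, the decision now flows from the enabled stream signal into the
HDCP module; a condensed sketch of the two sides changed in the diff
below (not complete code):

 	/* DC side: derive the flag from the enabled stream signal */
 	config.mst_supported = (pipe_ctx->stream->signal ==
 			SIGNAL_TYPE_DISPLAY_PORT_MST);

 	/* HDCP side: MST authentication now keys off that flag */
 	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
 			hdcp->connection.link.dp.mst_supported);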

Signed-off-by: Wenjing Liu <Wenjing.Liu@amd.com>
Reviewed-by: Ashley Thomas <Ashley.Thomas2@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 1 +
 drivers/gpu/drm/amd/display/dc/core/dc_link.c          | 2 ++
 drivers/gpu/drm/amd/display/dc/dm_cp_psp.h             | 1 +
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c        | 4 +---
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h        | 6 +++---
 drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h     | 4 ++--
 6 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c4fd148bf6e0..5b70ed3cdb88 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -412,6 +412,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	link->dig_be = config->link_enc_inst;
 	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
 	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+	link->dp.mst_supported = config->mst_supported;
 	display->adjust.disable = 1;
 	link->adjust.auth_delay = 2;
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a4680968c8f4..ddd4dca61cc3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2959,6 +2959,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 		config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
 		config.dpms_off = dpms_off;
 		config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+		config.mst_supported = (pipe_ctx->stream->signal ==
+				SIGNAL_TYPE_DISPLAY_PORT_MST);
 		cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
 	}
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 626d22d437f4..968c46dfb506 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -32,6 +32,7 @@ struct cp_psp_stream_config {
 	uint8_t otg_inst;
 	uint8_t link_enc_inst;
 	uint8_t stream_enc_inst;
+	uint8_t mst_supported;
 	void *dm_stream_ctx;
 	bool dpms_off;
 };
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index bcba93d3b195..7a571b3f62d6 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -481,10 +481,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
 		break;
 	case SIGNAL_TYPE_EDP:
 	case SIGNAL_TYPE_DISPLAY_PORT:
-		mode = MOD_HDCP_MODE_DP;
-		break;
 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
-		mode = MOD_HDCP_MODE_DP_MST;
+		mode = MOD_HDCP_MODE_DP;
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 77fdcec4263e..5cb4546be0ef 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -392,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
 /* hdcp version helpers */
 static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
 {
-	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
-			hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP);
 }
 
 static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
 {
-	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
+			hdcp->connection.link.dp.mst_supported);
 }
 
 static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index bb855ea5d5a3..c088602bc1a0 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -102,6 +102,7 @@ enum mod_hdcp_status {
 struct mod_hdcp_displayport {
 	uint8_t rev;
 	uint8_t assr_supported;
+	uint8_t mst_supported;
 };
 
 struct mod_hdcp_hdmi {
@@ -110,8 +111,7 @@ struct mod_hdcp_hdmi {
 enum mod_hdcp_operation_mode {
 	MOD_HDCP_MODE_OFF,
 	MOD_HDCP_MODE_DEFAULT,
-	MOD_HDCP_MODE_DP,
-	MOD_HDCP_MODE_DP_MST
+	MOD_HDCP_MODE_DP
 };
 
 enum mod_hdcp_display_state {
-- 
2.25.1

* [PATCH 04/12] drm/amd/display: Add registry for mem pwr control
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (2 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 03/12] drm/amd/display: determine is mst hdcp based on stream instead of sink signal Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 05/12] drm/amd/display: Not check wm and clk change flag in optimized bandwidth Rodrigo Siqueira
                   ` (7 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Charlene Liu, Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira,
	Braden Bakker, Bhawanpreet.Lakha

From: bradenbakker <braden.bakker@amd.com>

[What]
Need a debug option to control light/deep sleep.

[How]
Add a registry entry for memory power control.

Signed-off-by: Braden Bakker <Braden.Bakker@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index abff0da945e7..2b538f477c82 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -390,6 +390,7 @@ struct dc_debug_options {
 	int always_scale;
 	bool disable_pplib_clock_request;
 	bool disable_clock_gate;
+	bool disable_mem_low_power;
 	bool disable_dmcu;
 	bool disable_psr;
 	bool force_abm_enable;
-- 
2.25.1

* [PATCH 05/12] drm/amd/display: Not check wm and clk change flag in optimized bandwidth.
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (3 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 04/12] drm/amd/display: Add registry for mem pwr control Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 06/12] drm/amd/display: Program DSC during timing programming Rodrigo Siqueira
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Sunpeng.Li, Tony Cheng, Rodrigo.Siqueira, Yongqiang Sun,
	Harry.Wentland, Bhawanpreet.Lakha

From: Yongqiang Sun <yongqiang.sun@amd.com>

[Why]
The system isn't able to enter S0i3 because display count 0 is never
sent to the SMU. When dpms is off, the clk changed flag is already
cleared; that flag is then checked when doing optimized bandwidth, so
the clock update is bypassed because the flag is unset.

[How]
Remove the flag checks inside the function: the watermark and clock
values are compared during the update itself to determine whether to
program them, so there is no need to check the flags here as well.
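
The resulting flow, in a rough sketch (condensed from the dc.c and
optimize_bandwidth hunks below; see the diff for full context):

 	dc->hwss.optimize_bandwidth(dc, context); /* always programs wm and clocks */

 	/* flags are now cleared by the caller, not inside the hwss hook */
 	dc->clk_optimized_required = false;
 	dc->wm_optimized_required = false;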

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc.c      |  4 +++
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 28 ++++++-------------
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c    | 26 +++++++----------
 3 files changed, 23 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6dece1ee30bf..df285f57fe92 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1378,6 +1378,10 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 		}
 
 	dc->hwss.optimize_bandwidth(dc, context);
+
+	dc->clk_optimized_required = false;
+	dc->wm_optimized_required = false;
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 385250e1e3fd..21c7c1b010ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2717,30 +2717,20 @@ void dcn10_optimize_bandwidth(
 		hws->funcs.verify_allow_pstate_change_high(dc);
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-		if (context->stream_count == 0) {
+		if (context->stream_count == 0)
 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
 
-			dc->clk_mgr->funcs->update_clocks(
-					dc->clk_mgr,
-					context,
-					true);
-		} else if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-			dc->clk_mgr->funcs->update_clocks(
-								dc->clk_mgr,
-								context,
-								true);
-		}
-	}
-
-	if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-		hubbub->funcs->program_watermarks(hubbub,
-				&context->bw_ctx.bw.dcn.watermarks,
-				dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
+		dc->clk_mgr->funcs->update_clocks(
+				dc->clk_mgr,
+				context,
 				true);
 	}
 
-	dc->clk_optimized_required = false;
-	dc->wm_optimized_required = false;
+	hubbub->funcs->program_watermarks(hubbub,
+			&context->bw_ctx.bw.dcn.watermarks,
+			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
+			true);
+
 	dcn10_stereo_hw_frame_pack_wa(dc, context);
 
 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 045ba08c85b4..b0f61bd7c208 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1660,22 +1660,16 @@ void dcn20_optimize_bandwidth(
 {
 	struct hubbub *hubbub = dc->res_pool->hubbub;
 
-	if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-		/* program dchubbub watermarks */
-		hubbub->funcs->program_watermarks(hubbub,
-						&context->bw_ctx.bw.dcn.watermarks,
-						dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
-						true);
-		dc->wm_optimized_required = false;
-	}
-
-	if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-		dc->clk_mgr->funcs->update_clocks(
-				dc->clk_mgr,
-				context,
-				true);
-		dc->clk_optimized_required = false;
-	}
+	/* program dchubbub watermarks */
+	hubbub->funcs->program_watermarks(hubbub,
+					&context->bw_ctx.bw.dcn.watermarks,
+					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
+					true);
+
+	dc->clk_mgr->funcs->update_clocks(
+			dc->clk_mgr,
+			context,
+			true);
 }
 
 bool dcn20_update_bandwidth(
-- 
2.25.1

* [PATCH 06/12] drm/amd/display: Program DSC during timing programming
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (4 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 05/12] drm/amd/display: Not check wm and clk change flag in optimized bandwidth Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 07/12] drm/amd/display: determine rx id list bytes to read based on device count Rodrigo Siqueira
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Sunpeng.Li, Tony Cheng, Rodrigo.Siqueira, Nikola Cornij,
	Harry.Wentland, Bhawanpreet.Lakha

From: Nikola Cornij <nikola.cornij@amd.com>

[why]
The link or DIG BE can't be exposed to a higher stream bandwidth than it
can handle. When DSC is required to fit the stream into the link
bandwidth, DSC has to be programmed during timing programming to ensure
this. Without it, intermittent issues such as a black screen after S3 or
a hot-plug can be seen.

[how]
Move DSC programming from enabling stream on link to timing setup.
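
The resulting split, in a rough sketch (condensed from the hunks below):

 	/* dcn20_enable_stream_timing(): program DSC together with the timing */
 	if (pipe_ctx->stream->timing.flags.DSC)
 		dp_set_dsc_on_stream(pipe_ctx, true);

 	/* core_link_enable_stream(): afterwards only enable DSC on the RX */
 	dp_set_dsc_on_rx(pipe_ctx, true);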

Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c      | 11 ++++++++---
 drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c |  2 +-
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |  7 +++++++
 drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h    |  1 +
 4 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ddd4dca61cc3..114f77759ebf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3078,9 +3078,14 @@ void core_link_enable_stream(
 
 		if (pipe_ctx->stream->timing.flags.DSC) {
 			if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
-					dc_is_virtual_signal(pipe_ctx->stream->signal))
-				dp_set_dsc_enable(pipe_ctx, true);
+					dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+				/* Here we only need to enable DSC on RX. DSC HW programming
+				 * was done earlier, as part of timing programming.
+				 */
+				dp_set_dsc_on_rx(pipe_ctx, true);
+			}
 		}
+
 		dc->hwss.enable_stream(pipe_ctx);
 
 		/* Set DPS PPS SDP (AKA "info frames") */
@@ -3107,7 +3112,7 @@ void core_link_enable_stream(
 	} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
 		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 				dc_is_virtual_signal(pipe_ctx->stream->signal))
-			dp_set_dsc_enable(pipe_ctx, true);
+			dp_set_dsc_on_rx(pipe_ctx, true);
 
 	}
 }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 51e0ee6e7695..ac2103dec9e7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -394,7 +394,7 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
 	DC_LOG_DSC("\tslice_width %d", config->slice_width);
 }
 
-static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
 {
 	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index b0f61bd7c208..03f0c9914520 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -623,6 +623,13 @@ enum dc_status dcn20_enable_stream_timing(
 
 	/* TODO check if timing_changed, disable stream if timing changed */
 
+	/* Have to setup DSC here to make sure the bandwidth sent to DIG BE won't be bigger than
+	 * what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag will be automatically
+	 * set at a later time when the video is enabled (DP_VID_STREAM_EN = 1).
+	 */
+	if (pipe_ctx->stream->timing.flags.DSC)
+		dp_set_dsc_on_stream(pipe_ctx, true);
+
 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
 		opp_cnt++;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index e94e5fbf2aa2..64f401e4db54 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -85,6 +85,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable);
 bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
 void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
 
 #endif /* __DC_LINK_DP_H__ */
-- 
2.25.1

* [PATCH 07/12] drm/amd/display: determine rx id list bytes to read based on device count
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (5 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 06/12] drm/amd/display: Program DSC during timing programming Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 08/12] drm/amd/display: fix a minor HDCP logging error Rodrigo Siqueira
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Ashley Thomas, Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira,
	Wenjing Liu, Bhawanpreet.Lakha

From: Wenjing Liu <Wenjing.Liu@amd.com>

[why]
Some RX devices don't like us reading the rx id list up to the maximum
rx id list size. As discussed, we decided to read the rx id list based
on the device count instead.

[how]
According to the HDCP spec, the actual rx id list size is calculated as
rx id list size = 2 + 3 + 16 + 5 * device_count. We read 16 bytes at a
time until we have reached or exceeded the rx id list size.
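
As a worked example (numbers illustrative only): with device_count = 3,
rx id list size = 2 + 3 + 16 + 5 * 3 = 36 bytes, so after the first
16-byte transaction the remainder is read in whole 16-byte transactions
(two more here, 32 bytes), which reaches or exceeds the rx id list size.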

Signed-off-by: Wenjing Liu <Wenjing.Liu@amd.com>
Reviewed-by: Ashley Thomas <Ashley.Thomas2@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 .../drm/amd/display/modules/hdcp/hdcp_ddc.c   | 24 +++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index ff9d54812e62..816759d10cbc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id {
 	MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
 	MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
 	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
 	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
 	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
 	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
@@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = {
 	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
 	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
@@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = {
 	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
 	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
@@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
 
 enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
 {
-	enum mod_hdcp_status status;
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
 	if (is_dp_hdcp(hdcp)) {
+		uint32_t device_count = 0;
+		uint32_t rx_id_list_size = 0;
+		uint32_t bytes_read = 0;
+
 		hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
-				hdcp->auth.msg.hdcp2.rx_id_list+1,
-				sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
-
+						hdcp->auth.msg.hdcp2.rx_id_list+1,
+						HDCP_MAX_AUX_TRANSACTION_SIZE);
+		if (status == MOD_HDCP_STATUS_SUCCESS) {
+			bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE;
+			device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+					(HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+			rx_id_list_size = MIN((21 + 5 * device_count),
+					(sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1));
+			status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
+					hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read,
+					(rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE);
+		}
 	} else {
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
 				hdcp->auth.msg.hdcp2.rx_id_list,
-- 
2.25.1

* [PATCH 08/12] drm/amd/display: fix a minor HDCP logging error
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (6 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 07/12] drm/amd/display: determine rx id list bytes to read based on device count Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 09/12] drm/amd/display: Stop if retimer is not available Rodrigo Siqueira
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Ashley Thomas, Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira,
	Wenjing Liu, Bhawanpreet.Lakha

From: Wenjing Liu <Wenjing.Liu@amd.com>

[why]
In the HDCP uninitialized state, a CPIRQ event would make the log report
an internal policy error because the CPIRQ event is not recognized as an
unexpected event.

[how]
A CPIRQ issued in the HDCP uninitialized state is unexpected, so set the
unexpected event flag in the event ctx.

Signed-off-by: Wenjing Liu <Wenjing.Liu@amd.com>
Reviewed-by: Ashley Thomas <Ashley.Thomas2@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 7a571b3f62d6..cc1d3f470b99 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -114,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
 	} else if (is_in_hdcp2_dp_states(hdcp)) {
 		status = mod_hdcp_hdcp2_dp_execution(hdcp,
 				event_ctx, &input->hdcp2);
+	} else {
+		event_ctx->unexpected_event = 1;
+		goto out;
 	}
 out:
 	return status;
-- 
2.25.1

* [PATCH 09/12] drm/amd/display: Stop if retimer is not available
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (7 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 08/12] drm/amd/display: fix a minor HDCP logging error Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 10/12] drm/amd/display: writing stereo polarity register if swapped Rodrigo Siqueira
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Sunpeng.Li, Bhawanpreet.Lakha, Rodrigo.Siqueira, Harry.Wentland,
	Hersen Wu

Raven provides retimer feature support that requires i2c interaction in
order to work well. All settings required for this configuration are
loaded from the Atom BIOS, which includes the i2c address. If the
retimer feature is not available, we should abort the attempt to
configure it; otherwise, the following line returns
I2C_CHANNEL_OPERATION_NO_RESPONSE:

 i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer));
 ...
 if (!i2c_success)
   ASSERT(i2c_success);

This ends up causing problems with hotplugging HDMI displays on Raven,
and causes retimer settings to warn like so:

WARNING: CPU: 1 PID: 429 at
drivers/gpu/drm/amd/amdgpu/../dal/dc/core/dc_link.c:1998
write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu] Modules linked in:
edac_mce_amd ccp kvm irqbypass binfmt_misc crct10dif_pclmul crc32_pclmul
ghash_clmulni_intel snd_hda_codec_realtek snd_hda_codec_generic
ledtrig_audio snd_hda_codec_hdmi snd_hda_intel amdgpu(+) snd_hda_codec
snd_hda_core snd_hwdep snd_pcm snd_seq_midi snd_seq_midi_event
snd_rawmidi aesni_intel snd_seq amd_iommu_v2 gpu_sched aes_x86_64
crypto_simd cryptd glue_helper snd_seq_device ttm drm_kms_helper
snd_timer eeepc_wmi wmi_bmof asus_wmi sparse_keymap drm mxm_wmi snd
k10temp fb_sys_fops syscopyarea sysfillrect sysimgblt soundcore joydev
input_leds mac_hid sch_fq_codel parport_pc ppdev lp parport ip_tables
x_tables autofs4 igb i2c_algo_bit hid_generic usbhid i2c_piix4 dca ahci
hid libahci video wmi gpio_amdpt gpio_generic CPU: 1 PID: 429 Comm:
systemd-udevd Tainted: G        W         5.2.0-rc1sept162019+ #1
Hardware name: System manufacturer System Product Name/ROG STRIX B450-F
GAMING, BIOS 2605 08/06/2019
RIP: 0010:write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu]
Code: ff 0f b6 4d ce 44 0f b6 45 cf 44 0f b6 c8 45 89 cf 44 89 e2 48 c7
c6 f0 34 bc c0 bf 04 00 00 00 e8 63 b0 90 ff 45 84 ff 75 02 <0f> 0b 42
0f b6 04 73 8d 50 f6 80 fa 02 77 8c 3c 0a 0f 85 c8 00 00 RSP:
0018:ffffa99d02726fd0 EFLAGS: 00010246
RAX: 0000000000000000 RBX: ffffa99d02727035 RCX: 0000000000000006
RDX: 0000000000000000 RSI: 0000000000000002 RDI: ffff976acc857440
RBP: ffffa99d02727018 R08: 0000000000000002 R09: 000000000002a600
R10: ffffe90610193680 R11: 00000000000005e3 R12: 000000000000005d
R13: ffff976ac4b201b8 R14: 0000000000000001 R15: 0000000000000000
FS:  00007f14f99e1680(0000) GS:ffff976acc840000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fdf212843b8 CR3: 0000000408906000 CR4: 00000000003406e0
Call Trace:
 core_link_enable_stream+0x626/0x680 [amdgpu]
 dce110_apply_ctx_to_hw+0x414/0x4e0 [amdgpu]
 dc_commit_state+0x331/0x5e0 [amdgpu]
 ? drm_calc_timestamping_constants+0xf9/0x150 [drm]
 amdgpu_dm_atomic_commit_tail+0x395/0x1e00 [amdgpu]
 ? dm_plane_helper_prepare_fb+0x20c/0x280 [amdgpu]
 commit_tail+0x42/0x70 [drm_kms_helper]
 drm_atomic_helper_commit+0x10c/0x120 [drm_kms_helper]
 amdgpu_dm_atomic_commit+0x95/0xa0 [amdgpu]
 drm_atomic_commit+0x4a/0x50 [drm]
 restore_fbdev_mode_atomic+0x1c0/0x1e0 [drm_kms_helper]
 restore_fbdev_mode+0x4c/0x160 [drm_kms_helper]
 ? _cond_resched+0x19/0x40
 drm_fb_helper_restore_fbdev_mode_unlocked+0x4e/0xa0 [drm_kms_helper]
 drm_fb_helper_set_par+0x2d/0x50 [drm_kms_helper]
 fbcon_init+0x471/0x630
 visual_init+0xd5/0x130
 do_bind_con_driver+0x20a/0x430
 do_take_over_console+0x7d/0x1b0
 do_fbcon_takeover+0x5c/0xb0
 fbcon_event_notify+0x6cd/0x8a0
 notifier_call_chain+0x4c/0x70
 blocking_notifier_call_chain+0x43/0x60
 fb_notifier_call_chain+0x1b/0x20
 register_framebuffer+0x254/0x360
 __drm_fb_helper_initial_config_and_unlock+0x2c5/0x510 [drm_kms_helper]
 drm_fb_helper_initial_config+0x35/0x40 [drm_kms_helper]
 amdgpu_fbdev_init+0xcd/0x100 [amdgpu]
 amdgpu_device_init+0x1156/0x1930 [amdgpu]
 amdgpu_driver_load_kms+0x8d/0x2e0 [amdgpu]
 drm_dev_register+0x12b/0x1c0 [drm]
 amdgpu_pci_probe+0xd3/0x160 [amdgpu]
 local_pci_probe+0x47/0xa0
 pci_device_probe+0x142/0x1b0
 really_probe+0xf5/0x3d0
 driver_probe_device+0x11b/0x130
 device_driver_attach+0x58/0x60
 __driver_attach+0xa3/0x140
 ? device_driver_attach+0x60/0x60
 ? device_driver_attach+0x60/0x60
 bus_for_each_dev+0x74/0xb0
 ? kmem_cache_alloc_trace+0x1a3/0x1c0
 driver_attach+0x1e/0x20
 bus_add_driver+0x147/0x220
 ? 0xffffffffc0cb9000
 driver_register+0x60/0x100
 ? 0xffffffffc0cb9000
 __pci_register_driver+0x5a/0x60
 amdgpu_init+0x74/0x83 [amdgpu]
 do_one_initcall+0x4a/0x1fa
 ? _cond_resched+0x19/0x40
 ? kmem_cache_alloc_trace+0x3f/0x1c0
 ? __vunmap+0x1cc/0x200
 do_init_module+0x5f/0x227
 load_module+0x2330/0x2b40
 __do_sys_finit_module+0xfc/0x120
 ? __do_sys_finit_module+0xfc/0x120
 __x64_sys_finit_module+0x1a/0x20
 do_syscall_64+0x5a/0x130
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7f14f9500839
Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89
f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01
f0 ff ff 73 01 c3 48 8b 0d 1f f6 2c 00 f7 d8 64 89 01 48
RSP: 002b:00007fff9bc4f5a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000055afb5abce30 RCX: 00007f14f9500839
RDX: 0000000000000000 RSI: 000055afb5ace0f0 RDI: 0000000000000017
RBP: 000055afb5ace0f0 R08: 0000000000000000 R09: 000000000000000a
R10: 0000000000000017 R11: 0000000000000246 R12: 0000000000000000
R13: 000055afb5aad800 R14: 0000000000020000 R15: 0000000000000000
---[ end trace c286e96563966f08 ]---

This commit reworks the way we handle the i2c write for the retimer so
that we abort the configuration if the feature is not available on the
device. For debugging's sake, we keep a simple log message in case the
retimer is not available.
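
The error handling after the rework, in a minimal sketch (condensed from
the repeated pattern in the diff below):

 	i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer));
 	if (!i2c_success)
 		goto i2c_write_fail;
 	/* ... the remaining writes follow the same pattern ... */
 	return;

 i2c_write_fail:
 	DC_LOG_DEBUG("Set retimer failed");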

Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 67 ++++++++-----------
 1 file changed, 29 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 114f77759ebf..83df17a17271 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1765,8 +1765,7 @@ static void write_i2c_retimer_setting(
 				slave_address, buffer[0], buffer[1], i2c_success?1:0);
 
 			if (!i2c_success)
-				/* Write failure */
-				ASSERT(i2c_success);
+				goto i2c_write_fail;
 
 			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
 			 * needs to be set to 1 on every 0xA-0xC write.
@@ -1784,8 +1783,7 @@ static void write_i2c_retimer_setting(
 						pipe_ctx->stream->link->ddc,
 						slave_address, &offset, 1, &value, 1);
 					if (!i2c_success)
-						/* Write failure */
-						ASSERT(i2c_success);
+						goto i2c_write_fail;
 				}
 
 				buffer[0] = offset;
@@ -1797,8 +1795,7 @@ static void write_i2c_retimer_setting(
 					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 					slave_address, buffer[0], buffer[1], i2c_success?1:0);
 				if (!i2c_success)
-					/* Write failure */
-					ASSERT(i2c_success);
+					goto i2c_write_fail;
 			}
 		}
 	}
@@ -1818,8 +1815,7 @@ static void write_i2c_retimer_setting(
 					slave_address, buffer[0], buffer[1], i2c_success?1:0);
 
 				if (!i2c_success)
-					/* Write failure */
-					ASSERT(i2c_success);
+					goto i2c_write_fail;
 
 				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
 				 * needs to be set to 1 on every 0xA-0xC write.
@@ -1837,8 +1833,7 @@ static void write_i2c_retimer_setting(
 								pipe_ctx->stream->link->ddc,
 								slave_address, &offset, 1, &value, 1);
 						if (!i2c_success)
-							/* Write failure */
-							ASSERT(i2c_success);
+							goto i2c_write_fail;
 					}
 
 					buffer[0] = offset;
@@ -1850,8 +1845,7 @@ static void write_i2c_retimer_setting(
 						offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 						slave_address, buffer[0], buffer[1], i2c_success?1:0);
 					if (!i2c_success)
-						/* Write failure */
-						ASSERT(i2c_success);
+						goto i2c_write_fail;
 				}
 			}
 		}
@@ -1869,8 +1863,7 @@ static void write_i2c_retimer_setting(
 				offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 				slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0x00 to 0x23 */
 		buffer[0] = 0x00;
@@ -1881,8 +1874,7 @@ static void write_i2c_retimer_setting(
 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0xff to 0x00 */
 		buffer[0] = 0xff;
@@ -1893,10 +1885,14 @@ static void write_i2c_retimer_setting(
 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 	}
+
+	return;
+
+i2c_write_fail:
+	DC_LOG_DEBUG("Set retimer failed");
 }
 
 static void write_i2c_default_retimer_setting(
@@ -1921,8 +1917,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0A to 0x17 */
 	buffer[0] = 0x0A;
@@ -1933,8 +1928,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0B to 0xDA or 0xD8 */
 	buffer[0] = 0x0B;
@@ -1945,8 +1939,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0A to 0x17 */
 	buffer[0] = 0x0A;
@@ -1957,8 +1950,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0C to 0x1D or 0x91 */
 	buffer[0] = 0x0C;
@@ -1969,8 +1961,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0A to 0x17 */
 	buffer[0] = 0x0A;
@@ -1981,8 +1972,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 
 	if (is_vga_mode) {
@@ -1997,8 +1987,7 @@ static void write_i2c_default_retimer_setting(
 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0x00 to 0x23 */
 		buffer[0] = 0x00;
@@ -2009,8 +1998,7 @@ static void write_i2c_default_retimer_setting(
 			offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0xff to 0x00 */
 		buffer[0] = 0xff;
@@ -2021,9 +2009,13 @@ static void write_i2c_default_retimer_setting(
 			offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 	}
+
+	return;
+
+i2c_write_fail:
+	DC_LOG_DEBUG("Set default retimer failed");
 }
 
 static void write_i2c_redriver_setting(
@@ -2052,8 +2044,7 @@ static void write_i2c_redriver_setting(
 		slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
 
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		DC_LOG_DEBUG("Set redriver failed");
 }
 
 static void disable_link(struct dc_link *link, enum signal_type signal)
-- 
2.25.1

* [PATCH 10/12] drm/amd/display: writing stereo polarity register if swapped
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (8 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 09/12] drm/amd/display: Stop if retimer is not available Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 11/12] drm/amd/display: separate FEC capability from fec debug flag Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 12/12] drm/amd/display: 3.2.76 Rodrigo Siqueira
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Aric Cyr, Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira,
	Martin Leung, Bhawanpreet.Lakha

From: Martin Leung <martin.leung@amd.com>

[why]
On some displays that prefer swapped polarity, we were seeing L/R images
swapped because OTG_STEREO_SYNC_OUTPUT_POLARITY would always be mapped
to 0.

[how]
Fix the initial dal3 implementation to properly update the polarity
field according to crtc_stereo_flags (the same way as
OTG_STEREO_EYE_FLAG_POLARITY).

Signed-off-by: Martin Leung <martin.leung@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 94ac34106776..63acb8ff7462 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1193,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc,
 			REG_UPDATE_3(OTG_STEREO_CONTROL,
 				OTG_STEREO_EN, stereo_en,
 				OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
-				OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+				OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
 
 		if (flags->PROGRAM_POLARITY)
 			REG_UPDATE(OTG_STEREO_CONTROL,
-- 
2.25.1

* [PATCH 11/12] drm/amd/display: separate FEC capability from fec debug flag
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (9 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 10/12] drm/amd/display: writing stereo polarity register if swapped Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  2020-03-03 23:27 ` [PATCH 12/12] drm/amd/display: 3.2.76 Rodrigo Siqueira
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Ashley Thomas, Sunpeng.Li, Harry.Wentland, Rodrigo.Siqueira,
	Wenjing Liu, Bhawanpreet.Lakha

From: Wenjing Liu <Wenjing.Liu@amd.com>

[why]
The FEC capability query should not be affected by the debug decision on
whether to disable FEC. We should not determine whether the display
supports FEC by checking a debug option.
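
The resulting split, in a minimal sketch (condensed from the
dp_set_fec_ready()/dp_set_fec_enable() hunks below):

 	/* dc_link_is_fec_supported() now reports capability only */
 	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
 		return; /* the enable paths still honor the debug override */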

Signed-off-by: Wenjing Liu <Wenjing.Liu@amd.com>
Reviewed-by: Ashley Thomas <Ashley.Thomas2@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  | 16 ++++++++--------
 drivers/gpu/drm/amd/display/dc/core/dc_link.c    |  3 +--
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c |  4 ++--
 drivers/gpu/drm/amd/display/dc/dc.h              |  8 +++++++-
 4 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 4837640530ad..cc1b52b72c0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -207,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
 
 	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
 				   dsc_caps, NULL,
-				   &dc_sink->sink_dsc_caps.dsc_dec_caps))
+				   &dc_sink->dsc_caps.dsc_dec_caps))
 		return false;
 
 	return true;
@@ -262,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 			if (!validate_dsc_caps_on_connector(aconnector))
-				memset(&aconnector->dc_sink->sink_dsc_caps,
-				       0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+				memset(&aconnector->dc_sink->dsc_caps,
+				       0, sizeof(aconnector->dc_sink->dsc_caps));
 #endif
 		}
 	}
@@ -550,7 +550,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
 		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
 		if (vars[i].dsc_enabled && dc_dsc_compute_config(
 					params[i].sink->ctx->dc->res_pool->dscs[0],
-					&params[i].sink->sink_dsc_caps.dsc_dec_caps,
+					&params[i].sink->dsc_caps.dsc_dec_caps,
 					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
 					0,
 					params[i].timing,
@@ -571,7 +571,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
 	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
 	dc_dsc_compute_config(
 			param.sink->ctx->dc->res_pool->dscs[0],
-			&param.sink->sink_dsc_caps.dsc_dec_caps,
+			&param.sink->dsc_caps.dsc_dec_caps,
 			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
 			(int) kbps, param.timing, &dsc_config);
 
@@ -768,14 +768,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		params[count].sink = stream->sink;
 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 		params[count].port = aconnector->port;
-		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
 		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
 		if (!dc_dsc_compute_bandwidth_range(
 				stream->sink->ctx->dc->res_pool->dscs[0],
 				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
 				dsc_policy.min_target_bpp,
 				dsc_policy.max_target_bpp,
-				&stream->sink->sink_dsc_caps.dsc_dec_caps,
+				&stream->sink->dsc_caps.dsc_dec_caps,
 				&stream->timing, &params[count].bw_range))
 			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
 
@@ -857,7 +857,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 		if (!aconnector || !aconnector->dc_sink)
 			continue;
 
-		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
 			continue;
 
 		if (computed_streams[i])
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 83df17a17271..fb603bd46fac 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3407,7 +3407,7 @@ uint32_t dc_link_bandwidth_kbps(
 	link_bw_kbps *= 8;   /* 8 bits per byte*/
 	link_bw_kbps *= link_setting->lane_count;
 
-	if (dc_link_is_fec_supported(link)) {
+	if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
 		/* Account for FEC overhead.
 		 * We have to do it based on caps,
 		 * and not based on FEC being set ready,
@@ -3456,7 +3456,6 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
 	return (dc_is_dp_signal(link->connector_signal) &&
 			link->link_enc->features.fec_supported &&
 			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
-			!link->dc->debug.disable_fec &&
 			!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index fc3664dd5e88..9553755be286 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4152,7 +4152,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
 	struct link_encoder *link_enc = link->link_enc;
 	uint8_t fec_config = 0;
 
-	if (!dc_link_is_fec_supported(link))
+	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
 		return;
 
 	if (link_enc->funcs->fec_set_ready &&
@@ -4187,7 +4187,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 {
 	struct link_encoder *link_enc = link->link_enc;
 
-	if (!dc_link_is_fec_supported(link))
+	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
 		return;
 
 	if (link_enc->funcs->fec_set_enable &&
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2b538f477c82..5508c32f4484 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1025,6 +1025,11 @@ struct dc_sink_dsc_caps {
 	struct dsc_dec_dpcd_caps dsc_dec_caps;
 };
 
+struct dc_sink_fec_caps {
+	bool is_rx_fec_supported;
+	bool is_topology_fec_supported;
+};
+
 /*
  * The sink structure contains EDID and other display device properties
  */
@@ -1038,7 +1043,8 @@ struct dc_sink {
 	struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
 	bool converter_disable_audio;
 
-	struct dc_sink_dsc_caps sink_dsc_caps;
+	struct dc_sink_dsc_caps dsc_caps;
+	struct dc_sink_fec_caps fec_caps;
 
 	/* private to DC core */
 	struct dc_link *link;
-- 
2.25.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 15+ messages in thread
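
Taken together, the dc_link.c and dc_link_dp.c hunks above separate the FEC
capability query from the fec debug flag: dc_link_is_fec_supported() now
reports capability only (DP signal, link-encoder FEC support, DPCD
FEC_CAPABLE, non-FPGA environment), and the dc->debug.disable_fec override is
checked at the call sites that account for FEC bandwidth overhead or program
FEC ready/enable. Below is a minimal standalone sketch of the resulting
pattern; the mock_* types and names are invented for illustration and are not
driver code.

#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins for struct dc_link and friends. */
struct mock_debug_options { bool disable_fec; };
struct mock_dc { struct mock_debug_options debug; };
struct mock_link {
	bool is_dp_signal;
	bool enc_fec_supported;
	bool dpcd_fec_capable;
	struct mock_dc *dc;
};

/* After the patch: capability only, no debug-flag dependency. */
static bool mock_link_is_fec_supported(const struct mock_link *link)
{
	return link->is_dp_signal &&
	       link->enc_fec_supported &&
	       link->dpcd_fec_capable;
}

/* Call sites add the debug override explicitly, mirroring the
 * dp_set_fec_ready()/dp_set_fec_enable() hunks above. */
static void mock_set_fec_ready(struct mock_link *link, bool ready)
{
	if (!mock_link_is_fec_supported(link) || link->dc->debug.disable_fec)
		return;
	printf("programming FEC ready = %d\n", (int)ready);
}

int main(void)
{
	struct mock_dc dc = { .debug = { .disable_fec = true } };
	struct mock_link link = {
		.is_dp_signal = true,
		.enc_fec_supported = true,
		.dpcd_fec_capable = true,
		.dc = &dc,
	};

	/* Capable sink/encoder, but the debug flag keeps FEC from being armed. */
	mock_set_fec_ready(&link, true);
	return 0;
}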

* [PATCH 12/12] drm/amd/display: 3.2.76
  2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
                   ` (10 preceding siblings ...)
  2020-03-03 23:27 ` [PATCH 11/12] drm/amd/display: separate FEC capability from fec debug flag Rodrigo Siqueira
@ 2020-03-03 23:27 ` Rodrigo Siqueira
  11 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-03 23:27 UTC (permalink / raw)
  To: amd-gfx
  Cc: Sunpeng.Li, Bhawanpreet.Lakha, Aric Cyr, Rodrigo.Siqueira,
	Harry.Wentland

From: Aric Cyr <aric.cyr@amd.com>

Signed-off-by: Aric Cyr <aric.cyr@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5508c32f4484..1e6413a79d47 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.75"
+#define DC_VER "3.2.76"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.25.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH 01/12] drm/amd/display: update soc bb for nv14
  2020-03-03 23:27 ` [PATCH 01/12] drm/amd/display: update soc bb for nv14 Rodrigo Siqueira
@ 2020-03-04 14:14   ` Kazlauskas, Nicholas
  2020-03-04 15:46     ` Rodrigo Siqueira
  0 siblings, 1 reply; 15+ messages in thread
From: Kazlauskas, Nicholas @ 2020-03-04 14:14 UTC (permalink / raw)
  To: Rodrigo Siqueira, amd-gfx
  Cc: Sunpeng.Li, Bhawanpreet.Lakha, Jun Lei, Harry.Wentland, Martin Leung

On 2020-03-03 6:27 p.m., Rodrigo Siqueira wrote:
> From: Martin Leung <martin.leung@amd.com>
> 
> [why]
> nv14 previously inherited soc bb from generic dcn 2, did not match
> watermark values according to memory team
> 
> [how]
> add nv14 specific soc bb: copy nv2 generic that it was
> using from before, but changed num channels to 8
> 
> Signed-off-by: Martin Leung <martin.leung@amd.com>
> Reviewed-by: Jun Lei <Jun.Lei@amd.com>
> Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
> ---
>   .../drm/amd/display/dc/dcn20/dcn20_resource.c | 113 +++++++++++++++++-
>   1 file changed, 112 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> index c629a7b45f56..c8b85f62ae95 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> @@ -337,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
>   	.use_urgent_burst_bw = 0
>   };
>   
> +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
> +	.clock_limits = {
> +			{
> +				.state = 0,
> +				.dcfclk_mhz = 560.0,
> +				.fabricclk_mhz = 560.0,
> +				.dispclk_mhz = 513.0,
> +				.dppclk_mhz = 513.0,
> +				.phyclk_mhz = 540.0,
> +				.socclk_mhz = 560.0,
> +				.dscclk_mhz = 171.0,
> +				.dram_speed_mts = 8960.0,
> +			},
> +			{
> +				.state = 1,
> +				.dcfclk_mhz = 694.0,
> +				.fabricclk_mhz = 694.0,
> +				.dispclk_mhz = 642.0,
> +				.dppclk_mhz = 642.0,
> +				.phyclk_mhz = 600.0,
> +				.socclk_mhz = 694.0,
> +				.dscclk_mhz = 214.0,
> +				.dram_speed_mts = 11104.0,
> +			},
> +			{
> +				.state = 2,
> +				.dcfclk_mhz = 875.0,
> +				.fabricclk_mhz = 875.0,
> +				.dispclk_mhz = 734.0,
> +				.dppclk_mhz = 734.0,
> +				.phyclk_mhz = 810.0,
> +				.socclk_mhz = 875.0,
> +				.dscclk_mhz = 245.0,
> +				.dram_speed_mts = 14000.0,
> +			},
> +			{
> +				.state = 3,
> +				.dcfclk_mhz = 1000.0,
> +				.fabricclk_mhz = 1000.0,
> +				.dispclk_mhz = 1100.0,
> +				.dppclk_mhz = 1100.0,
> +				.phyclk_mhz = 810.0,
> +				.socclk_mhz = 1000.0,
> +				.dscclk_mhz = 367.0,
> +				.dram_speed_mts = 16000.0,
> +			},
> +			{
> +				.state = 4,
> +				.dcfclk_mhz = 1200.0,
> +				.fabricclk_mhz = 1200.0,
> +				.dispclk_mhz = 1284.0,
> +				.dppclk_mhz = 1284.0,
> +				.phyclk_mhz = 810.0,
> +				.socclk_mhz = 1200.0,
> +				.dscclk_mhz = 428.0,
> +				.dram_speed_mts = 16000.0,
> +			},
> +			/*Extra state, no dispclk ramping*/
> +			{
> +				.state = 5,
> +				.dcfclk_mhz = 1200.0,
> +				.fabricclk_mhz = 1200.0,
> +				.dispclk_mhz = 1284.0,
> +				.dppclk_mhz = 1284.0,
> +				.phyclk_mhz = 810.0,
> +				.socclk_mhz = 1200.0,
> +				.dscclk_mhz = 428.0,
> +				.dram_speed_mts = 16000.0,
> +			},
> +		},
> +	.num_states = 5,
> +	.sr_exit_time_us = 8.6,
> +	.sr_enter_plus_exit_time_us = 10.9,
> +	.urgent_latency_us = 4.0,
> +	.urgent_latency_pixel_data_only_us = 4.0,
> +	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
> +	.urgent_latency_vm_data_only_us = 4.0,
> +	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
> +	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
> +	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
> +	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
> +	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
> +	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
> +	.max_avg_sdp_bw_use_normal_percent = 40.0,
> +	.max_avg_dram_bw_use_normal_percent = 40.0,
> +	.writeback_latency_us = 12.0,
> +	.ideal_dram_bw_after_urgent_percent = 40.0,
> +	.max_request_size_bytes = 256,
> +	.dram_channel_width_bytes = 2,
> +	.fabric_datapath_to_dcn_data_return_bytes = 64,
> +	.dcn_downspread_percent = 0.5,
> +	.downspread_percent = 0.38,
> +	.dram_page_open_time_ns = 50.0,
> +	.dram_rw_turnaround_time_ns = 17.5,
> +	.dram_return_buffer_per_channel_bytes = 8192,
> +	.round_trip_ping_latency_dcfclk_cycles = 131,
> +	.urgent_out_of_order_return_per_channel_bytes = 256,
> +	.channel_interleave_bytes = 256,
> +	.num_banks = 8,
> +	.num_chans = 8,
> +	.vmm_page_size_bytes = 4096,
> +	.dram_clock_change_latency_us = 404.0,
> +	.dummy_pstate_latency_us = 5.0,
> +	.writeback_dram_clock_change_latency_us = 23.0,
> +	.return_bus_width_bytes = 64,
> +	.dispclk_dppclk_vco_speed_mhz = 3850,
> +	.xfc_bus_transport_time_us = 20,
> +	.xfc_xbuf_latency_tolerance_us = 4,
> +	.use_urgent_burst_bw = 0
> +};
> +
>   struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
>   
>   #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
> @@ -3298,7 +3409,7 @@ static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
>   	uint32_t hw_internal_rev)
>   {
>   	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
> -		return &dcn2_0_nv12_soc;
> +		return &dcn2_0_nv14_soc;

Are you sure this is correct? Shouldn't this be checking that the
ASICREV is Navi14 here, not Navi12?
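
I'd have expected the nv14 table to be keyed off the Navi14 revision check
instead, i.e. something along the lines of (exact revision macro aside):

	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
		return &dcn2_0_nv14_soc;

with the Navi12 branch still returning dcn2_0_nv12_soc.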

Nicholas Kazlauskas

>   
>   	return &dcn2_0_soc;
>   }
> 

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 01/12] drm/amd/display: update soc bb for nv14
  2020-03-04 14:14   ` Kazlauskas, Nicholas
@ 2020-03-04 15:46     ` Rodrigo Siqueira
  0 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Siqueira @ 2020-03-04 15:46 UTC (permalink / raw)
  To: Kazlauskas, Nicholas
  Cc: Sunpeng.Li, Harry.Wentland, amd-gfx, Martin Leung, Jun Lei,
	Bhawanpreet.Lakha


On 03/04, Kazlauskas, Nicholas wrote:
> On 2020-03-03 6:27 p.m., Rodrigo Siqueira wrote:
> > From: Martin Leung <martin.leung@amd.com>
> > 
> > [why]
> > nv14 previously inherited soc bb from generic dcn 2, did not match
> > watermark values according to memory team
> > 
> > [how]
> > add nv14 specific soc bb: copy nv2 generic that it was
> > using from before, but changed num channels to 8
> > 
> > Signed-off-by: Martin Leung <martin.leung@amd.com>
> > Reviewed-by: Jun Lei <Jun.Lei@amd.com>
> > Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
> > ---
> >   .../drm/amd/display/dc/dcn20/dcn20_resource.c | 113 +++++++++++++++++-
> >   1 file changed, 112 insertions(+), 1 deletion(-)
> > 
> > diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> > index c629a7b45f56..c8b85f62ae95 100644
> > --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> > +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> > @@ -337,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
> >   	.use_urgent_burst_bw = 0
> >   };
> > +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
> > +	.clock_limits = {
> > +			{
> > +				.state = 0,
> > +				.dcfclk_mhz = 560.0,
> > +				.fabricclk_mhz = 560.0,
> > +				.dispclk_mhz = 513.0,
> > +				.dppclk_mhz = 513.0,
> > +				.phyclk_mhz = 540.0,
> > +				.socclk_mhz = 560.0,
> > +				.dscclk_mhz = 171.0,
> > +				.dram_speed_mts = 8960.0,
> > +			},
> > +			{
> > +				.state = 1,
> > +				.dcfclk_mhz = 694.0,
> > +				.fabricclk_mhz = 694.0,
> > +				.dispclk_mhz = 642.0,
> > +				.dppclk_mhz = 642.0,
> > +				.phyclk_mhz = 600.0,
> > +				.socclk_mhz = 694.0,
> > +				.dscclk_mhz = 214.0,
> > +				.dram_speed_mts = 11104.0,
> > +			},
> > +			{
> > +				.state = 2,
> > +				.dcfclk_mhz = 875.0,
> > +				.fabricclk_mhz = 875.0,
> > +				.dispclk_mhz = 734.0,
> > +				.dppclk_mhz = 734.0,
> > +				.phyclk_mhz = 810.0,
> > +				.socclk_mhz = 875.0,
> > +				.dscclk_mhz = 245.0,
> > +				.dram_speed_mts = 14000.0,
> > +			},
> > +			{
> > +				.state = 3,
> > +				.dcfclk_mhz = 1000.0,
> > +				.fabricclk_mhz = 1000.0,
> > +				.dispclk_mhz = 1100.0,
> > +				.dppclk_mhz = 1100.0,
> > +				.phyclk_mhz = 810.0,
> > +				.socclk_mhz = 1000.0,
> > +				.dscclk_mhz = 367.0,
> > +				.dram_speed_mts = 16000.0,
> > +			},
> > +			{
> > +				.state = 4,
> > +				.dcfclk_mhz = 1200.0,
> > +				.fabricclk_mhz = 1200.0,
> > +				.dispclk_mhz = 1284.0,
> > +				.dppclk_mhz = 1284.0,
> > +				.phyclk_mhz = 810.0,
> > +				.socclk_mhz = 1200.0,
> > +				.dscclk_mhz = 428.0,
> > +				.dram_speed_mts = 16000.0,
> > +			},
> > +			/*Extra state, no dispclk ramping*/
> > +			{
> > +				.state = 5,
> > +				.dcfclk_mhz = 1200.0,
> > +				.fabricclk_mhz = 1200.0,
> > +				.dispclk_mhz = 1284.0,
> > +				.dppclk_mhz = 1284.0,
> > +				.phyclk_mhz = 810.0,
> > +				.socclk_mhz = 1200.0,
> > +				.dscclk_mhz = 428.0,
> > +				.dram_speed_mts = 16000.0,
> > +			},
> > +		},
> > +	.num_states = 5,
> > +	.sr_exit_time_us = 8.6,
> > +	.sr_enter_plus_exit_time_us = 10.9,
> > +	.urgent_latency_us = 4.0,
> > +	.urgent_latency_pixel_data_only_us = 4.0,
> > +	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
> > +	.urgent_latency_vm_data_only_us = 4.0,
> > +	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
> > +	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
> > +	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
> > +	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
> > +	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
> > +	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
> > +	.max_avg_sdp_bw_use_normal_percent = 40.0,
> > +	.max_avg_dram_bw_use_normal_percent = 40.0,
> > +	.writeback_latency_us = 12.0,
> > +	.ideal_dram_bw_after_urgent_percent = 40.0,
> > +	.max_request_size_bytes = 256,
> > +	.dram_channel_width_bytes = 2,
> > +	.fabric_datapath_to_dcn_data_return_bytes = 64,
> > +	.dcn_downspread_percent = 0.5,
> > +	.downspread_percent = 0.38,
> > +	.dram_page_open_time_ns = 50.0,
> > +	.dram_rw_turnaround_time_ns = 17.5,
> > +	.dram_return_buffer_per_channel_bytes = 8192,
> > +	.round_trip_ping_latency_dcfclk_cycles = 131,
> > +	.urgent_out_of_order_return_per_channel_bytes = 256,
> > +	.channel_interleave_bytes = 256,
> > +	.num_banks = 8,
> > +	.num_chans = 8,
> > +	.vmm_page_size_bytes = 4096,
> > +	.dram_clock_change_latency_us = 404.0,
> > +	.dummy_pstate_latency_us = 5.0,
> > +	.writeback_dram_clock_change_latency_us = 23.0,
> > +	.return_bus_width_bytes = 64,
> > +	.dispclk_dppclk_vco_speed_mhz = 3850,
> > +	.xfc_bus_transport_time_us = 20,
> > +	.xfc_xbuf_latency_tolerance_us = 4,
> > +	.use_urgent_burst_bw = 0
> > +};
> > +
> >   struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
> >   #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
> > @@ -3298,7 +3409,7 @@ static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
> >   	uint32_t hw_internal_rev)
> >   {
> >   	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
> > -		return &dcn2_0_nv12_soc;
> > +		return &dcn2_0_nv14_soc;
> 
> Are you sure this is correct? Shouldn't this be checking that the
> ASICREV is Navi14 here, not Navi12?

Nice catch! I've corrected it to:

+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3408,9 +3408,12 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
        uint32_t hw_internal_rev)
 {
-       if (ASICREV_IS_NAVI12_P(hw_internal_rev))
+       if (ASICREV_IS_NAVI14_M(hw_internal_rev))
                return &dcn2_0_nv14_soc;
 
+       if (ASICREV_IS_NAVI12_P(hw_internal_rev))
+               return &dcn2_0_nv12_soc;
+
        return &dcn2_0_soc;
 }

Thanks!
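
With that squashed in, the helper should end up as below (assuming nothing
else in this function changes before it lands):

static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
	uint32_t hw_internal_rev)
{
	/* Most specific revision first: Navi14 gets its own bounding box. */
	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
		return &dcn2_0_nv14_soc;

	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
		return &dcn2_0_nv12_soc;

	/* Everything else keeps the generic DCN 2.0 bounding box. */
	return &dcn2_0_soc;
}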

> Nicholas Kazlauskas
> 
> >   	return &dcn2_0_soc;
> >   }
> > 
> 

-- 
Rodrigo Siqueira
https://siqueira.tech

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 15+ messages in thread

Thread overview: 15+ messages
2020-03-03 23:27 [PATCH 00/12] DC Patches March 03, 2020 Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 01/12] drm/amd/display: update soc bb for nv14 Rodrigo Siqueira
2020-03-04 14:14   ` Kazlauskas, Nicholas
2020-03-04 15:46     ` Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 02/12] drm/amd/display: Add stay count and bstatus to HDCP log Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 03/12] drm/amd/display: determine is mst hdcp based on stream instead of sink signal Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 04/12] drm/amd/display: Add registry for mem pwr control Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 05/12] drm/amd/display: Not check wm and clk change flag in optimized bandwidth Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 06/12] drm/amd/display: Program DSC during timing programming Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 07/12] drm/amd/display: determine rx id list bytes to read based on device count Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 08/12] drm/amd/display: fix a minor HDCP logging error Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 09/12] drm/amd/display: Stop if retimer is not available Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 10/12] drm/amd/display: writing stereo polarity register if swapped Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 11/12] drm/amd/display: separate FEC capability from fec debug flag Rodrigo Siqueira
2020-03-03 23:27 ` [PATCH 12/12] drm/amd/display: 3.2.76 Rodrigo Siqueira
