From: Aurabindo Pillai <aurabindo.pillai@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: stylon.wang@amd.com, Charlene Liu <charlene.liu@amd.com>,
	Muhammad Ahmed <ahmed.ahmed@amd.com>,
	chiahsuan.chung@amd.com, Sunpeng.Li@amd.com,
	Rodrigo.Siqueira@amd.com, roman.li@amd.com, jerry.zuo@amd.com,
	Aurabindo.Pillai@amd.com, hersenxs.wu@amd.com, wayne.lin@amd.com,
	Harry.Wentland@amd.com, agustin.gutierrez@amd.com
Subject: [PATCH 12/13] drm/amd/display: fix HW block PG sequence
Date: Wed, 6 Dec 2023 14:52:33 -0500
Message-ID: <20231206195234.182989-13-aurabindo.pillai@amd.com>
In-Reply-To: <20231206195234.182989-1-aurabindo.pillai@amd.com>

From: Charlene Liu <charlene.liu@amd.com>

[why]
Power up and power down should program the HW blocks in reverse order of
each other. Also make sure the root clock is disabled last.

Reviewed-by: Muhammad Ahmed <ahmed.ahmed@amd.com>
Acked-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Charlene Liu <charlene.liu@amd.com>
---
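
For reviewers, a minimal standalone sketch of the ordering this patch
enforces. The pg()/root_clock() stubs and the block groupings below are
illustrative stand-ins only (not the real dc/pg_cntl interfaces); the
hw_block_power_up()/hw_block_power_down() names mirror the new hooks but
their bodies are simplified:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-IP pg_control callbacks. */
static void pg(const char *block, bool on)
{
	printf("  %-18s -> power %s\n", block, on ? "on" : "off");
}

/* Stand-in for the root clock / DSC clock gating controls. */
static void root_clock(bool on)
{
	printf("  %-18s -> %s\n", "root clock", on ? "enabled" : "disabled");
}

/* Power up: back-end blocks first, then DSC, then HUBP/DPP and HPO last. */
static void hw_block_power_up(void)
{
	pg("MPC/OPP/OPTC/DWB", true);
	pg("DSC0..3", true);
	pg("HUBP/DPP 0..3", true);
	pg("HPO", true);
}

/* Power down: the exact reverse of the power up sequence. */
static void hw_block_power_down(void)
{
	pg("HPO", false);
	pg("HUBP/DPP 0..3", false);
	pg("DSC0..3", false);
	pg("MPC/OPP/OPTC/DWB", false);
}

int main(void)
{
	/* prepare_bandwidth path: enable the root clock before powering up */
	puts("power up:");
	root_clock(true);
	hw_block_power_up();

	/* optimize_bandwidth path: power down first, gate the root clock last */
	puts("power down:");
	hw_block_power_down();
	root_clock(false);
	return 0;
}

The point of splitting the old block_power_control() into two hooks is that
the caller no longer passes a power_on flag into a single loop order: each
direction gets its own sequence, so power down can mirror power up and the
root clock can be gated strictly after the blocks are powered down.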
 .../gpu/drm/amd/display/dc/dcn35/dcn35_init.c |   3 +-
 .../amd/display/dc/hwss/dcn35/dcn35_hwseq.c   | 134 +++++++++++++-----
 .../amd/display/dc/hwss/dcn35/dcn35_hwseq.h   |   6 +-
 .../drm/amd/display/dc/hwss/hw_sequencer.h    |   6 +-
 4 files changed, 105 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c
index 296bf3a38cb9..d594905eb246 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c
@@ -118,7 +118,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
 	.update_dsc_pg = dcn32_update_dsc_pg,
 	.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
 	.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
-	.block_power_control = dcn35_block_power_control,
+	.hw_block_power_up = dcn35_hw_block_power_up,
+	.hw_block_power_down = dcn35_hw_block_power_down,
 	.root_clock_control = dcn35_root_clock_control,
 	.set_idle_state = dcn35_set_idle_state,
 	.get_idle_state = dcn35_get_idle_state
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 09498aa92096..9262d3336182 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1123,9 +1123,23 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
 		update_state->pg_res_update[PG_HPO] = true;
 
 }
-
-void dcn35_block_power_control(struct dc *dc,
-	struct pg_block_update *update_state, bool power_on)
+/**
+	 * power down sequence
+	 * ONO Region 3, DCPG 25: hpo - SKIPPED
+	 * ONO Region 4, DCPG 0: dchubp0, dpp0
+	 * ONO Region 6, DCPG 1: dchubp1, dpp1
+	 * ONO Region 8, DCPG 2: dchubp2, dpp2
+	 * ONO Region 10, DCPG 3: dchubp3, dpp3
+	 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
+	 * ONO Region 5, DCPG 16: dsc0
+	 * ONO Region 7, DCPG 17: dsc1
+	 * ONO Region 9, DCPG 18: dsc2
+	 * ONO Region 11, DCPG 19: dsc3
+	 * ONO Region 2, DCPG 24: mpc opp optc dwb
+	 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
+*/
+void dcn35_hw_block_power_down(struct dc *dc,
+	struct pg_block_update *update_state)
 {
 	int i = 0;
 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
@@ -1134,66 +1148,99 @@ void dcn35_block_power_control(struct dc *dc,
 		return;
 	if (dc->debug.ignore_pg)
 		return;
+
 	if (update_state->pg_res_update[PG_HPO]) {
 		if (pg_cntl->funcs->hpo_pg_control)
-			pg_cntl->funcs->hpo_pg_control(pg_cntl, power_on);
+			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
 	}
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
 			update_state->pg_pipe_res_update[PG_DPP][i]) {
 			if (pg_cntl->funcs->hubp_dpp_pg_control)
-				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, power_on);
+				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
 		}
-
+	}
+	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
 		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
 			if (pg_cntl->funcs->dsc_pg_control)
-				pg_cntl->funcs->dsc_pg_control(pg_cntl, i, power_on);
-		}
-
-		if (update_state->pg_pipe_res_update[PG_MPCC][i]) {
-			if (pg_cntl->funcs->mpcc_pg_control)
-				pg_cntl->funcs->mpcc_pg_control(pg_cntl, i, power_on);
-		}
-
-		if (update_state->pg_pipe_res_update[PG_OPP][i]) {
-			if (pg_cntl->funcs->opp_pg_control)
-				pg_cntl->funcs->opp_pg_control(pg_cntl, i, power_on);
-		}
-
-		if (update_state->pg_pipe_res_update[PG_OPTC][i]) {
-			if (pg_cntl->funcs->optc_pg_control)
-				pg_cntl->funcs->optc_pg_control(pg_cntl, i, power_on);
+				pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
 		}
-	}
 
-	if (update_state->pg_res_update[PG_DWB]) {
-		if (pg_cntl->funcs->dwb_pg_control)
-			pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on);
-	}
 
 	/*this will need all the clients to unregister optc interruts let dmubfw handle this*/
 	if (pg_cntl->funcs->plane_otg_pg_control)
-		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on);
+		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
 
-}
+	//domain22, 23, 25 currently always on.
 
-void dcn35_root_clock_control(struct dc *dc,
-	struct pg_block_update *update_state, bool power_on)
+}
+/**
+	 * power up sequence
+	 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+	 * ONO Region 2, DCPG 24: mpc opp optc dwb
+	 * ONO Region 5, DCPG 16: dsc0
+	 * ONO Region 7, DCPG 17: dsc1
+	 * ONO Region 9, DCPG 18: dsc2
+	 * ONO Region 11, DCPG 19: dsc3
+	 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+	 * ONO Region 4, DCPG 0: dchubp0, dpp0
+	 * ONO Region 6, DCPG 1: dchubp1, dpp1
+	 * ONO Region 8, DCPG 2: dchubp2, dpp2
+	 * ONO Region 10, DCPG 3: dchubp3, dpp3
+	 * ONO Region 3, DCPG 25: hpo - SKIPPED
+ */
+void dcn35_hw_block_power_up(struct dc *dc,
+	struct pg_block_update *update_state)
 {
 	int i = 0;
 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
 
 	if (!pg_cntl)
 		return;
+	if (dc->debug.ignore_pg)
+		return;
+	//domain22, 23, 25 currently always on.
+	/*this will need all the clients to unregister optc interruts let dmubfw handle this*/
+	if (pg_cntl->funcs->plane_otg_pg_control)
+		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
+
+	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
+		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+			if (pg_cntl->funcs->dsc_pg_control)
+				pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+		}
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
 			update_state->pg_pipe_res_update[PG_DPP][i]) {
-			if (dc->hwseq->funcs.dpp_root_clock_control)
-				dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+			if (pg_cntl->funcs->hubp_dpp_pg_control)
+				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
 		}
+	}
+	if (update_state->pg_res_update[PG_HPO]) {
+		if (pg_cntl->funcs->hpo_pg_control)
+			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
+	}
+}
+void dcn35_root_clock_control(struct dc *dc,
+	struct pg_block_update *update_state, bool power_on)
+{
+	int i = 0;
+	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
 
+	if (!pg_cntl)
+		return;
+	/*enable root clock first when power up*/
+	if (power_on)
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+				update_state->pg_pipe_res_update[PG_DPP][i]) {
+				if (dc->hwseq->funcs.dpp_root_clock_control)
+					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+			}
+		}
+	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
 		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
 			if (power_on) {
 				if (dc->res_pool->dccg->funcs->enable_dsc)
@@ -1204,6 +1251,15 @@ void dcn35_root_clock_control(struct dc *dc,
 			}
 		}
 	}
+	/*disable root clock first when power down*/
+	if (!power_on)
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+				update_state->pg_pipe_res_update[PG_DPP][i]) {
+				if (dc->hwseq->funcs.dpp_root_clock_control)
+					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+			}
+		}
 }
 
 void dcn35_prepare_bandwidth(
@@ -1217,9 +1273,9 @@ void dcn35_prepare_bandwidth(
 
 		if (dc->hwss.root_clock_control)
 			dc->hwss.root_clock_control(dc, &pg_update_state, true);
-
-		if (dc->hwss.block_power_control)
-			dc->hwss.block_power_control(dc, &pg_update_state, true);
+		/*power up required HW block*/
+		if (dc->hwss.hw_block_power_up)
+			dc->hwss.hw_block_power_up(dc, &pg_update_state);
 	}
 
 	dcn20_prepare_bandwidth(dc, context);
@@ -1235,9 +1291,9 @@ void dcn35_optimize_bandwidth(
 
 	if (dc->hwss.calc_blocks_to_gate) {
 		dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
-
-		if (dc->hwss.block_power_control)
-			dc->hwss.block_power_control(dc, &pg_update_state, false);
+		/*try to power down unused block*/
+		if (dc->hwss.hw_block_power_down)
+			dc->hwss.hw_block_power_down(dc, &pg_update_state);
 
 		if (dc->hwss.root_clock_control)
 			dc->hwss.root_clock_control(dc, &pg_update_state, false);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0dff10d179b8..3837038dc4a8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -63,8 +63,10 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
 	struct pg_block_update *update_state);
 void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
 	struct pg_block_update *update_state);
-void dcn35_block_power_control(struct dc *dc,
-	struct pg_block_update *update_state, bool power_on);
+void dcn35_hw_block_power_up(struct dc *dc,
+	struct pg_block_update *update_state);
+void dcn35_hw_block_power_down(struct dc *dc,
+	struct pg_block_update *update_state);
 void dcn35_root_clock_control(struct dc *dc,
 	struct pg_block_update *update_state, bool power_on);
 
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 452680fe9aab..45dc6d4e9562 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -414,8 +414,10 @@ struct hw_sequencer_funcs {
 		struct pg_block_update *update_state);
 	void (*calc_blocks_to_ungate)(struct dc *dc, struct dc_state *context,
 		struct pg_block_update *update_state);
-	void (*block_power_control)(struct dc *dc,
-		struct pg_block_update *update_state, bool power_on);
+	void (*hw_block_power_up)(struct dc *dc,
+		struct pg_block_update *update_state);
+	void (*hw_block_power_down)(struct dc *dc,
+		struct pg_block_update *update_state);
 	void (*root_clock_control)(struct dc *dc,
 		struct pg_block_update *update_state, bool power_on);
 	void (*set_idle_state)(const struct dc *dc, bool allow_idle);
-- 
2.39.2

